search for: dmar_domain

Displaying 20 results from an estimated 31 matches for "dmar_domain".

2019 Dec 21
0
[PATCH 3/8] iommu/vt-d: Remove IOVA handling code from non-dma_ops path
...--------------- 1 file changed, 33 insertions(+), 56 deletions(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 64b1a9793daa..8d72ea0fb843 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1908,7 +1908,8 @@ static void domain_exit(struct dmar_domain *domain) domain_remove_dev_info(domain); /* destroy iovas */ - put_iova_domain(&domain->iovad); + if (domain->domain.type == IOMMU_DOMAIN_DMA) + put_iova_domain(&domain->iovad); if (domain->pgd) { struct page *freelist; @@ -2671,19 +2672,9 @@ static struct dmar_dom...
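For context, the hunk above makes put_iova_domain() conditional because only DMA-API domains own an iova_domain. A minimal sketch of the resulting check, assuming the dmar_domain layout of that era (an embedded struct iommu_domain named domain and an iovad member); the helper name is hypothetical:

/* Sketch only: mirrors the post-patch logic in domain_exit(). */
static void domain_put_iova(struct dmar_domain *domain)
{
        /* Only domains used through the DMA API own an iova_domain. */
        if (domain->domain.type == IOMMU_DOMAIN_DMA)
                put_iova_domain(&domain->iovad);
}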
2019 Dec 21
13
[PATCH 0/8] Convert the intel iommu driver to the dma-iommu api
This patchset converts the intel iommu driver to the dma-iommu api. While converting the driver, I exposed a bug in the intel i915 driver which causes a huge number of artifacts on the screen of my laptop. You can see a picture of it here: https://github.com/pippy360/kernelPatches/blob/master/IMG_20191219_225922.jpg This issue is most likely in the i915 driver and is most likely caused by the
2019 Dec 21
13
[PATCH 0/8] Convert the intel iommu driver to the dma-iommu api
This patchset converts the intel iommu driver to the dma-iommu api. While converting the driver, I exposed a bug in the intel i915 driver which causes a huge number of artifacts on the screen of my laptop. You can see a picture of it here: https://github.com/pippy360/kernelPatches/blob/master/IMG_20191219_225922.jpg This issue is most likely in the i915 driver and is most likely caused by the
2020 Aug 18
3
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...n); sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 237a470e1e9c..878178fe48f8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1160,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pages can only be freed after the IOTLB flush has been done. */ static struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn) + unsigned long last_pfn, + struct page *freelist) { - struct page *freelist; -...
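The signature change above threads a caller-supplied freelist through domain_unmap() so that page-table pages are released only after the IOTLB flush. A rough sketch of the intended call pattern, with unmap_and_free_range() as a hypothetical caller (domain_unmap() and dma_free_pagelist() are taken from the diff and the driver of that era):

/* Illustrative only: collect freed page-table pages, flush, then free. */
static void unmap_and_free_range(struct dmar_domain *domain,
                                 unsigned long start_pfn,
                                 unsigned long last_pfn)
{
        struct page *freelist = NULL;

        /* Clear the PTEs; freed table pages are chained onto freelist
         * instead of going back to the allocator immediately. */
        freelist = domain_unmap(domain, start_pfn, last_pfn, freelist);

        /* The IOTLB flush for the unmapped range happens here (via the
         * driver's flush_iotlb_range / iotlb_sync path) ... */

        /* ... and only afterwards is it safe to free the pages. */
        dma_free_pagelist(freelist);
}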
2020 Aug 18
3
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...n); sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 237a470e1e9c..878178fe48f8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1160,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pages can only be freed after the IOTLB flush has been done. */ static struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn) + unsigned long last_pfn, + struct page *freelist) { - struct page *freelist; -...
2019 Dec 21
0
[PATCH 4/8] iommu: Handle freelists when using deferred flushing in iommu drivers
...ain); sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 8d72ea0fb843..675ca2aa6e20 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1145,9 +1145,9 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pages can only be freed after the IOTLB flush has been done. */ static struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn) + unsigned long last_pfn, + struct page *freelist) { - struct page *freelist;...
2020 Apr 14
0
[PATCH v2 16/33] iommu/vt-d: Convert to probe/release_device() call-backs
...drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5781,78 +5781,27 @@ static bool intel_iommu_capable(enum iommu_cap cap) return false; } -static int intel_iommu_add_device(struct device *dev) +static struct iommu_device *intel_iommu_probe_device(struct device *dev) { - struct dmar_domain *dmar_domain; - struct iommu_domain *domain; struct intel_iommu *iommu; - struct iommu_group *group; u8 bus, devfn; - int ret; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) - return -ENODEV; - - iommu_device_link(&iommu->iommu, dev); + return ERR_PTR(-ENODEV);...
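The diff above replaces the old add_device/remove_device hooks with probe_device/release_device, which return a struct iommu_device * (or an ERR_PTR) and leave group setup to the iommu core. A minimal sketch of the new callback shape, mirroring the snippet rather than reproducing the full patch:

static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return ERR_PTR(-ENODEV);

        /* The core now creates the group and links the device; the
         * driver only reports which IOMMU instance serves it. */
        return &iommu->iommu;
}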
2020 Aug 17
1
[PATCH 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...n); sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 237a470e1e9c..878178fe48f8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1160,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pages can only be freed after the IOTLB flush has been done. */ static struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn) + unsigned long last_pfn, + struct page *freelist) { - struct page *freelist; -...
2020 Aug 18
0
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...= (sysmmu_iova_t)l_iova; > diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c > index 237a470e1e9c..878178fe48f8 100644 > --- a/drivers/iommu/intel/iommu.c > +++ b/drivers/iommu/intel/iommu.c > @@ -1160,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, > pages can only be freed after the IOTLB flush has been done. */ > static struct page *domain_unmap(struct dmar_domain *domain, > unsigned long start_pfn, > - unsigned long last_pfn) > + unsigned long last_pfn, > + struct page *freeli...
2023 Jan 18
10
[PATCH v2 00/10] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
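The mechanism the cover letter relies on is the GFP_KERNEL_ACCOUNT flag, which charges an allocation to the calling task's memory cgroup. A minimal sketch (the iommufd_obj_alloc() helper is hypothetical; only the gfp flag is the point):

#include <linux/slab.h>

/* Hypothetical helper: kernel memory pinned on behalf of an iommufd
 * file descriptor is allocated with GFP_KERNEL_ACCOUNT so it is
 * charged to the owning process's memory cgroup. */
static void *iommufd_obj_alloc(size_t size)
{
        return kzalloc(size, GFP_KERNEL_ACCOUNT);
}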
2023 Jan 23
11
[PATCH v3 00/10] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
2023 Jan 23
11
[PATCH v3 00/10] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
2023 Jan 06
8
[PATCH 0/8] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
2023 Jan 06
8
[PATCH 0/8] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
2023 Jan 06
8
[PATCH 0/8] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
2019 Dec 21
0
[PATCH 2/8] iommu/vt-d: Use default dma_direct_* mapping functions for direct mapped devices
...apping(struct device *dev) -{ - struct device_domain_info *info; - - info = dev->archdata.iommu; - if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO) - return (info->domain == si_domain); - - return 0; -} - static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) { struct dmar_domain *ndomain; @@ -3461,12 +3450,6 @@ static struct dmar_domain *get_private_domain_for_dev(struct device *dev) return domain; } -/* Check if the dev needs to go through non-identity map and unmap process.*/ -static bool iommu_no_mapping(struct d...
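The removed iommu_no_mapping()/si_domain check becomes unnecessary once identity-mapped devices are left without device-specific dma_map_ops, so the DMA API core falls back to the dma_direct_* helpers on its own. A simplified illustration of that dispatch (not the actual kernel/dma/mapping.c code; the function name is made up):

/* Simplified: with no per-device dma_map_ops installed, the DMA API
 * core takes the direct-mapping path. */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)       /* identity-mapped / direct device */
                return dma_direct_map_page(dev, page, offset, size, dir, attrs);

        return ops->map_page(dev, page, offset, size, dir, attrs);
}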
2020 Apr 29
0
[PATCH 1/5] swiotlb: Introduce concept of swiotlb_pool
..._direction dir, unsigned long attrs, cfb94a372f2d4e Lu Baolu 2019-09-06 3945 u64 dma_mask) cfb94a372f2d4e Lu Baolu 2019-09-06 3946 { cfb94a372f2d4e Lu Baolu 2019-09-06 3947 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); cfb94a372f2d4e Lu Baolu 2019-09-06 3948 struct dmar_domain *domain; cfb94a372f2d4e Lu Baolu 2019-09-06 3949 struct intel_iommu *iommu; cfb94a372f2d4e Lu Baolu 2019-09-06 3950 unsigned long iova_pfn; cfb94a372f2d4e Lu Baolu 2019-09-06 3951 unsigned long nrpages; cfb94a372f2d4e Lu Baolu 2019-09-06 3952 phys_addr_t tlb_addr; cfb94a...
2011 Jun 02
0
[PATCH] pci: Use pr_<level> and pr_fmt
...main_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); if (!iommu->domain_ids) { - printk(KERN_ERR "Allocating domain id array failed\n"); + pr_err("Allocating domain id array failed\n"); return -ENOMEM; } iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), GFP_KERNEL); if (!iommu->domains) { - printk(KERN_ERR "Allocating domain array failed\n"); + pr_err("Allocating domain array failed\n"); return -ENOMEM; } @@ -1325,7 +1323,7 @@ static int iommu_attach_domain(struct dmar_domain *domain, num = find_first_ze...
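The conversion shown above relies on the pr_<level> helpers, which prepend a per-file prefix supplied by pr_fmt(). A typical pattern (the prefix string and the check_domains() function here are only examples):

/* Define before any #include so every pr_*() call picks up the prefix. */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/printk.h>
#include <linux/errno.h>

static int check_domains(void *domains)
{
        if (!domains) {
                /* Expands to printk(KERN_ERR "DMAR: Allocating domain array failed\n") */
                pr_err("Allocating domain array failed\n");
                return -ENOMEM;
        }
        return 0;
}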
2011 Jun 02
0
[PATCH] pci: Use pr_<level> and pr_fmt
...main_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); if (!iommu->domain_ids) { - printk(KERN_ERR "Allocating domain id array failed\n"); + pr_err("Allocating domain id array failed\n"); return -ENOMEM; } iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), GFP_KERNEL); if (!iommu->domains) { - printk(KERN_ERR "Allocating domain array failed\n"); + pr_err("Allocating domain array failed\n"); return -ENOMEM; } @@ -1325,7 +1323,7 @@ static int iommu_attach_domain(struct dmar_domain *domain, num = find_first_ze...
2011 Jun 02
0
[PATCH] pci: Use pr_<level> and pr_fmt
...main_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); if (!iommu->domain_ids) { - printk(KERN_ERR "Allocating domain id array failed\n"); + pr_err("Allocating domain id array failed\n"); return -ENOMEM; } iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), GFP_KERNEL); if (!iommu->domains) { - printk(KERN_ERR "Allocating domain array failed\n"); + pr_err("Allocating domain array failed\n"); return -ENOMEM; } @@ -1325,7 +1323,7 @@ static int iommu_attach_domain(struct dmar_domain *domain, num = find_first_ze...