Christoph Hellwig
2021-Mar-01 08:42 UTC
cleanup unused or almost unused IOMMU APIs and the FSL PAMU driver
Hi all,

there are a bunch of IOMMU APIs that are entirely unused, or only used as a private communication channel between the FSL PAMU driver and its only consumer, the qbman portal driver. So this series drops a huge chunk of entirely unused FSL PAMU functionality, then drops all kinds of unused IOMMU APIs, and then replaces what is left of the iommu_attrs with properly typed, smaller and easier to use specific APIs.

Diffstat:

 arch/powerpc/include/asm/fsl_pamu_stash.h   |  12
 drivers/gpu/drm/msm/adreno/adreno_gpu.c     |   2
 drivers/iommu/amd/iommu.c                   |  23
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  85 ---
 drivers/iommu/arm/arm-smmu/arm-smmu.c       | 122 +---
 drivers/iommu/dma-iommu.c                   |   8
 drivers/iommu/fsl_pamu.c                    | 264 ----------
 drivers/iommu/fsl_pamu.h                    |  10
 drivers/iommu/fsl_pamu_domain.c             | 694 ++--------------------------
 drivers/iommu/fsl_pamu_domain.h             |  46 -
 drivers/iommu/intel/iommu.c                 |  55 --
 drivers/iommu/iommu.c                       |  75 ---
 drivers/soc/fsl/qbman/qman_portal.c         |  56 --
 drivers/vfio/vfio_iommu_type1.c             |  31 -
 drivers/vhost/vdpa.c                        |  10
 include/linux/iommu.h                       |  81 ---

 16 files changed, 214 insertions(+), 1360 deletions(-)
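As a concrete illustration of that last point, here is a minimal sketch of how the qbman portal's stash configuration changes over the series (all names are taken from the diffs in patches 05/17 and 07/17; this is illustrative, not the complete portal_set_cpu() code):

    /* before: generic, void-pointer based attribute interface */
    struct pamu_stash_attribute stash_attr = {
            .cpu   = cpu,
            .cache = PAMU_ATTR_CACHE_L1,
    };
    ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);

    /* after patch 07/17: a small, properly typed PAMU-specific helper */
    ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);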
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 01/17] iommu: remove the unused domain_window_disable method
domain_window_disable is wired up by fsl_pamu, but never actually called. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 48 --------------------------------- include/linux/iommu.h | 2 -- 2 files changed, 50 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index b2110767caf49c..53380cf1fa452f 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -473,53 +473,6 @@ static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) return ret; } -static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr) -{ - struct device_domain_info *info; - int ret = 0; - - list_for_each_entry(info, &dma_domain->devices, link) { - if (dma_domain->win_cnt == 1 && dma_domain->enabled) { - ret = pamu_disable_liodn(info->liodn); - if (!ret) - dma_domain->enabled = 0; - } else { - ret = pamu_disable_spaace(info->liodn, wnd_nr); - } - } - - return ret; -} - -static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - unsigned long flags; - int ret; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (!dma_domain->win_arr) { - pr_debug("Number of windows not configured\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return; - } - - if (wnd_nr >= dma_domain->win_cnt) { - pr_debug("Invalid window index\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return; - } - - if (dma_domain->win_arr[wnd_nr].valid) { - ret = disable_domain_win(dma_domain, wnd_nr); - if (!ret) { - dma_domain->win_arr[wnd_nr].valid = 0; - dma_domain->mapped--; - } - } - - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); -} static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) @@ -1032,7 +985,6 @@ static const struct iommu_ops fsl_pamu_ops = { .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, .domain_window_enable = fsl_pamu_window_enable, - .domain_window_disable = fsl_pamu_window_disable, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, .domain_get_attr = fsl_pamu_get_domain_attr, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5e7fe519430af4..47c8b318d8f523 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -209,7 +209,6 @@ struct iommu_iotlb_gather { * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @domain_window_enable: Configure and enable a particular window for a domain - * @domain_window_disable: Disable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -270,7 +269,6 @@ struct iommu_ops { /* Window handling functions */ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot); - void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); -- 2.29.2
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 02/17] iommu/fsl_pamu: remove fsl_pamu_get_domain_attr
None of the values returned by this function are ever queried. Also remove the DOMAIN_ATTR_FSL_PAMUV1 enum value that is not otherwise used. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 30 ------------------------------ include/linux/iommu.h | 4 ---- 2 files changed, 34 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 53380cf1fa452f..e587ec43f7e750 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -832,35 +832,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, return ret; } -static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, - enum iommu_attr attr_type, void *data) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - int ret = 0; - - switch (attr_type) { - case DOMAIN_ATTR_FSL_PAMU_STASH: - memcpy(data, &dma_domain->dma_stash, - sizeof(struct pamu_stash_attribute)); - break; - case DOMAIN_ATTR_FSL_PAMU_ENABLE: - *(int *)data = dma_domain->enabled; - break; - case DOMAIN_ATTR_FSL_PAMUV1: - *(int *)data = DOMAIN_ATTR_FSL_PAMUV1; - break; - case DOMAIN_ATTR_WINDOWS: - *(u32 *)data = dma_domain->win_cnt; - break; - default: - pr_debug("Unsupported attribute type\n"); - ret = -EINVAL; - break; - } - - return ret; -} - static struct iommu_group *get_device_iommu_group(struct device *dev) { struct iommu_group *group; @@ -987,7 +958,6 @@ static const struct iommu_ops fsl_pamu_ops = { .domain_window_enable = fsl_pamu_window_enable, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, - .domain_get_attr = fsl_pamu_get_domain_attr, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 47c8b318d8f523..52874ae164dd60 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -104,9 +104,6 @@ enum iommu_cap { * -the actual size of the mapped region of a window must be power * of 2 starting with 4KB and physical address must be naturally * aligned. - * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. - * The caller can invoke iommu_domain_get_attr to check if the underlying - * iommu implementation supports these constraints. */ enum iommu_attr { @@ -115,7 +112,6 @@ enum iommu_attr { DOMAIN_ATTR_WINDOWS, DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, - DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- 2.29.2
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 03/17] iommu/fsl_pamu: remove support for setting DOMAIN_ATTR_GEOMETRY
The default geometry is the same as the one set by qman_port given that FSL_PAMU depends on having 64-bit physical and thus DMA addresses. Remove the support to update the geometry and remove the now pointless geom_size field. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 55 +++-------------------------- drivers/iommu/fsl_pamu_domain.h | 6 ---- drivers/soc/fsl/qbman/qman_portal.c | 12 ------- 3 files changed, 5 insertions(+), 68 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index e587ec43f7e750..7bd08ddad07779 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -62,7 +62,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i geom = &dma_domain->iommu_domain.geometry; - if (!win_cnt || !dma_domain->geom_size) { + if (!win_cnt) { pr_debug("Number of windows/geometry not configured for the domain\n"); return 0; } @@ -72,7 +72,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i dma_addr_t subwin_iova; u32 wnd; - subwin_size = dma_domain->geom_size >> ilog2(win_cnt); + subwin_size = (geom->aperture_end + 1) >> ilog2(win_cnt); subwin_iova = iova & ~(subwin_size - 1); wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); win_ptr = &dma_domain->win_arr[wnd]; @@ -234,7 +234,7 @@ static int pamu_set_liodn(int liodn, struct device *dev, get_ome_index(&omi_index, dev); window_addr = geom_attr->aperture_start; - window_size = dma_domain->geom_size; + window_size = geom_attr->aperture_end + 1; spin_lock_irqsave(&iommu_lock, flags); ret = pamu_disable_liodn(liodn); @@ -303,7 +303,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void) domain->stash_id = ~(u32)0; domain->snoop_id = ~(u32)0; domain->win_cnt = pamu_get_max_subwin_cnt(); - domain->geom_size = 0; INIT_LIST_HEAD(&domain->devices); @@ -502,7 +501,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, return -EINVAL; } - win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); + win_size = (domain->geometry.aperture_end + 1) >> + ilog2(dma_domain->win_cnt); if (size > win_size) { pr_debug("Invalid window size\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); @@ -665,41 +665,6 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain, pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); } -static int configure_domain_geometry(struct iommu_domain *domain, void *data) -{ - struct iommu_domain_geometry *geom_attr = data; - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - dma_addr_t geom_size; - unsigned long flags; - - geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1; - /* - * Sanity check the geometry size. Also, we do not support - * DMA outside of the geometry. 
- */ - if (check_size(geom_size, geom_attr->aperture_start) || - !geom_attr->force_aperture) { - pr_debug("Invalid PAMU geometry attributes\n"); - return -EINVAL; - } - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (dma_domain->enabled) { - pr_debug("Can't set geometry attributes as domain is active\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } - - /* Copy the domain geometry information */ - memcpy(&domain->geometry, geom_attr, - sizeof(struct iommu_domain_geometry)); - dma_domain->geom_size = geom_size; - - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return 0; -} - /* Set the domain stash attribute */ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) { @@ -769,13 +734,6 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) return -EBUSY; } - /* Ensure that the geometry has been set for the domain */ - if (!dma_domain->geom_size) { - pr_debug("Please configure geometry before setting the number of windows\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - /* * Ensure we have valid window count i.e. it should be less than * maximum permissible limit and should be a power of two. @@ -811,9 +769,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, int ret = 0; switch (attr_type) { - case DOMAIN_ATTR_GEOMETRY: - ret = configure_domain_geometry(domain, data); - break; case DOMAIN_ATTR_FSL_PAMU_STASH: ret = configure_domain_stash(dma_domain, data); break; diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 2865d42782e802..53d359d66fe577 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -17,12 +17,6 @@ struct dma_window { }; struct fsl_dma_domain { - /* - * Indicates the geometry size for the domain. - * This would be set when the geometry is - * configured for the domain. - */ - dma_addr_t geom_size; /* * Number of windows assocaited with this domain. * During domain initialization, it is set to the diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 5685b67068931a..c958e6310d3094 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -47,7 +47,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) #ifdef CONFIG_FSL_PAMU struct device *dev = pcfg->dev; int window_count = 1; - struct iommu_domain_geometry geom_attr; struct pamu_stash_attribute stash_attr; int ret; @@ -56,17 +55,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); goto no_iommu; } - geom_attr.aperture_start = 0; - geom_attr.aperture_end - ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; - geom_attr.force_aperture = true; - ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, - &geom_attr); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, - ret); - goto out_domain_free; - } ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, &window_count); if (ret < 0) { -- 2.29.2
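A quick sanity check of the equivalence claimed in this patch (a sketch only, reusing the expressions from the diffs and the driver default visible in fsl_pamu_domain_alloc() in the next patch): since FSL_PAMU depends on 64-bit dma_addr_t, min(8 * sizeof(dma_addr_t), (size_t)36) evaluates to 36, so the aperture end that qman_portal used to request,

    ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1   /* = (1ULL << 36) - 1 */

is the same 64 GB aperture that the driver already sets up by default:

    dma_domain->iommu_domain.geometry.aperture_start = 0;
    dma_domain->iommu_domain.geometry.aperture_end   = (1ULL << 36) - 1;
    dma_domain->iommu_domain.geometry.force_aperture = true;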
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 04/17] iommu/fsl_pamu: merge iommu_alloc_dma_domain into fsl_pamu_domain_alloc
Keep the functionality to allocate the domain together. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 34 ++++++++++----------------------- 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 7bd08ddad07779..a4da5597755d3d 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -292,25 +292,6 @@ static int check_size(u64 size, dma_addr_t iova) return 0; } -static struct fsl_dma_domain *iommu_alloc_dma_domain(void) -{ - struct fsl_dma_domain *domain; - - domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); - if (!domain) - return NULL; - - domain->stash_id = ~(u32)0; - domain->snoop_id = ~(u32)0; - domain->win_cnt = pamu_get_max_subwin_cnt(); - - INIT_LIST_HEAD(&domain->devices); - - spin_lock_init(&domain->domain_lock); - - return domain; -} - static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) { unsigned long flags; @@ -412,12 +393,17 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) if (type != IOMMU_DOMAIN_UNMANAGED) return NULL; - dma_domain = iommu_alloc_dma_domain(); - if (!dma_domain) { - pr_debug("dma_domain allocation failed\n"); + dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); + if (!dma_domain) return NULL; - } - /* defaul geometry 64 GB i.e. maximum system address */ + + dma_domain->stash_id = ~(u32)0; + dma_domain->snoop_id = ~(u32)0; + dma_domain->win_cnt = pamu_get_max_subwin_cnt(); + INIT_LIST_HEAD(&dma_domain->devices); + spin_lock_init(&dma_domain->domain_lock); + + /* default geometry 64 GB i.e. maximum system address */ dma_domain->iommu_domain. geometry.aperture_start = 0; dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; dma_domain->iommu_domain.geometry.force_aperture = true; -- 2.29.2
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 05/17] iommu/fsl_pamu: remove support for multiple windows
The only domains allocated forces use of a single window. Remove all the code related to multiple window support, as well as the need for qman_portal to force a single window. Remove the now unused DOMAIN_ATTR_WINDOWS iommu_attr. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu.c | 264 +------------------------- drivers/iommu/fsl_pamu.h | 10 +- drivers/iommu/fsl_pamu_domain.c | 275 +++++----------------------- drivers/iommu/fsl_pamu_domain.h | 12 +- drivers/soc/fsl/qbman/qman_portal.c | 7 - include/linux/iommu.h | 1 - 6 files changed, 59 insertions(+), 510 deletions(-) diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index b9a974d9783113..3e1647cd5ad47a 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -63,19 +63,6 @@ static const struct of_device_id l3_device_ids[] = { /* maximum subwindows permitted per liodn */ static u32 max_subwindow_count; -/* Pool for fspi allocation */ -static struct gen_pool *spaace_pool; - -/** - * pamu_get_max_subwin_cnt() - Return the maximum supported - * subwindow count per liodn. - * - */ -u32 pamu_get_max_subwin_cnt(void) -{ - return max_subwindow_count; -} - /** * pamu_get_ppaace() - Return the primary PACCE * @liodn: liodn PAACT index for desired PAACE @@ -155,13 +142,6 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) return fls64(addrspace_size) - 2; } -/* Derive the PAACE window count encoding for the subwindow count */ -static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) -{ - /* window count is 2^(WCE+1) bytes */ - return __ffs(subwindow_cnt) - 1; -} - /* * Set the PAACE type as primary and set the coherency required domain * attribute @@ -174,89 +154,11 @@ static void pamu_init_ppaace(struct paace *ppaace) PAACE_M_COHERENCE_REQ); } -/* - * Set the PAACE type as secondary and set the coherency required domain - * attribute. - */ -static void pamu_init_spaace(struct paace *spaace) -{ - set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY); - set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR, - PAACE_M_COHERENCE_REQ); -} - -/* - * Return the spaace (corresponding to the secondary window index) - * for a particular ppaace. - */ -static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum) -{ - u32 subwin_cnt; - struct paace *spaace = NULL; - - subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1); - - if (wnum < subwin_cnt) - spaace = &spaact[paace->fspi + wnum]; - else - pr_debug("secondary paace out of bounds\n"); - - return spaace; -} - -/** - * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows - * required for primary PAACE in the secondary - * PAACE table. - * @subwin_cnt: Number of subwindows to be reserved. - * - * A PPAACE entry may have a number of associated subwindows. A subwindow - * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores - * the index (fspi) of the first SPAACE entry in the SPAACT table. This - * function returns the index of the first SPAACE entry. The remaining - * SPAACE entries are reserved contiguously from that index. - * - * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success. - * If no SPAACE entry is available or the allocator can not reserve the required - * number of contiguous entries function returns ULONG_MAX indicating a failure. 
- * - */ -static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) -{ - unsigned long spaace_addr; - - spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace)); - if (!spaace_addr) - return ULONG_MAX; - - return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace)); -} - -/* Release the subwindows reserved for a particular LIODN */ -void pamu_free_subwins(int liodn) -{ - struct paace *ppaace; - u32 subwin_cnt, size; - - ppaace = pamu_get_ppaace(liodn); - if (!ppaace) { - pr_debug("Invalid liodn entry\n"); - return; - } - - if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) { - subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1); - size = (subwin_cnt - 1) * sizeof(struct paace); - gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size); - set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); - } -} - /* * Function used for updating stash destination for the coressponding * LIODN. */ -int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) +int pamu_update_paace_stash(int liodn, u32 value) { struct paace *paace; @@ -265,11 +167,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) pr_debug("Invalid liodn entry\n"); return -ENOENT; } - if (subwin) { - paace = pamu_get_spaace(paace, subwin - 1); - if (!paace) - return -ENOENT; - } set_bf(paace->impl_attr, PAACE_IA_CID, value); mb(); @@ -277,31 +174,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) return 0; } -/* Disable a subwindow corresponding to the LIODN */ -int pamu_disable_spaace(int liodn, u32 subwin) -{ - struct paace *paace; - - paace = pamu_get_ppaace(liodn); - if (!paace) { - pr_debug("Invalid liodn entry\n"); - return -ENOENT; - } - if (subwin) { - paace = pamu_get_spaace(paace, subwin - 1); - if (!paace) - return -ENOENT; - set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID); - } else { - set_bf(paace->addr_bitfields, PAACE_AF_AP, - PAACE_AP_PERMS_DENIED); - } - - mb(); - - return 0; -} - /** * pamu_config_paace() - Sets up PPAACE entry for specified liodn * @@ -314,17 +186,15 @@ int pamu_disable_spaace(int liodn, u32 subwin) * stashid not defined * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then * snoopid not defined - * @subwin_cnt: number of sub-windows * @prot: window permissions * * Returns 0 upon success else error code < 0 returned */ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, u32 omi, unsigned long rpn, u32 snoopid, u32 stashid, - u32 subwin_cnt, int prot) + int prot) { struct paace *ppaace; - unsigned long fspi; if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { pr_debug("window size too small or not a power of two %pa\n", @@ -368,116 +238,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, if (~snoopid != 0) ppaace->domain_attr.to_host.snpid = snoopid; - if (subwin_cnt) { - /* The first entry is in the primary PAACE instead */ - fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1); - if (fspi == ULONG_MAX) { - pr_debug("spaace indexes exhausted\n"); - return -EINVAL; - } - - /* window count is 2^(WCE+1) bytes */ - set_bf(ppaace->impl_attr, PAACE_IA_WCE, - map_subwindow_cnt_to_wce(subwin_cnt)); - set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1); - ppaace->fspi = fspi; - } else { - set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); - ppaace->twbah = rpn >> 20; - set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn); - set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot); - set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0); - 
set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); - } - mb(); - - return 0; -} - -/** - * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow - * - * @liodn: Logical IO device number - * @subwin_cnt: number of sub-windows associated with dma-window - * @subwin: subwindow index - * @subwin_size: size of subwindow - * @omi: Operation mapping index - * @rpn: real (true physical) page number - * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then - * snoopid not defined - * @stashid: cache stash id for associated cpu - * @enable: enable/disable subwindow after reconfiguration - * @prot: sub window permissions - * - * Returns 0 upon success else error code < 0 returned - */ -int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin, - phys_addr_t subwin_size, u32 omi, unsigned long rpn, - u32 snoopid, u32 stashid, int enable, int prot) -{ - struct paace *paace; - - /* setup sub-windows */ - if (!subwin_cnt) { - pr_debug("Invalid subwindow count\n"); - return -EINVAL; - } - - paace = pamu_get_ppaace(liodn); - if (subwin > 0 && subwin < subwin_cnt && paace) { - paace = pamu_get_spaace(paace, subwin - 1); - - if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) { - pamu_init_spaace(paace); - set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn); - } - } - - if (!paace) { - pr_debug("Invalid liodn entry\n"); - return -ENOENT; - } - - if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) { - pr_debug("subwindow size out of range, or not a power of 2\n"); - return -EINVAL; - } - - if (rpn == ULONG_MAX) { - pr_debug("real page number out of range\n"); - return -EINVAL; - } - - /* window size is 2^(WSE+1) bytes */ - set_bf(paace->win_bitfields, PAACE_WIN_SWSE, - map_addrspace_size_to_wse(subwin_size)); - - set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); - paace->twbah = rpn >> 20; - set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn); - set_bf(paace->addr_bitfields, PAACE_AF_AP, prot); - - /* configure snoop id */ - if (~snoopid != 0) - paace->domain_attr.to_host.snpid = snoopid; - - /* set up operation mapping if it's configured */ - if (omi < OME_NUMBER_ENTRIES) { - set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); - paace->op_encode.index_ot.omi = omi; - } else if (~omi != 0) { - pr_debug("bad operation mapping index: %d\n", omi); - return -EINVAL; - } - - if (~stashid != 0) - set_bf(paace->impl_attr, PAACE_IA_CID, stashid); - - smp_wmb(); - - if (enable) - set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID); - + set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); + ppaace->twbah = rpn >> 20; + set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn); + set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot); + set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0); + set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); mb(); return 0; @@ -1129,17 +895,6 @@ static int fsl_pamu_probe(struct platform_device *pdev) spaact_phys = virt_to_phys(spaact); omt_phys = virt_to_phys(omt); - spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); - if (!spaace_pool) { - ret = -ENOMEM; - dev_err(dev, "Failed to allocate spaace gen pool\n"); - goto error; - } - - ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1); - if (ret) - goto error_genpool; - pamubypenr = in_be32(&guts_regs->pamubypenr); for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; @@ -1167,9 +922,6 @@ static int fsl_pamu_probe(struct platform_device *pdev) return 0; -error_genpool: - gen_pool_destroy(spaace_pool); - error: if (irq != 
NO_IRQ) free_irq(irq, data); diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index e1496ba96160fd..04fd843d718dd1 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -383,18 +383,12 @@ struct ome { int pamu_domain_init(void); int pamu_enable_liodn(int liodn); int pamu_disable_liodn(int liodn); -void pamu_free_subwins(int liodn); int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid, - u32 subwin_cnt, int prot); -int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr, - phys_addr_t subwin_size, u32 omi, unsigned long rpn, - uint32_t snoopid, u32 stashid, int enable, int prot); + int prot); u32 get_stash_id(u32 stash_dest_hint, u32 vcpu); void get_ome_index(u32 *omi_index, struct device *dev); -int pamu_update_paace_stash(int liodn, u32 subwin, u32 value); -int pamu_disable_spaace(int liodn, u32 subwin); -u32 pamu_get_max_subwin_cnt(void); +int pamu_update_paace_stash(int liodn, u32 value); #endif /* __FSL_PAMU_H */ diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index a4da5597755d3d..e6bdd38fc18409 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -56,65 +56,19 @@ static int __init iommu_init_mempool(void) static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) { - u32 win_cnt = dma_domain->win_cnt; struct dma_window *win_ptr = &dma_domain->win_arr[0]; struct iommu_domain_geometry *geom; geom = &dma_domain->iommu_domain.geometry; - if (!win_cnt) { - pr_debug("Number of windows/geometry not configured for the domain\n"); - return 0; - } - - if (win_cnt > 1) { - u64 subwin_size; - dma_addr_t subwin_iova; - u32 wnd; - - subwin_size = (geom->aperture_end + 1) >> ilog2(win_cnt); - subwin_iova = iova & ~(subwin_size - 1); - wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); - win_ptr = &dma_domain->win_arr[wnd]; - } - if (win_ptr->valid) return win_ptr->paddr + (iova & (win_ptr->size - 1)); return 0; } -static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) -{ - struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; - int i, ret; - unsigned long rpn, flags; - - for (i = 0; i < dma_domain->win_cnt; i++) { - if (sub_win_ptr[i].valid) { - rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT; - spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, - sub_win_ptr[i].size, - ~(u32)0, - rpn, - dma_domain->snoop_id, - dma_domain->stash_id, - (i > 0) ? 
1 : 0, - sub_win_ptr[i].prot); - spin_unlock_irqrestore(&iommu_lock, flags); - if (ret) { - pr_debug("SPAACE configuration failed for liodn %d\n", - liodn); - return ret; - } - } - } - - return ret; -} - -static int map_win(int liodn, struct fsl_dma_domain *dma_domain) +/* Map the DMA window corresponding to the LIODN */ +static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) { int ret; struct dma_window *wnd = &dma_domain->win_arr[0]; @@ -127,7 +81,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) ~(u32)0, wnd->paddr >> PAMU_PAGE_SHIFT, dma_domain->snoop_id, dma_domain->stash_id, - 0, wnd->prot); + wnd->prot); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) pr_debug("PAACE configuration failed for liodn %d\n", liodn); @@ -135,50 +89,27 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) return ret; } -/* Map the DMA window corresponding to the LIODN */ -static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) -{ - if (dma_domain->win_cnt > 1) - return map_subwins(liodn, dma_domain); - else - return map_win(liodn, dma_domain); -} - /* Update window/subwindow mapping for the LIODN */ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) { int ret; struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; + phys_addr_t wnd_addr; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); - if (dma_domain->win_cnt > 1) { - ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, - dma_domain->stash_id, - (wnd_nr > 0) ? 1 : 0, - wnd->prot); - if (ret) - pr_debug("Subwindow reconfiguration failed for liodn %d\n", - liodn); - } else { - phys_addr_t wnd_addr; - wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; + wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; - ret = pamu_config_ppaace(liodn, wnd_addr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, - 0, wnd->prot); - if (ret) - pr_debug("Window reconfiguration failed for liodn %d\n", - liodn); - } + ret = pamu_config_ppaace(liodn, wnd_addr, + wnd->size, + ~(u32)0, + wnd->paddr >> PAMU_PAGE_SHIFT, + dma_domain->snoop_id, dma_domain->stash_id, + wnd->prot); + if (ret) + pr_debug("Window reconfiguration failed for liodn %d\n", + liodn); spin_unlock_irqrestore(&iommu_lock, flags); @@ -192,21 +123,12 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); - if (!dma_domain->win_arr) { - pr_debug("Windows not configured, stash destination update failed for liodn %d\n", - liodn); + ret = pamu_update_paace_stash(liodn, val); + if (ret) { + pr_debug("Failed to update SPAACE %d field for liodn %d\n ", + i, liodn); spin_unlock_irqrestore(&iommu_lock, flags); - return -EINVAL; - } - - for (i = 0; i < dma_domain->win_cnt; i++) { - ret = pamu_update_paace_stash(liodn, i, val); - if (ret) { - pr_debug("Failed to update SPAACE %d field for liodn %d\n ", - i, liodn); - spin_unlock_irqrestore(&iommu_lock, flags); - return ret; - } + return ret; } spin_unlock_irqrestore(&iommu_lock, flags); @@ -217,14 +139,12 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, /* Set the geometry parameters for a LIODN */ static int pamu_set_liodn(int liodn, struct device *dev, struct fsl_dma_domain *dma_domain, - struct iommu_domain_geometry *geom_attr, - u32 win_cnt) + struct iommu_domain_geometry *geom_attr) { phys_addr_t 
window_addr, window_size; - phys_addr_t subwin_size; - int ret = 0, i; u32 omi_index = ~(u32)0; unsigned long flags; + int ret; /* * Configure the omi_index at the geometry setup time. @@ -241,34 +161,14 @@ static int pamu_set_liodn(int liodn, struct device *dev, if (!ret) ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, 0, dma_domain->snoop_id, - dma_domain->stash_id, win_cnt, 0); + dma_domain->stash_id, 0); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) { - pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n", - liodn, win_cnt); + pr_debug("PAACE configuration failed for liodn %d\n", + liodn); return ret; } - if (win_cnt > 1) { - subwin_size = window_size >> ilog2(win_cnt); - for (i = 0; i < win_cnt; i++) { - spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_disable_spaace(liodn, i); - if (!ret) - ret = pamu_config_spaace(liodn, win_cnt, i, - subwin_size, omi_index, - 0, dma_domain->snoop_id, - dma_domain->stash_id, - 0, 0); - spin_unlock_irqrestore(&iommu_lock, flags); - if (ret) { - pr_debug("SPAACE configuration failed for liodn %d\n", - liodn); - return ret; - } - } - } - return ret; } @@ -292,14 +192,12 @@ static int check_size(u64 size, dma_addr_t iova) return 0; } -static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) +static void remove_device_ref(struct device_domain_info *info) { unsigned long flags; list_del(&info->link); spin_lock_irqsave(&iommu_lock, flags); - if (win_cnt > 1) - pamu_free_subwins(info->liodn); pamu_disable_liodn(info->liodn); spin_unlock_irqrestore(&iommu_lock, flags); spin_lock_irqsave(&device_domain_lock, flags); @@ -317,7 +215,7 @@ static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain) /* Remove the device from the domain device list */ list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { if (!dev || (info->dev == dev)) - remove_device_ref(info, dma_domain->win_cnt); + remove_device_ref(info); } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); } @@ -399,7 +297,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) dma_domain->stash_id = ~(u32)0; dma_domain->snoop_id = ~(u32)0; - dma_domain->win_cnt = pamu_get_max_subwin_cnt(); INIT_LIST_HEAD(&dma_domain->devices); spin_lock_init(&dma_domain->domain_lock); @@ -411,24 +308,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) return &dma_domain->iommu_domain; } -/* Configure geometry settings for all LIODNs associated with domain */ -static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, - struct iommu_domain_geometry *geom_attr, - u32 win_cnt) -{ - struct device_domain_info *info; - int ret = 0; - - list_for_each_entry(info, &dma_domain->devices, link) { - ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, - geom_attr, win_cnt); - if (ret) - break; - } - - return ret; -} - /* Update stash destination for all LIODNs associated with the domain */ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) { @@ -475,39 +354,30 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, pamu_prot |= PAACE_AP_PERMS_UPDATE; spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (!dma_domain->win_arr) { - pr_debug("Number of windows not configured\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -ENODEV; - } - - if (wnd_nr >= dma_domain->win_cnt) { + if (wnd_nr > 0) { pr_debug("Invalid window index\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - win_size = 
(domain->geometry.aperture_end + 1) >> - ilog2(dma_domain->win_cnt); + win_size = (domain->geometry.aperture_end + 1) >> ilog2(1); if (size > win_size) { pr_debug("Invalid window size\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - if (dma_domain->win_cnt == 1) { - if (dma_domain->enabled) { - pr_debug("Disable the window before updating the mapping\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } + if (dma_domain->enabled) { + pr_debug("Disable the window before updating the mapping\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -EBUSY; + } - ret = check_size(size, domain->geometry.aperture_start); - if (ret) { - pr_debug("Aperture start not aligned to the size\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } + ret = check_size(size, domain->geometry.aperture_start); + if (ret) { + pr_debug("Aperture start not aligned to the size\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -EINVAL; } wnd = &dma_domain->win_arr[wnd_nr]; @@ -560,22 +430,18 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, * for the domain. If yes, set the geometry for * the LIODN. */ - if (dma_domain->win_arr) { - u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; - - ret = pamu_set_liodn(liodn[i], dev, dma_domain, - &domain->geometry, win_cnt); + ret = pamu_set_liodn(liodn[i], dev, dma_domain, + &domain->geometry); + if (ret) + break; + if (dma_domain->mapped) { + /* + * Create window/subwindow mapping for + * the LIODN. + */ + ret = map_liodn(liodn[i], dma_domain); if (ret) break; - if (dma_domain->mapped) { - /* - * Create window/subwindow mapping for - * the LIODN. - */ - ret = map_liodn(liodn[i], dma_domain); - if (ret) - break; - } } } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); @@ -706,48 +572,6 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en return 0; } -static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - unsigned long flags; - int ret; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ - if (dma_domain->enabled) { - pr_debug("Can't set geometry attributes as domain is active\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } - - /* - * Ensure we have valid window count i.e. it should be less than - * maximum permissible limit and should be a power of two. - */ - if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) { - pr_debug("Invalid window count\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, - w_count > 1 ? 
w_count : 0); - if (!ret) { - kfree(dma_domain->win_arr); - dma_domain->win_arr = kcalloc(w_count, - sizeof(*dma_domain->win_arr), - GFP_ATOMIC); - if (!dma_domain->win_arr) { - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -ENOMEM; - } - dma_domain->win_cnt = w_count; - } - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return ret; -} - static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, enum iommu_attr attr_type, void *data) { @@ -761,9 +585,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, case DOMAIN_ATTR_FSL_PAMU_ENABLE: ret = configure_domain_dma_state(dma_domain, *(int *)data); break; - case DOMAIN_ATTR_WINDOWS: - ret = fsl_pamu_set_windows(domain, *(u32 *)data); - break; default: pr_debug("Unsupported attribute type\n"); ret = -EINVAL; diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 53d359d66fe577..b9236fb5a8f82e 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -17,23 +17,13 @@ struct dma_window { }; struct fsl_dma_domain { - /* - * Number of windows assocaited with this domain. - * During domain initialization, it is set to the - * the maximum number of subwindows allowed for a LIODN. - * Minimum value for this is 1 indicating a single PAMU - * window, without any sub windows. Value can be set/ - * queried by set_attr/get_attr API for DOMAIN_ATTR_WINDOWS. - * Value can only be set once the geometry has been configured. - */ - u32 win_cnt; /* * win_arr contains information of the configured * windows for a domain. This is allocated only * when the number of windows for the domain are * set. */ - struct dma_window *win_arr; + struct dma_window win_arr[1]; /* list of devices associated with the domain */ struct list_head devices; /* dma_domain states: diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index c958e6310d3094..3d56ec4b373b4b 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -55,13 +55,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); goto no_iommu; } - ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, - &window_count); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, - ret); - goto out_domain_free; - } stash_attr.cpu = cpu; stash_attr.cache = PAMU_ATTR_CACHE_L1; ret = iommu_domain_set_attr(pcfg->iommu_domain, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 52874ae164dd60..861c3558c878bf 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -109,7 +109,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_WINDOWS, DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_NESTING, /* two stages of translation */ -- 2.29.2
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 06/17] iommu/fsl_pamu: remove ->domain_window_enable
The only thing that fsl_pamu_window_enable does for the current caller is to fill in the prot value in the only dma_window structure, and to propagate a few values from the iommu_domain_geometry struture into the dma_window. Remove the dma_window entirely, hardcode the prot value and otherwise use the iommu_domain_geometry structure instead. Remove the now unused ->domain_window_enable iommu method. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 182 +++------------------------- drivers/iommu/fsl_pamu_domain.h | 17 --- drivers/iommu/iommu.c | 11 -- drivers/soc/fsl/qbman/qman_portal.c | 7 -- include/linux/iommu.h | 17 --- 5 files changed, 14 insertions(+), 220 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index e6bdd38fc18409..fd2bc88b690465 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -54,34 +54,18 @@ static int __init iommu_init_mempool(void) return 0; } -static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) -{ - struct dma_window *win_ptr = &dma_domain->win_arr[0]; - struct iommu_domain_geometry *geom; - - geom = &dma_domain->iommu_domain.geometry; - - if (win_ptr->valid) - return win_ptr->paddr + (iova & (win_ptr->size - 1)); - - return 0; -} - /* Map the DMA window corresponding to the LIODN */ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) { int ret; - struct dma_window *wnd = &dma_domain->win_arr[0]; - phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; + struct iommu_domain_geometry *geom = &dma_domain->iommu_domain.geometry; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_config_ppaace(liodn, wnd_addr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, - wnd->prot); + ret = pamu_config_ppaace(liodn, geom->aperture_start, + geom->aperture_end - 1, ~(u32)0, + 0, dma_domain->snoop_id, dma_domain->stash_id, + PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) pr_debug("PAACE configuration failed for liodn %d\n", liodn); @@ -89,33 +73,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) return ret; } -/* Update window/subwindow mapping for the LIODN */ -static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) -{ - int ret; - struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; - phys_addr_t wnd_addr; - unsigned long flags; - - spin_lock_irqsave(&iommu_lock, flags); - - wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; - - ret = pamu_config_ppaace(liodn, wnd_addr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, - wnd->prot); - if (ret) - pr_debug("Window reconfiguration failed for liodn %d\n", - liodn); - - spin_unlock_irqrestore(&iommu_lock, flags); - - return ret; -} - static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, u32 val) { @@ -172,26 +129,6 @@ static int pamu_set_liodn(int liodn, struct device *dev, return ret; } -static int check_size(u64 size, dma_addr_t iova) -{ - /* - * Size must be a power of two and at least be equal - * to PAMU page size. 
- */ - if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { - pr_debug("Size too small or not a power of two\n"); - return -EINVAL; - } - - /* iova must be page size aligned */ - if (iova & (size - 1)) { - pr_debug("Address is not aligned with window size\n"); - return -EINVAL; - } - - return 0; -} - static void remove_device_ref(struct device_domain_info *info) { unsigned long flags; @@ -257,13 +194,10 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - if (iova < domain->geometry.aperture_start || iova > domain->geometry.aperture_end) return 0; - - return get_phys_addr(dma_domain, iova); + return iova; } static bool fsl_pamu_capable(enum iommu_cap cap) @@ -279,7 +213,6 @@ static void fsl_pamu_domain_free(struct iommu_domain *domain) detach_device(NULL, dma_domain); dma_domain->enabled = 0; - dma_domain->mapped = 0; kmem_cache_free(fsl_pamu_domain_cache, dma_domain); } @@ -323,84 +256,6 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) return ret; } -/* Update domain mappings for all LIODNs associated with the domain */ -static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) -{ - struct device_domain_info *info; - int ret = 0; - - list_for_each_entry(info, &dma_domain->devices, link) { - ret = update_liodn(info->liodn, dma_domain, wnd_nr); - if (ret) - break; - } - return ret; -} - - -static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - struct dma_window *wnd; - int pamu_prot = 0; - int ret; - unsigned long flags; - u64 win_size; - - if (prot & IOMMU_READ) - pamu_prot |= PAACE_AP_PERMS_QUERY; - if (prot & IOMMU_WRITE) - pamu_prot |= PAACE_AP_PERMS_UPDATE; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (wnd_nr > 0) { - pr_debug("Invalid window index\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - win_size = (domain->geometry.aperture_end + 1) >> ilog2(1); - if (size > win_size) { - pr_debug("Invalid window size\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - if (dma_domain->enabled) { - pr_debug("Disable the window before updating the mapping\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } - - ret = check_size(size, domain->geometry.aperture_start); - if (ret) { - pr_debug("Aperture start not aligned to the size\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - wnd = &dma_domain->win_arr[wnd_nr]; - if (!wnd->valid) { - wnd->paddr = paddr; - wnd->size = size; - wnd->prot = pamu_prot; - - ret = update_domain_mapping(dma_domain, wnd_nr); - if (!ret) { - wnd->valid = 1; - dma_domain->mapped++; - } - } else { - pr_debug("Disable the window before updating the mapping\n"); - ret = -EBUSY; - } - - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return ret; -} - /* * Attach the LIODN to the DMA domain and configure the geometry * and window mappings. @@ -434,15 +289,14 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, &domain->geometry); if (ret) break; - if (dma_domain->mapped) { - /* - * Create window/subwindow mapping for - * the LIODN. 
- */ - ret = map_liodn(liodn[i], dma_domain); - if (ret) - break; - } + + /* + * Create window/subwindow mapping for + * the LIODN. + */ + ret = map_liodn(liodn[i], dma_domain); + if (ret) + break; } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); @@ -552,13 +406,6 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); - - if (enable && !dma_domain->mapped) { - pr_debug("Can't enable DMA domain without valid mapping\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -ENODEV; - } - dma_domain->enabled = enable; list_for_each_entry(info, &dma_domain->devices, link) { ret = (enable) ? pamu_enable_liodn(info->liodn) : @@ -717,7 +564,6 @@ static const struct iommu_ops fsl_pamu_ops = { .domain_free = fsl_pamu_domain_free, .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, - .domain_window_enable = fsl_pamu_window_enable, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, .probe_device = fsl_pamu_probe_device, diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index b9236fb5a8f82e..13ee06e0ef0136 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -9,26 +9,10 @@ #include "fsl_pamu.h" -struct dma_window { - phys_addr_t paddr; - u64 size; - int valid; - int prot; -}; - struct fsl_dma_domain { - /* - * win_arr contains information of the configured - * windows for a domain. This is allocated only - * when the number of windows for the domain are - * set. - */ - struct dma_window win_arr[1]; /* list of devices associated with the domain */ struct list_head devices; /* dma_domain states: - * mapped - A particular mapping has been created - * within the configured geometry. * enabled - DMA has been enabled for the given * domain. This translates to setting of the * valid bit for the primary PAACE in the PAMU @@ -37,7 +21,6 @@ struct fsl_dma_domain { * enabled for it. 
* */ - int mapped; int enabled; /* stash_id obtained from the stash attribute details */ u32 stash_id; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d0b0a15dba8413..b212bf0261820b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2610,17 +2610,6 @@ size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); } -int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot) -{ - if (unlikely(domain->ops->domain_window_enable == NULL)) - return -ENODEV; - - return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, - prot); -} -EXPORT_SYMBOL_GPL(iommu_domain_window_enable); - /** * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework * @domain: the iommu domain where the fault has happened diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 3d56ec4b373b4b..9ee1663f422cbf 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -65,13 +65,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) __func__, ret); goto out_domain_free; } - ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, - IOMMU_READ | IOMMU_WRITE); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_window_enable() = %d", - __func__, ret); - goto out_domain_free; - } ret = iommu_attach_device(pcfg->iommu_domain, dev); if (ret < 0) { dev_err(dev, "%s(): iommu_device_attach() = %d", __func__, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 861c3558c878bf..f7baa81887a8bc 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -203,7 +203,6 @@ struct iommu_iotlb_gather { * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges - * @domain_window_enable: Configure and enable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -261,10 +260,6 @@ struct iommu_ops { struct iommu_domain *domain, struct iommu_resv_region *region); - /* Window handling functions */ - int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot); - int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); @@ -505,11 +500,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); -/* Window handling function prototypes */ -extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t offset, u64 size, - int prot); - extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -735,13 +725,6 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain, { } -static inline int iommu_domain_window_enable(struct iommu_domain *domain, - u32 wnd_nr, phys_addr_t paddr, - u64 size, int prot) -{ - return -ENODEV; -} - static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { return 0; -- 2.29.2
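For reference, the hardcoded PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE value in the new map_liodn() is the PAMU encoding of the IOMMU_READ | IOMMU_WRITE protection bits that qman_portal, the only caller, previously passed to iommu_domain_window_enable(). The removed fsl_pamu_window_enable() performed that translation as follows (a sketch, names as in the diff above):

    int pamu_prot = 0;

    if (prot & IOMMU_READ)
            pamu_prot |= PAACE_AP_PERMS_QUERY;   /* read access */
    if (prot & IOMMU_WRITE)
            pamu_prot |= PAACE_AP_PERMS_UPDATE;  /* write access */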
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 07/17] iommu/fsl_pamu: replace DOMAIN_ATTR_FSL_PAMU_STASH with a direct call
Add a fsl_pamu_configure_l1_stash API that qman_portal can call directly instead of indirecting through the iommu attr API. Signed-off-by: Christoph Hellwig <hch at lst.de> --- arch/powerpc/include/asm/fsl_pamu_stash.h | 12 +++--------- drivers/iommu/fsl_pamu_domain.c | 16 +++------------- drivers/iommu/fsl_pamu_domain.h | 2 -- drivers/soc/fsl/qbman/qman_portal.c | 18 +++--------------- include/linux/iommu.h | 1 - 5 files changed, 9 insertions(+), 40 deletions(-) diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h index 30a31ad2123d86..c0fbadb70b5dad 100644 --- a/arch/powerpc/include/asm/fsl_pamu_stash.h +++ b/arch/powerpc/include/asm/fsl_pamu_stash.h @@ -7,6 +7,8 @@ #ifndef __FSL_PAMU_STASH_H #define __FSL_PAMU_STASH_H +struct iommu_domain; + /* cache stash targets */ enum pamu_stash_target { PAMU_ATTR_CACHE_L1 = 1, @@ -14,14 +16,6 @@ enum pamu_stash_target { PAMU_ATTR_CACHE_L3, }; -/* - * This attribute allows configuring stashig specific parameters - * in the PAMU hardware. - */ - -struct pamu_stash_attribute { - u32 cpu; /* cpu number */ - u32 cache; /* cache to stash to: L1,L2,L3 */ -}; +int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu); #endif /* __FSL_PAMU_STASH_H */ diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index fd2bc88b690465..40eff4b7bc5d42 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -372,27 +372,20 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain, } /* Set the domain stash attribute */ -static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) +int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) { - struct pamu_stash_attribute *stash_attr = data; + struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); unsigned long flags; int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); - - memcpy(&dma_domain->dma_stash, stash_attr, - sizeof(struct pamu_stash_attribute)); - - dma_domain->stash_id = get_stash_id(stash_attr->cache, - stash_attr->cpu); + dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu); if (dma_domain->stash_id == ~(u32)0) { pr_debug("Invalid stash attributes\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - ret = update_domain_stash(dma_domain, dma_domain->stash_id); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; @@ -426,9 +419,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, int ret = 0; switch (attr_type) { - case DOMAIN_ATTR_FSL_PAMU_STASH: - ret = configure_domain_stash(dma_domain, data); - break; case DOMAIN_ATTR_FSL_PAMU_ENABLE: ret = configure_domain_dma_state(dma_domain, *(int *)data); break; diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 13ee06e0ef0136..cd488004acd1b3 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -22,9 +22,7 @@ struct fsl_dma_domain { * */ int enabled; - /* stash_id obtained from the stash attribute details */ u32 stash_id; - struct pamu_stash_attribute dma_stash; u32 snoop_id; struct iommu_domain iommu_domain; spinlock_t domain_lock; diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 9ee1663f422cbf..798b3a1ffd0b9c 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -47,7 +47,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) #ifdef CONFIG_FSL_PAMU struct device *dev 
= pcfg->dev; int window_count = 1; - struct pamu_stash_attribute stash_attr; int ret; pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); @@ -55,13 +54,9 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); goto no_iommu; } - stash_attr.cpu = cpu; - stash_attr.cache = PAMU_ATTR_CACHE_L1; - ret = iommu_domain_set_attr(pcfg->iommu_domain, - DOMAIN_ATTR_FSL_PAMU_STASH, - &stash_attr); + ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu); if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", + dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d", __func__, ret); goto out_domain_free; } @@ -143,15 +138,8 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, unsigned int cpu) { #ifdef CONFIG_FSL_PAMU /* TODO */ - struct pamu_stash_attribute stash_attr; - int ret; - if (pcfg->iommu_domain) { - stash_attr.cpu = cpu; - stash_attr.cache = PAMU_ATTR_CACHE_L1; - ret = iommu_domain_set_attr(pcfg->iommu_domain, - DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); - if (ret < 0) { + if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) { dev_err(pcfg->dev, "Failed to update pamu stash setting\n"); return; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f7baa81887a8bc..208e570e8d99e7 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -109,7 +109,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, -- 2.29.2
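As a minimal illustration (not part of the patch): a caller now makes one direct call instead of the old DOMAIN_ATTR_FSL_PAMU_STASH attribute round trip. The example_request_l1_stash() name is made up for this sketch.

#include <linux/iommu.h>
#include <asm/fsl_pamu_stash.h>

/* Ask the PAMU to stash DMA data into the L1 cache of @cpu. */
static int example_request_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	int ret;

	ret = fsl_pamu_configure_l1_stash(domain, cpu);
	if (ret < 0)
		pr_err("L1 stash configuration for CPU %u failed: %d\n",
		       cpu, ret);
	return ret;
}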
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 08/17] iommu/fsl_pamu: merge pamu_set_liodn and map_liodn
Merge the two fuctions that configure the ppaace into a single coherent function. I somehow doubt we need the two pamu_config_ppaace calls, but keep the existing behavior just to be on the safe side. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 65 +++++++++------------------------ 1 file changed, 17 insertions(+), 48 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 40eff4b7bc5d42..4a4944332674f7 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -54,25 +54,6 @@ static int __init iommu_init_mempool(void) return 0; } -/* Map the DMA window corresponding to the LIODN */ -static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) -{ - int ret; - struct iommu_domain_geometry *geom = &dma_domain->iommu_domain.geometry; - unsigned long flags; - - spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_config_ppaace(liodn, geom->aperture_start, - geom->aperture_end - 1, ~(u32)0, - 0, dma_domain->snoop_id, dma_domain->stash_id, - PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE); - spin_unlock_irqrestore(&iommu_lock, flags); - if (ret) - pr_debug("PAACE configuration failed for liodn %d\n", liodn); - - return ret; -} - static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, u32 val) { @@ -94,11 +75,11 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, } /* Set the geometry parameters for a LIODN */ -static int pamu_set_liodn(int liodn, struct device *dev, - struct fsl_dma_domain *dma_domain, - struct iommu_domain_geometry *geom_attr) +static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev, + int liodn) { - phys_addr_t window_addr, window_size; + struct iommu_domain *domain = &dma_domain->iommu_domain; + struct iommu_domain_geometry *geom = &domain->geometry; u32 omi_index = ~(u32)0; unsigned long flags; int ret; @@ -110,22 +91,25 @@ static int pamu_set_liodn(int liodn, struct device *dev, */ get_ome_index(&omi_index, dev); - window_addr = geom_attr->aperture_start; - window_size = geom_attr->aperture_end + 1; - spin_lock_irqsave(&iommu_lock, flags); ret = pamu_disable_liodn(liodn); - if (!ret) - ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, - 0, dma_domain->snoop_id, - dma_domain->stash_id, 0); + if (ret) + goto out_unlock; + ret = pamu_config_ppaace(liodn, geom->aperture_start, + geom->aperture_end - 1, omi_index, 0, + dma_domain->snoop_id, dma_domain->stash_id, 0); + if (ret) + goto out_unlock; + ret = pamu_config_ppaace(liodn, geom->aperture_start, + geom->aperture_end - 1, ~(u32)0, + 0, dma_domain->snoop_id, dma_domain->stash_id, + PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE); +out_unlock: spin_unlock_irqrestore(&iommu_lock, flags); if (ret) { pr_debug("PAACE configuration failed for liodn %d\n", liodn); - return ret; } - return ret; } @@ -265,7 +249,6 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, int num) { unsigned long flags; - struct iommu_domain *domain = &dma_domain->iommu_domain; int ret = 0; int i; @@ -280,21 +263,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, } attach_device(dma_domain, liodn[i], dev); - /* - * Check if geometry has already been configured - * for the domain. If yes, set the geometry for - * the LIODN. - */ - ret = pamu_set_liodn(liodn[i], dev, dma_domain, - &domain->geometry); - if (ret) - break; - - /* - * Create window/subwindow mapping for - * the LIODN. 
- */ - ret = map_liodn(liodn[i], dma_domain); + ret = pamu_set_liodn(dma_domain, dev, liodn[i]); if (ret) break; } -- 2.29.2
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 09/17] iommu/fsl_pamu: merge handle_attach_device into fsl_pamu_attach_device
No good reason to split this functionality over two functions. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 59 +++++++++++---------------------- 1 file changed, 20 insertions(+), 39 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 4a4944332674f7..962cdc1a4a1924 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -240,45 +240,13 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) return ret; } -/* - * Attach the LIODN to the DMA domain and configure the geometry - * and window mappings. - */ -static int handle_attach_device(struct fsl_dma_domain *dma_domain, - struct device *dev, const u32 *liodn, - int num) -{ - unsigned long flags; - int ret = 0; - int i; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - for (i = 0; i < num; i++) { - /* Ensure that LIODN value is valid */ - if (liodn[i] >= PAACE_NUMBER_ENTRIES) { - pr_debug("Invalid liodn %d, attach device failed for %pOF\n", - liodn[i], dev->of_node); - ret = -EINVAL; - break; - } - - attach_device(dma_domain, liodn[i], dev); - ret = pamu_set_liodn(dma_domain, dev, liodn[i]); - if (ret) - break; - } - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return ret; -} - static int fsl_pamu_attach_device(struct iommu_domain *domain, struct device *dev) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); + unsigned long flags; + int len, ret = 0, i; const u32 *liodn; - u32 liodn_cnt; - int len, ret = 0; struct pci_dev *pdev = NULL; struct pci_controller *pci_ctl; @@ -298,14 +266,27 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, } liodn = of_get_property(dev->of_node, "fsl,liodn", &len); - if (liodn) { - liodn_cnt = len / sizeof(u32); - ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); - } else { + if (!liodn) { pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); - ret = -EINVAL; + return -EINVAL; } + spin_lock_irqsave(&dma_domain->domain_lock, flags); + for (i = 0; i < len / sizeof(u32); i++) { + /* Ensure that LIODN value is valid */ + if (liodn[i] >= PAACE_NUMBER_ENTRIES) { + pr_debug("Invalid liodn %d, attach device failed for %pOF\n", + liodn[i], dev->of_node); + ret = -EINVAL; + break; + } + + attach_device(dma_domain, liodn[i], dev); + ret = pamu_set_liodn(dma_domain, dev, liodn[i]); + if (ret) + break; + } + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; } -- 2.29.2
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 10/17] iommu/fsl_pamu: enable the liodn when attaching a device
Instead of a separate call to enable all devices from the list, just enablde the liodn one the device is attached to the iommu domain. This also remove the DOMAIN_ATTR_FSL_PAMU_ENABLE iommu_attr. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 47 ++--------------------------- drivers/iommu/fsl_pamu_domain.h | 10 ------ drivers/soc/fsl/qbman/qman_portal.c | 11 ------- include/linux/iommu.h | 1 - 4 files changed, 3 insertions(+), 66 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 962cdc1a4a1924..21c6d9e79eddf9 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -195,9 +195,6 @@ static void fsl_pamu_domain_free(struct iommu_domain *domain) /* remove all the devices from the device list */ detach_device(NULL, dma_domain); - - dma_domain->enabled = 0; - kmem_cache_free(fsl_pamu_domain_cache, dma_domain); } @@ -285,6 +282,9 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, ret = pamu_set_liodn(dma_domain, dev, liodn[i]); if (ret) break; + ret = pamu_enable_liodn(liodn[i]); + if (ret) + break; } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; @@ -341,46 +341,6 @@ int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) return ret; } -/* Configure domain dma state i.e. enable/disable DMA */ -static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) -{ - struct device_domain_info *info; - unsigned long flags; - int ret; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - dma_domain->enabled = enable; - list_for_each_entry(info, &dma_domain->devices, link) { - ret = (enable) ? pamu_enable_liodn(info->liodn) : - pamu_disable_liodn(info->liodn); - if (ret) - pr_debug("Unable to set dma state for liodn %d", - info->liodn); - } - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return 0; -} - -static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, - enum iommu_attr attr_type, void *data) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - int ret = 0; - - switch (attr_type) { - case DOMAIN_ATTR_FSL_PAMU_ENABLE: - ret = configure_domain_dma_state(dma_domain, *(int *)data); - break; - default: - pr_debug("Unsupported attribute type\n"); - ret = -EINVAL; - break; - } - - return ret; -} - static struct iommu_group *get_device_iommu_group(struct device *dev) { struct iommu_group *group; @@ -505,7 +465,6 @@ static const struct iommu_ops fsl_pamu_ops = { .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, .iova_to_phys = fsl_pamu_iova_to_phys, - .domain_set_attr = fsl_pamu_set_domain_attr, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index cd488004acd1b3..5f4ed253f61b31 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -12,16 +12,6 @@ struct fsl_dma_domain { /* list of devices associated with the domain */ struct list_head devices; - /* dma_domain states: - * enabled - DMA has been enabled for the given - * domain. This translates to setting of the - * valid bit for the primary PAACE in the PAMU - * PAACT table. Domain geometry should be set and - * it must have a valid mapping before DMA can be - * enabled for it. 
- * - */ - int enabled; u32 stash_id; u32 snoop_id; struct iommu_domain iommu_domain; diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 798b3a1ffd0b9c..bf38eb0042ed52 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -46,7 +46,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) { #ifdef CONFIG_FSL_PAMU struct device *dev = pcfg->dev; - int window_count = 1; int ret; pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); @@ -66,14 +65,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) ret); goto out_domain_free; } - ret = iommu_domain_set_attr(pcfg->iommu_domain, - DOMAIN_ATTR_FSL_PAMU_ENABLE, - &window_count); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, - ret); - goto out_detach_device; - } no_iommu: #endif @@ -82,8 +73,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) return; #ifdef CONFIG_FSL_PAMU -out_detach_device: - iommu_detach_device(pcfg->iommu_domain, NULL); out_domain_free: iommu_domain_free(pcfg->iommu_domain); pcfg->iommu_domain = NULL; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 208e570e8d99e7..840864844027dc 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -109,7 +109,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- 2.29.2
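As a minimal illustration of the end result (not code from the series): a portal now sets up its PAMU domain with allocate, stash and attach, and the LIODN is enabled as part of the attach. The example_portal_domain_init() name is an assumption of this sketch.

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <asm/fsl_pamu_stash.h>

static struct iommu_domain *example_portal_domain_init(struct device *dev,
							u32 cpu)
{
	struct iommu_domain *domain;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return NULL;
	if (fsl_pamu_configure_l1_stash(domain, cpu) < 0)
		goto out_free;
	/* Attaching also enables the LIODN; no separate "enable" attribute. */
	if (iommu_attach_device(domain, dev) < 0)
		goto out_free;
	return domain;

out_free:
	iommu_domain_free(domain);
	return NULL;
}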
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 11/17] iommu/fsl_pamu: remove the snoop_id field
The snoop_id is always set to ~(u32)0. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/fsl_pamu_domain.c | 5 ++--- drivers/iommu/fsl_pamu_domain.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 21c6d9e79eddf9..701fc3f187a100 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -97,12 +97,12 @@ static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev, goto out_unlock; ret = pamu_config_ppaace(liodn, geom->aperture_start, geom->aperture_end - 1, omi_index, 0, - dma_domain->snoop_id, dma_domain->stash_id, 0); + ~(u32)0, dma_domain->stash_id, 0); if (ret) goto out_unlock; ret = pamu_config_ppaace(liodn, geom->aperture_start, geom->aperture_end - 1, ~(u32)0, - 0, dma_domain->snoop_id, dma_domain->stash_id, + 0, ~(u32)0, dma_domain->stash_id, PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE); out_unlock: spin_unlock_irqrestore(&iommu_lock, flags); @@ -210,7 +210,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) return NULL; dma_domain->stash_id = ~(u32)0; - dma_domain->snoop_id = ~(u32)0; INIT_LIST_HEAD(&dma_domain->devices); spin_lock_init(&dma_domain->domain_lock); diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 5f4ed253f61b31..95ac1b3cab3b69 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -13,7 +13,6 @@ struct fsl_dma_domain { /* list of devices associated with the domain */ struct list_head devices; u32 stash_id; - u32 snoop_id; struct iommu_domain iommu_domain; spinlock_t domain_lock; }; -- 2.29.2
DOMAIN_ATTR_PAGING is never used. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/iommu.c | 5 ----- include/linux/iommu.h | 1 - 2 files changed, 6 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b212bf0261820b..9a4cda390993e6 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2668,7 +2668,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { struct iommu_domain_geometry *geometry; - bool *paging; int ret = 0; switch (attr) { @@ -2676,10 +2675,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain, geometry = data; *geometry = domain->geometry; - break; - case DOMAIN_ATTR_PAGING: - paging = data; - *paging = (domain->pgsize_bitmap != 0UL); break; default: if (!domain->ops->domain_get_attr) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 840864844027dc..180ff4bd7fa7ef 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -108,7 +108,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, - DOMAIN_ATTR_PAGING, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- 2.29.2
The geometry information can be trivially queried from the iommu_domain struture. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/iommu.c | 20 +++----------------- drivers/soc/fsl/qbman/qman_portal.c | 1 + drivers/vfio/vfio_iommu_type1.c | 26 ++++++++++++-------------- drivers/vhost/vdpa.c | 10 +++------- include/linux/iommu.h | 1 - 5 files changed, 19 insertions(+), 39 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9a4cda390993e6..23daaea7883b75 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2667,23 +2667,9 @@ core_initcall(iommu_init); int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { - struct iommu_domain_geometry *geometry; - int ret = 0; - - switch (attr) { - case DOMAIN_ATTR_GEOMETRY: - geometry = data; - *geometry = domain->geometry; - - break; - default: - if (!domain->ops->domain_get_attr) - return -EINVAL; - - ret = domain->ops->domain_get_attr(domain, attr, data); - } - - return ret; + if (!domain->ops->domain_get_attr) + return -EINVAL; + return domain->ops->domain_get_attr(domain, attr, data); } EXPORT_SYMBOL_GPL(iommu_domain_get_attr); diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index bf38eb0042ed52..4a4466cc26c232 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -53,6 +53,7 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); goto no_iommu; } + ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu); if (ret < 0) { dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d", diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4bb162c1d649b3..c8e57f22f421c5 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2252,7 +2252,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, int ret; bool resv_msi, msi_remap; phys_addr_t resv_msi_base = 0; - struct iommu_domain_geometry geo; + struct iommu_domain_geometry *geo; LIST_HEAD(iova_copy); LIST_HEAD(group_resv_regions); @@ -2333,10 +2333,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, goto out_domain; /* Get aperture info */ - iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo); - - if (vfio_iommu_aper_conflict(iommu, geo.aperture_start, - geo.aperture_end)) { + geo = &domain->domain->geometry; + if (vfio_iommu_aper_conflict(iommu, geo->aperture_start, + geo->aperture_end)) { ret = -EINVAL; goto out_detach; } @@ -2359,8 +2358,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_detach; - ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start, - geo.aperture_end); + ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start, + geo->aperture_end); if (ret) goto out_detach; @@ -2493,7 +2492,6 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, struct list_head *iova_copy) { struct vfio_domain *domain; - struct iommu_domain_geometry geo; struct vfio_iova *node; dma_addr_t start = 0; dma_addr_t end = (dma_addr_t)~0; @@ -2502,12 +2500,12 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, return; list_for_each_entry(domain, &iommu->domain_list, next) { - iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, - &geo); - if (geo.aperture_start > start) - start = geo.aperture_start; - if (geo.aperture_end < end) - end = geo.aperture_end; + struct iommu_domain_geometry *geo = &domain->domain->geometry; 
+ + if (geo->aperture_start > start) + start = geo->aperture_start; + if (geo->aperture_end < end) + end = geo->aperture_end; } /* Modify aperture limits. The new aper is either same or bigger */ diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index ef688c8c0e0e6f..25824fab433d0a 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -826,18 +826,14 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v) static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v) { struct vdpa_iova_range *range = &v->range; - struct iommu_domain_geometry geo; struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; if (ops->get_iova_range) { *range = ops->get_iova_range(vdpa); - } else if (v->domain && - !iommu_domain_get_attr(v->domain, - DOMAIN_ATTR_GEOMETRY, &geo) && - geo.force_aperture) { - range->first = geo.aperture_start; - range->last = geo.aperture_end; + } else if (v->domain && v->domain->geometry.force_aperture) { + range->first = v->domain->geometry.aperture_start; + range->last = v->domain->geometry.aperture_end; } else { range->first = 0; range->last = ULLONG_MAX; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 180ff4bd7fa7ef..c15a8658daad64 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -107,7 +107,6 @@ enum iommu_cap { */ enum iommu_attr { - DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- 2.29.2
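As a minimal illustration: consumers now read the aperture straight out of struct iommu_domain, the same pattern the vfio and vdpa hunks above switch to. The example_show_aperture() name is made up.

#include <linux/iommu.h>

static void example_show_aperture(struct iommu_domain *domain)
{
	const struct iommu_domain_geometry *geo = &domain->geometry;

	/* force_aperture means the hardware actually enforces these limits. */
	if (geo->force_aperture)
		pr_info("IOVA aperture: %pad - %pad\n",
			&geo->aperture_start, &geo->aperture_end);
}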
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 14/17] iommu: remove DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
Use explicit methods for setting and querying the information instead. Also remove the now unused iommu_domain_get_attr functionality. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/amd/iommu.c | 23 ++------- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 47 ++++++----------- drivers/iommu/arm/arm-smmu/arm-smmu.c | 56 +++++---------------- drivers/iommu/dma-iommu.c | 8 ++- drivers/iommu/intel/iommu.c | 27 ++-------- drivers/iommu/iommu.c | 19 +++---- include/linux/iommu.h | 17 ++----- 7 files changed, 51 insertions(+), 146 deletions(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index a69a8b573e40d0..37a8e51db17656 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1771,24 +1771,11 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) return acpihid_device_group(dev); } -static int amd_iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static bool amd_iommu_dma_use_flush_queue(struct iommu_domain *domain) { - switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - return -ENODEV; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = !amd_iommu_unmap_flush; - return 0; - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } + if (domain->type != IOMMU_DOMAIN_DMA) + return false; + return !amd_iommu_unmap_flush; } /***************************************************************************** @@ -2257,7 +2244,7 @@ const struct iommu_ops amd_iommu_ops = { .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, .device_group = amd_iommu_device_group, - .domain_get_attr = amd_iommu_domain_get_attr, + .dma_use_flush_queue = amd_iommu_dma_use_flush_queue, .get_resv_regions = amd_iommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 8594b4a8304375..bf96172e8c1f71 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2449,33 +2449,21 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } -static int arm_smmu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static bool arm_smmu_dma_use_flush_queue(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; - default: - return -ENODEV; - } - break; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = smmu_domain->non_strict; - return 0; - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } + if (domain->type != IOMMU_DOMAIN_DMA) + return false; + return smmu_domain->non_strict; +} + + +static void arm_smmu_dma_enable_flush_queue(struct iommu_domain *domain) +{ + if (domain->type != IOMMU_DOMAIN_DMA) + return; + to_smmu_domain(domain)->non_strict = true; } static int arm_smmu_domain_set_attr(struct iommu_domain *domain, @@ -2505,13 +2493,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, } break; case IOMMU_DOMAIN_DMA: - switch(attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - smmu_domain->non_strict = *(int *)data; - break; - default: - ret 
= -ENODEV; - } + ret = -ENODEV; break; default: ret = -EINVAL; @@ -2619,7 +2601,8 @@ static struct iommu_ops arm_smmu_ops = { .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, - .domain_get_attr = arm_smmu_domain_get_attr, + .dma_use_flush_queue = arm_smmu_dma_use_flush_queue, + .dma_enable_flush_queue = arm_smmu_dma_enable_flush_queue, .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index d8c6bfde6a6158..e7893e96f5177a 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1481,42 +1481,20 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } -static int arm_smmu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static bool arm_smmu_dma_use_flush_queue(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - switch(domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; - case DOMAIN_ATTR_IO_PGTABLE_CFG: { - struct io_pgtable_domain_attr *pgtbl_cfg = data; - *pgtbl_cfg = smmu_domain->pgtbl_cfg; + if (domain->type != IOMMU_DOMAIN_DMA) + return false; + return smmu_domain->pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT; +} - return 0; - } - default: - return -ENODEV; - } - break; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: { - bool non_strict = smmu_domain->pgtbl_cfg.quirks & - IO_PGTABLE_QUIRK_NON_STRICT; - *(int *)data = non_strict; - return 0; - } - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } +static void arm_smmu_dma_enable_flush_queue(struct iommu_domain *domain) +{ + if (domain->type != IOMMU_DOMAIN_DMA) + return; + to_smmu_domain(domain)->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; } static int arm_smmu_domain_set_attr(struct iommu_domain *domain, @@ -1557,16 +1535,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, } break; case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - if (*(int *)data) - smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; - else - smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT; - break; - default: - ret = -ENODEV; - } + ret = -ENODEV; break; default: ret = -EINVAL; @@ -1631,7 +1600,8 @@ static struct iommu_ops arm_smmu_ops = { .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, - .domain_get_attr = arm_smmu_domain_get_attr, + .dma_use_flush_queue = arm_smmu_dma_use_flush_queue, + .dma_enable_flush_queue = arm_smmu_dma_enable_flush_queue, .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9ab6ee22c11088..d3fe5aad9d6ecf 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -305,8 +305,8 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad) cookie = container_of(iovad, struct iommu_dma_cookie, iovad); domain = cookie->fq_domain; /* - * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE - * implies that ops->flush_iotlb_all must be non-NULL. 
+ * The IOMMU driver supporting a DMA flush queue implies that + * ops->flush_iotlb_all must be non-NULL. */ domain->ops->flush_iotlb_all(domain); } @@ -329,7 +329,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; unsigned long order, base_pfn; struct iova_domain *iovad; - int attr; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -365,8 +364,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, init_iova_domain(iovad, 1UL << order, base_pfn); - if (!cookie->fq_domain && !iommu_domain_get_attr(domain, - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) { + if (!cookie->fq_domain && iommu_dma_use_flush_queue(domain)) { if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, iommu_dma_entry_dtor)) pr_warn("iova flush queue initialization failed\n"); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ee0932307d646b..eaa80c33f4bc91 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5453,13 +5453,13 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain, return ret; } -static bool domain_use_flush_queue(void) +static bool intel_iommu_dma_use_flush_queue(struct iommu_domain *domain) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; bool r = true; - if (intel_iommu_strict) + if (domain->type != IOMMU_DOMAIN_DMA || intel_iommu_strict) return false; /* @@ -5483,27 +5483,6 @@ static bool domain_use_flush_queue(void) return r; } -static int -intel_iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - return -ENODEV; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = domain_use_flush_queue(); - return 0; - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } -} - /* * Check that the device does not live on an external facing PCI port that is * marked as untrusted. 
Such devices should not be able to apply quirks and @@ -5576,7 +5555,7 @@ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, - .domain_get_attr = intel_iommu_domain_get_attr, + .dma_use_flush_queue = intel_iommu_dma_use_flush_queue, .domain_set_attr = intel_iommu_domain_set_attr, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 23daaea7883b75..0f12c4d58cdc42 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1512,12 +1512,8 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus, if (!group->domain) group->domain = dom; - if (!iommu_dma_strict) { - int attr = 1; - iommu_domain_set_attr(dom, - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, - &attr); - } + if (!iommu_dma_strict && dom->ops->dma_enable_flush_queue) + dom->ops->dma_enable_flush_queue(dom); return 0; } @@ -2664,14 +2660,13 @@ static int __init iommu_init(void) } core_initcall(iommu_init); -int iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +bool iommu_dma_use_flush_queue(struct iommu_domain *domain) { - if (!domain->ops->domain_get_attr) - return -EINVAL; - return domain->ops->domain_get_attr(domain, attr, data); + if (!domain->ops->dma_use_flush_queue) + return false; + return domain->ops->dma_use_flush_queue(domain); } -EXPORT_SYMBOL_GPL(iommu_domain_get_attr); +EXPORT_SYMBOL_GPL(iommu_dma_use_flush_queue); int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index c15a8658daad64..f30de33c6ff56e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -108,7 +108,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_NESTING, /* two stages of translation */ - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, }; @@ -194,7 +193,8 @@ struct iommu_iotlb_gather { * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @domain_get_attr: Query domain attributes + * @dma_use_flush_queue: Returns %true if a DMA flush queue is used + * @dma_enable_flush_queue: Try to enable the DMA flush queue * @domain_set_attr: Change domain attributes * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device @@ -244,8 +244,8 @@ struct iommu_ops { void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*domain_get_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); + bool (*dma_use_flush_queue)(struct iommu_domain *domain); + void (*dma_enable_flush_queue)(struct iommu_domain *domain); int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); @@ -491,8 +491,7 @@ extern int iommu_page_response(struct device *dev, extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); +bool iommu_dma_use_flush_queue(struct iommu_domain *domain); extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); @@ -861,12 +860,6 @@ static inline int 
iommu_group_id(struct iommu_group *group) return -ENODEV; } -static inline int iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - return -EINVAL; -} - static inline int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { -- 2.29.2
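A minimal sketch of how an IOMMU driver wires up the two new callbacks; the mydrv_* names and the strict-mode flag are assumptions of this illustration, not code from the series.

#include <linux/iommu.h>

static bool mydrv_strict = true;	/* hypothetical per-driver policy */

static bool mydrv_dma_use_flush_queue(struct iommu_domain *domain)
{
	/* Only DMA API domains can batch their IOTLB invalidations. */
	if (domain->type != IOMMU_DOMAIN_DMA)
		return false;
	return !mydrv_strict;
}

static void mydrv_dma_enable_flush_queue(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_DMA)
		return;
	mydrv_strict = false;
}

static const struct iommu_ops mydrv_iommu_ops = {
	/* ... other callbacks ... */
	.dma_use_flush_queue	= mydrv_dma_use_flush_queue,
	.dma_enable_flush_queue	= mydrv_dma_enable_flush_queue,
};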
Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 40 ++++++--------------- drivers/iommu/arm/arm-smmu/arm-smmu.c | 30 ++++++++++------ drivers/iommu/intel/iommu.c | 28 +++++---------- drivers/iommu/iommu.c | 8 +++++ drivers/vfio/vfio_iommu_type1.c | 5 +-- include/linux/iommu.h | 4 ++- 6 files changed, 50 insertions(+), 65 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index bf96172e8c1f71..8e6fee3ea454d3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2466,41 +2466,21 @@ static void arm_smmu_dma_enable_flush_queue(struct iommu_domain *domain) to_smmu_domain(domain)->non_strict = true; } -static int arm_smmu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static int arm_smmu_domain_enable_nesting(struct iommu_domain *domain) { - int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret = -EPERM; - mutex_lock(&smmu_domain->init_mutex); + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; - switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; - } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; - default: - ret = -ENODEV; - } - break; - case IOMMU_DOMAIN_DMA: - ret = -ENODEV; - break; - default: - ret = -EINVAL; + mutex_lock(&smmu_domain->init_mutex); + if (!smmu_domain->smmu) { + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + ret = 0; } - -out_unlock: mutex_unlock(&smmu_domain->init_mutex); + return ret; } @@ -2603,7 +2583,7 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .dma_use_flush_queue = arm_smmu_dma_use_flush_queue, .dma_enable_flush_queue = arm_smmu_dma_enable_flush_queue, - .domain_set_attr = arm_smmu_domain_set_attr, + .domain_enable_nesting = arm_smmu_domain_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index e7893e96f5177a..2e17d990d04481 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1497,6 +1497,24 @@ static void arm_smmu_dma_enable_flush_queue(struct iommu_domain *domain) to_smmu_domain(domain)->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; } +static int arm_smmu_domain_enable_nesting(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret = -EPERM; + + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + + mutex_lock(&smmu_domain->init_mutex); + if (!smmu_domain->smmu) { + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + ret = 0; + } + mutex_unlock(&smmu_domain->init_mutex); + + return ret; +} + static int arm_smmu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { @@ -1508,17 +1526,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, switch(domain->type) { case IOMMU_DOMAIN_UNMANAGED: switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; - } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; case DOMAIN_ATTR_IO_PGTABLE_CFG: { struct 
io_pgtable_domain_attr *pgtbl_cfg = data; @@ -1603,6 +1610,7 @@ static struct iommu_ops arm_smmu_ops = { .dma_use_flush_queue = arm_smmu_dma_use_flush_queue, .dma_enable_flush_queue = arm_smmu_dma_enable_flush_queue, .domain_set_attr = arm_smmu_domain_set_attr, + .domain_enable_nesting = arm_smmu_domain_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index eaa80c33f4bc91..0f1374d6612a60 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5423,32 +5423,22 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, } static int -intel_iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +intel_iommu_domain_enable_nesting(struct iommu_domain *domain) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); unsigned long flags; - int ret = 0; + int ret = -ENODEV; if (domain->type != IOMMU_DOMAIN_UNMANAGED) return -EINVAL; - switch (attr) { - case DOMAIN_ATTR_NESTING: - spin_lock_irqsave(&device_domain_lock, flags); - if (nested_mode_support() && - list_empty(&dmar_domain->devices)) { - dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; - dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; - } else { - ret = -ENODEV; - } - spin_unlock_irqrestore(&device_domain_lock, flags); - break; - default: - ret = -EINVAL; - break; + spin_lock_irqsave(&device_domain_lock, flags); + if (nested_mode_support() && list_empty(&dmar_domain->devices)) { + dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; + dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; + ret = 0; } + spin_unlock_irqrestore(&device_domain_lock, flags); return ret; } @@ -5556,7 +5546,7 @@ const struct iommu_ops intel_iommu_ops = { .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, .dma_use_flush_queue = intel_iommu_dma_use_flush_queue, - .domain_set_attr = intel_iommu_domain_set_attr, + .domain_enable_nesting = intel_iommu_domain_enable_nesting, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, .aux_attach_dev = intel_iommu_aux_attach_device, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0f12c4d58cdc42..2e9e058501a953 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2685,6 +2685,14 @@ int iommu_domain_set_attr(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_domain_set_attr); +int iommu_domain_enable_nesting(struct iommu_domain *domain) +{ + if (!domain->ops->domain_enable_nesting) + return -EINVAL; + return domain->ops->domain_enable_nesting(domain); +} +EXPORT_SYMBOL_GPL(iommu_domain_enable_nesting); + void iommu_get_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev->bus->iommu_ops; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index c8e57f22f421c5..9cea4d80dd66ed 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2320,10 +2320,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, } if (iommu->nesting) { - int attr = 1; - - ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING, - &attr); + ret = iommu_domain_enable_nesting(domain->domain); if (ret) goto out_domain; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f30de33c6ff56e..aed88aa3bd3edf 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -107,7 +107,6 @@ enum iommu_cap { */ enum 
iommu_attr { - DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, }; @@ -196,6 +195,7 @@ struct iommu_iotlb_gather { * @dma_use_flush_queue: Returns %true if a DMA flush queue is used * @dma_enable_flush_queue: Try to enable the DMA flush queue * @domain_set_attr: Change domain attributes + * @domain_enable_nesting: Enable nesting * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges @@ -248,6 +248,7 @@ struct iommu_ops { void (*dma_enable_flush_queue)(struct iommu_domain *domain); int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); + int (*domain_enable_nesting)(struct iommu_domain *domain); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -494,6 +495,7 @@ extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); bool iommu_dma_use_flush_queue(struct iommu_domain *domain); extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); +int iommu_domain_enable_nesting(struct iommu_domain *domain); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); -- 2.29.2
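As a minimal illustration of the new call: a consumer that wants two-stage translation asks for it once, before attaching any device, as the vfio hunk above does. The example_enable_nesting() name is made up.

#include <linux/iommu.h>

static int example_enable_nesting(struct iommu_domain *domain)
{
	int ret;

	ret = iommu_domain_enable_nesting(domain);
	if (ret)
		pr_warn("nested translation not available: %d\n", ret);
	return ret;
}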
Christoph Hellwig
2021-Mar-01 08:42 UTC
[PATCH 16/17] iommu: remove DOMAIN_ATTR_IO_PGTABLE_CFG
Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 +- drivers/iommu/arm/arm-smmu/arm-smmu.c | 40 +++++++------------------ drivers/iommu/iommu.c | 9 ++++++ include/linux/iommu.h | 9 +++++- 4 files changed, 29 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 0f184c3dd9d9ec..78d98ab2ee3a68 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -191,7 +191,7 @@ void adreno_set_llc_attributes(struct iommu_domain *iommu) struct io_pgtable_domain_attr pgtbl_cfg; pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; - iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); + iommu_domain_set_pgtable_attr(iommu, &pgtbl_cfg); } struct msm_gem_address_space * diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 2e17d990d04481..2858999c86dfd1 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1515,40 +1515,22 @@ static int arm_smmu_domain_enable_nesting(struct iommu_domain *domain) return ret; } -static int arm_smmu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static int arm_smmu_domain_set_pgtable_attr(struct iommu_domain *domain, + struct io_pgtable_domain_attr *pgtbl_cfg) { - int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret = -EPERM; - mutex_lock(&smmu_domain->init_mutex); - - switch(domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_IO_PGTABLE_CFG: { - struct io_pgtable_domain_attr *pgtbl_cfg = data; - - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; - } + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; - smmu_domain->pgtbl_cfg = *pgtbl_cfg; - break; - } - default: - ret = -ENODEV; - } - break; - case IOMMU_DOMAIN_DMA: - ret = -ENODEV; - break; - default: - ret = -EINVAL; + mutex_lock(&smmu_domain->init_mutex); + if (!smmu_domain->smmu) { + smmu_domain->pgtbl_cfg = *pgtbl_cfg; + ret = 0; } -out_unlock: mutex_unlock(&smmu_domain->init_mutex); + return ret; } @@ -1609,7 +1591,7 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .dma_use_flush_queue = arm_smmu_dma_use_flush_queue, .dma_enable_flush_queue = arm_smmu_dma_enable_flush_queue, - .domain_set_attr = arm_smmu_domain_set_attr, + .domain_set_pgtable_attr = arm_smmu_domain_set_pgtable_attr, .domain_enable_nesting = arm_smmu_domain_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 2e9e058501a953..8490aefd4b41f8 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2693,6 +2693,15 @@ int iommu_domain_enable_nesting(struct iommu_domain *domain) } EXPORT_SYMBOL_GPL(iommu_domain_enable_nesting); +int iommu_domain_set_pgtable_attr(struct iommu_domain *domain, + struct io_pgtable_domain_attr *pgtbl_cfg) +{ + if (!domain->ops->domain_set_pgtable_attr) + return -EINVAL; + return domain->ops->domain_set_pgtable_attr(domain, pgtbl_cfg); +} +EXPORT_SYMBOL_GPL(iommu_domain_set_pgtable_attr); + void iommu_get_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev->bus->iommu_ops; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index aed88aa3bd3edf..39d3ed4d2700ac 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -40,6 +40,7 @@ struct 
iommu_domain; struct notifier_block; struct iommu_sva; struct iommu_fault_event; +struct io_pgtable_domain_attr; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -107,7 +108,6 @@ enum iommu_cap { */ enum iommu_attr { - DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, }; @@ -196,6 +196,7 @@ struct iommu_iotlb_gather { * @dma_enable_flush_queue: Try to enable the DMA flush queue * @domain_set_attr: Change domain attributes * @domain_enable_nesting: Enable nesting + * @domain_set_pgtable_attr: Set io page table attributes * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges @@ -249,6 +250,8 @@ struct iommu_ops { int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); int (*domain_enable_nesting)(struct iommu_domain *domain); + int (*domain_set_pgtable_attr)(struct iommu_domain *domain, + struct io_pgtable_domain_attr *pgtbl_cfg); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -493,9 +496,13 @@ extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); bool iommu_dma_use_flush_queue(struct iommu_domain *domain); +int iommu_domain_set_pgtable_attr(struct iommu_domain *domain, + struct io_pgtable_domain_attr *pgtbl_cfg); extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); int iommu_domain_enable_nesting(struct iommu_domain *domain); +int iommu_domain_set_pgtable_attr(struct iommu_domain *domain, + struct io_pgtable_domain_attr *pgtbl_cfg); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); -- 2.29.2
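As a minimal illustration mirroring the adreno hunk above: a caller requests outer write-back write-allocate page table walks through the new helper before the domain is attached. The example_set_outer_wbwa() name is made up.

#include <linux/iommu.h>
#include <linux/io-pgtable.h>

static int example_set_outer_wbwa(struct iommu_domain *domain)
{
	struct io_pgtable_domain_attr pgtbl_cfg = {
		.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
	};

	/* Only honoured on an unmanaged domain that has not been attached. */
	return iommu_domain_set_pgtable_attr(domain, &pgtbl_cfg);
}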
Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/iommu/iommu.c | 17 ----------------- include/linux/iommu.h | 27 --------------------------- 2 files changed, 44 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8490aefd4b41f8..b04e6cefe8520d 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2668,23 +2668,6 @@ bool iommu_dma_use_flush_queue(struct iommu_domain *domain) } EXPORT_SYMBOL_GPL(iommu_dma_use_flush_queue); -int iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - int ret = 0; - - switch (attr) { - default: - if (domain->ops->domain_set_attr == NULL) - return -EINVAL; - - ret = domain->ops->domain_set_attr(domain, attr, data); - } - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_domain_set_attr); - int iommu_domain_enable_nesting(struct iommu_domain *domain) { if (!domain->ops->domain_enable_nesting) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 39d3ed4d2700ac..62535f563aa491 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -97,20 +97,6 @@ enum iommu_cap { IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; -/* - * Following constraints are specifc to FSL_PAMUV1: - * -aperture must be power of 2, and naturally aligned - * -number of windows must be power of 2, and address space size - * of each window is determined by aperture size / # of windows - * -the actual size of the mapped region of a window must be power - * of 2 starting with 4KB and physical address must be naturally - * aligned. - */ - -enum iommu_attr { - DOMAIN_ATTR_MAX, -}; - /* These are the possible reserved region types */ enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ @@ -194,7 +180,6 @@ struct iommu_iotlb_gather { * @device_group: find iommu group for a particular device * @dma_use_flush_queue: Returns %true if a DMA flush queue is used * @dma_enable_flush_queue: Try to enable the DMA flush queue - * @domain_set_attr: Change domain attributes * @domain_enable_nesting: Enable nesting * @domain_set_pgtable_attr: Set io page table attributes * @get_resv_regions: Request list of reserved regions for a device @@ -247,8 +232,6 @@ struct iommu_ops { struct iommu_group *(*device_group)(struct device *dev); bool (*dma_use_flush_queue)(struct iommu_domain *domain); void (*dma_enable_flush_queue)(struct iommu_domain *domain); - int (*domain_set_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); int (*domain_enable_nesting)(struct iommu_domain *domain); int (*domain_set_pgtable_attr)(struct iommu_domain *domain, struct io_pgtable_domain_attr *pgtbl_cfg); @@ -498,11 +481,7 @@ extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); bool iommu_dma_use_flush_queue(struct iommu_domain *domain); int iommu_domain_set_pgtable_attr(struct iommu_domain *domain, struct io_pgtable_domain_attr *pgtbl_cfg); -extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); int iommu_domain_enable_nesting(struct iommu_domain *domain); -int iommu_domain_set_pgtable_attr(struct iommu_domain *domain, - struct io_pgtable_domain_attr *pgtbl_cfg); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -869,12 +848,6 @@ static inline int iommu_group_id(struct iommu_group *group) return -ENODEV; } -static inline int iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - return -EINVAL; -} - static inline int 
iommu_device_register(struct iommu_device *iommu) { return -ENODEV; -- 2.29.2
Joerg Roedel
2021-Mar-04 11:10 UTC
cleanup unused or almost unused IOMMU APIs and the FSL PAMU driver
On Mon, Mar 01, 2021 at 09:42:40AM +0100, Christoph Hellwig wrote:
> Diffstat:
>  arch/powerpc/include/asm/fsl_pamu_stash.h | 12
>  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
>  drivers/iommu/amd/iommu.c | 23
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 85 ---
>  drivers/iommu/arm/arm-smmu/arm-smmu.c | 122 +---
>  drivers/iommu/dma-iommu.c | 8
>  drivers/iommu/fsl_pamu.c | 264 ----------
>  drivers/iommu/fsl_pamu.h | 10
>  drivers/iommu/fsl_pamu_domain.c | 694 ++--------------------------
>  drivers/iommu/fsl_pamu_domain.h | 46 -
>  drivers/iommu/intel/iommu.c | 55 --
>  drivers/iommu/iommu.c | 75 ---
>  drivers/soc/fsl/qbman/qman_portal.c | 56 --
>  drivers/vfio/vfio_iommu_type1.c | 31 -
>  drivers/vhost/vdpa.c | 10
>  include/linux/iommu.h | 81 ---
> 16 files changed, 214 insertions(+), 1360 deletions(-)

Nice cleanup, thanks. The fsl_pamu driver and interface has always been a little bit of an alien compared to other IOMMU drivers.

I am inclined to merge this after -rc3 is out, given some reviews. Can you also please add changelogs to the last three patches?

Thanks,

	Joerg