Jean-Philippe Brucker
2021-Nov-23 15:53 UTC
[PATCH v2 4/5] iommu/virtio: Pass end address to viommu_add_mapping()
To support identity mappings, the virtio-iommu driver must be able to
represent full 64-bit ranges internally. Pass (start, end) instead of
(start, size) to viommu_add/del_mapping().
Clean comments. The one about the returned size was never true: when
sweeping the whole address space the returned size will most certainly
be smaller than 2^64.
Reviewed-by: Kevin Tian <kevin.tian at intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
---
drivers/iommu/virtio-iommu.c | 31 +++++++++++++++----------------
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index d63ec4d11b00..eceb9281c8c1 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -311,8 +311,8 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
*
* On success, return the new mapping. Otherwise return NULL.
*/
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
-			      phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+ phys_addr_t paddr, u32 flags)
{
unsigned long irqflags;
struct viommu_mapping *mapping;
@@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
mapping->paddr = paddr;
mapping->iova.start = iova;
- mapping->iova.last = iova + size - 1;
+ mapping->iova.last = end;
mapping->flags = flags;
spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
@@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
*
* @vdomain: the domain
* @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- * space.
+ * @end: end of the range
*
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
*/
 static size_t viommu_del_mappings(struct viommu_domain *vdomain,
-				  unsigned long iova, size_t size)
+				  u64 iova, u64 end)
{
size_t unmapped = 0;
unsigned long flags;
- unsigned long last = iova + size - 1;
struct viommu_mapping *mapping = NULL;
struct interval_tree_node *node, *next;
spin_lock_irqsave(&vdomain->mappings_lock, flags);
- next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+ next = interval_tree_iter_first(&vdomain->mappings, iova, end);
while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
- next = interval_tree_iter_next(node, iova, last);
+ next = interval_tree_iter_next(node, iova, end);
/* Trying to split a mapping? */
if (mapping->iova.start < iova)
@@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
- /* Free all remaining mappings (size 2^64) */
- viommu_del_mappings(vdomain, 0, 0);
+ /* Free all remaining mappings */
+ viommu_del_mappings(vdomain, 0, ULLONG_MAX);
if (vdomain->viommu)
ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
u32 flags;
+ u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
if (flags & ~vdomain->map_flags)
return -EINVAL;
- ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+ ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
if (ret)
return ret;
@@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
.domain = cpu_to_le32(vdomain->id),
.virt_start = cpu_to_le64(iova),
.phys_start = cpu_to_le64(paddr),
- .virt_end = cpu_to_le64(iova + size - 1),
+ .virt_end = cpu_to_le64(end),
.flags = cpu_to_le32(flags),
};
@@ -770,7 +769,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
- viommu_del_mappings(vdomain, iova, size);
+ viommu_del_mappings(vdomain, iova, end);
return ret;
}
@@ -783,7 +782,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unmapped = viommu_del_mappings(vdomain, iova, size);
+ unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
return 0;
--
2.33.1
Eric Auger
2021-Nov-27 17:09 UTC
[PATCH v2 4/5] iommu/virtio: Pass end address to viommu_add_mapping()
On 11/23/21 4:53 PM, Jean-Philippe Brucker wrote:> To support identity mappings, the virtio-iommu driver must be able to > represent full 64-bit ranges internally. Pass (start, end) instead of > (start, size) to viommu_add/del_mapping(). > > Clean comments. The one about the returned size was never true: when > sweeping the whole address space the returned size will most certainly > be smaller than 2^64. > > Reviewed-by: Kevin Tian <kevin.tian at intel.com> > Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>Reviewed-by: Eric Auger <eric.auger at redhat.com> Eric> --- > drivers/iommu/virtio-iommu.c | 31 +++++++++++++++---------------- > 1 file changed, 15 insertions(+), 16 deletions(-) > > diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c > index d63ec4d11b00..eceb9281c8c1 100644 > --- a/drivers/iommu/virtio-iommu.c > +++ b/drivers/iommu/virtio-iommu.c > @@ -311,8 +311,8 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf, > * > * On success, return the new mapping. Otherwise return NULL. > */ > -static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova, > - phys_addr_t paddr, size_t size, u32 flags) > +static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end, > + phys_addr_t paddr, u32 flags) > { > unsigned long irqflags; > struct viommu_mapping *mapping; > @@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova, > > mapping->paddr = paddr; > mapping->iova.start = iova; > - mapping->iova.last = iova + size - 1; > + mapping->iova.last = end; > mapping->flags = flags; > > spin_lock_irqsave(&vdomain->mappings_lock, irqflags); > @@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova, > * > * @vdomain: the domain > * @iova: start of the range > - * @size: size of the range. A size of 0 corresponds to the entire address > - * space. 
> + * @end: end of the range > * > - * On success, returns the number of unmapped bytes (>= size) > + * On success, returns the number of unmapped bytes > */ > static size_t viommu_del_mappings(struct viommu_domain *vdomain, > - unsigned long iova, size_t size) > + u64 iova, u64 end) > { > size_t unmapped = 0; > unsigned long flags; > - unsigned long last = iova + size - 1; > struct viommu_mapping *mapping = NULL; > struct interval_tree_node *node, *next; > > spin_lock_irqsave(&vdomain->mappings_lock, flags); > - next = interval_tree_iter_first(&vdomain->mappings, iova, last); > + next = interval_tree_iter_first(&vdomain->mappings, iova, end); > while (next) { > node = next; > mapping = container_of(node, struct viommu_mapping, iova); > - next = interval_tree_iter_next(node, iova, last); > + next = interval_tree_iter_next(node, iova, end); > > /* Trying to split a mapping? */ > if (mapping->iova.start < iova) > @@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain *domain) > { > struct viommu_domain *vdomain = to_viommu_domain(domain); > > - /* Free all remaining mappings (size 2^64) */ > - viommu_del_mappings(vdomain, 0, 0); > + /* Free all remaining mappings */ > + viommu_del_mappings(vdomain, 0, ULLONG_MAX); > > if (vdomain->viommu) > ida_free(&vdomain->viommu->domain_ids, vdomain->id); > @@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, > { > int ret; > u32 flags; > + u64 end = iova + size - 1; > struct virtio_iommu_req_map map; > struct viommu_domain *vdomain = to_viommu_domain(domain); > > @@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, > if (flags & ~vdomain->map_flags) > return -EINVAL; > > - ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); > + ret = viommu_add_mapping(vdomain, iova, end, paddr, flags); > if (ret) > return ret; > > @@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, > .domain = 
cpu_to_le32(vdomain->id), > .virt_start = cpu_to_le64(iova), > .phys_start = cpu_to_le64(paddr), > - .virt_end = cpu_to_le64(iova + size - 1), > + .virt_end = cpu_to_le64(end), > .flags = cpu_to_le32(flags), > }; > > @@ -770,7 +769,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, > > ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); > if (ret) > - viommu_del_mappings(vdomain, iova, size); > + viommu_del_mappings(vdomain, iova, end); > > return ret; > } > @@ -783,7 +782,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova, > struct virtio_iommu_req_unmap unmap; > struct viommu_domain *vdomain = to_viommu_domain(domain); > > - unmapped = viommu_del_mappings(vdomain, iova, size); > + unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); > if (unmapped < size) > return 0; >