Displaying 8 results from an estimated 8 matches for "viommu_map_pages".
2023 Apr 14
2
[PATCH] iommu/virtio: Detach domain on endpoint release
...type	= VIRTIO_IOMMU_T_DETACH,
+		.domain		= cpu_to_le32(vdomain->id),
+	};
+
+	for (i = 0; i < fwspec->num_ids; i++) {
+		req.endpoint = cpu_to_le32(fwspec->ids[i]);
+		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
+	}
+	vdev->vdomain = NULL;
+}
+
 static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
@@ -990,6 +1012,7 @@ static void viommu_release_device(struct device *dev)
 {
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	viommu_...
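For context, the truncated hunk above ends with a call ("+ viommu_...") added to viommu_release_device(). Below is a minimal sketch of the shape the detach helper appears to take, reconstructed from the visible lines; the function name viommu_detach_endpoint_from_domain, the NULL-domain guard, and the .head.type field spelling are assumptions, not quotes from the patch:

/*
 * Hedged sketch only: reconstructs the apparent shape of the detach
 * helper from the truncated hunk above. The name and the guard are
 * assumptions based on the visible lines, not a verbatim copy.
 */
static void viommu_detach_endpoint_from_domain(struct viommu_endpoint *vdev)
{
	int i;
	struct virtio_iommu_req_detach req;
	struct viommu_domain *vdomain = vdev->vdomain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);

	/* Assumed guard: nothing to do if the endpoint was never attached */
	if (!vdomain)
		return;

	req = (struct virtio_iommu_req_detach) {
		.head.type	= VIRTIO_IOMMU_T_DETACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	/* One DETACH request per endpoint ID, as in the hunk above */
	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
	}
	vdev->vdomain = NULL;
}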
2023 Sep 04
0
[PATCH 1/2] iommu/virtio: Make use of ops->iotlb_sync_map
Hi Niklas,
Thanks for following up with these patches.
On Fri, Aug 25, 2023 at 05:21:25PM +0200, Niklas Schnelle wrote:
> Pull out the sync operation from viommu_map_pages() by implementing
> ops->iotlb_sync_map. This allows the common IOMMU code to map multiple
> elements of an sg with a single sync (see iommu_map_sg()). Furthermore,
> it is also a requirement for IOMMU_CAP_DEFERRED_FLUSH.
>
> Link: https://lore.kernel.org/lkml/20230726111433.1105...
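To make the quoted change concrete, here is a hedged sketch of what an ops->iotlb_sync_map callback for this driver could look like; viommu_sync_req(), the nr_endpoints guard, and the int return type are assumed from the driver's surrounding code and later kernels, not quoted from Niklas's patch:

/*
 * Hedged sketch: one possible shape for the iotlb_sync_map callback
 * described above. Helper names are assumed, not quoted.
 */
static int viommu_iotlb_sync_map(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * No endpoint attached yet (e.g. while recording direct
	 * mappings), so there is no device-side state to sync.
	 */
	if (!vdomain->nr_endpoints)
		return 0;
	return viommu_sync_req(vdomain->viommu);
}

With the sync pulled out, iommu_map_sg() can issue the MAP requests for an entire scatterlist and pay for a single sync at the end, rather than one sync per viommu_map_pages() call.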
2023 Sep 04
1
[PATCH 2/2] iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush
...ev, enum iommu_cap cap)
> 	switch (cap) {
> 	case IOMMU_CAP_CACHE_COHERENCY:
> 		return true;
> +	case IOMMU_CAP_DEFERRED_FLUSH:
> +		return true;
> 	default:
> 		return false;
> 	}
> @@ -1069,6 +1080,7 @@ static struct iommu_ops viommu_ops = {
> 	.map_pages		= viommu_map_pages,
> 	.unmap_pages		= viommu_unmap_pages,
> 	.iova_to_phys		= viommu_iova_to_phys,
> +	.flush_iotlb_all	= viommu_flush_iotlb_all,
> 	.iotlb_sync		= viommu_iotlb_sync,
> 	.iotlb_sync_map		= viommu_iotlb_sync_map,
> 	.free			= viommu_domain_free,
>
> --
> 2.39.2...
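For the deferred-flush side, unmap requests are batched and the IOTLB is flushed in one sweep via the new callback registered above. A hedged sketch of viommu_flush_iotlb_all, with the body assumed from the driver's existing sync helper rather than quoted from the patch:

/*
 * Hedged sketch of the flush_iotlb_all callback registered above.
 * Deferred flush queues unmaps and relies on this callback to sync
 * the whole IOTLB at once. The guard and helper are assumptions.
 */
static void viommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Assumed guard: skip domains with no endpoint attached yet */
	if (!vdomain->nr_endpoints)
		return;
	viommu_sync_req(vdomain->viommu);
}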
2023 May 15
3
[PATCH v2 0/2] iommu/virtio: Fixes
One fix reported by Akihiko, and another found while going over the
driver.
Jean-Philippe Brucker (2):
iommu/virtio: Detach domain on endpoint release
iommu/virtio: Return size mapped for a detached domain
drivers/iommu/virtio-iommu.c | 57 ++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 16 deletions(-)
--
2.40.0
2023 Sep 06
1
[PATCH 2/2] iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush
...> > > > +	case IOMMU_CAP_DEFERRED_FLUSH:
> > > > +		return true;
> > > > 	default:
> > > > 		return false;
> > > > 	}
> > > > @@ -1069,6 +1080,7 @@ static struct iommu_ops viommu_ops = {
> > > > 	.map_pages		= viommu_map_pages,
> > > > 	.unmap_pages		= viommu_unmap_pages,
> > > > 	.iova_to_phys		= viommu_iova_to_phys,
> > > > +	.flush_iotlb_all	= viommu_flush_iotlb_all,
> > > > 	.iotlb_sync		= viommu_iotlb_sync,
> > > > 	.iotlb_sync_map		= viommu_io...
2023 May 10
1
[PATCH] iommu/virtio: Detach domain on endpoint release
...> +		req.endpoint = cpu_to_le32(fwspec->ids[i]);
> +		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
> +	}
Just a late question: don't you need to decrement vdomain's nr_endpoints?
Thanks
Eric
> +	vdev->vdomain = NULL;
> +}
> +
>  static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
> 			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
> 			    int prot, gfp_t gfp, size_t *mapped)
> @@ -990,6 +1012,7 @@ static void viommu_release_device(struct device *dev)
>  {
> 	struct viommu_endpoint *vdev = dev_iommu_p...
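Eric's question concerns the domain's endpoint accounting: attaching increments vdomain->nr_endpoints, so a detach helper presumably needs the matching decrement before it drops the back-pointer. A hedged sketch of the tail of the loop with that adjustment; the placement is assumed, not taken from a posted revision:

	/* Sketch only: assumed placement of the decrement Eric asks about */
	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
	}
	vdomain->nr_endpoints--;	/* balance the increment done at attach */
	vdev->vdomain = NULL;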