Displaying 4 results from an estimated 4 matches for "iommu_cap_deferred_flush".
2023 Sep 04
1
[PATCH 2/2] iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush
...t device *dev, struct list_head *head)
> {
> struct iommu_resv_region *entry, *new_entry, *msi = NULL;
> @@ -1049,6 +1058,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
> switch (cap) {
> case IOMMU_CAP_CACHE_COHERENCY:
> return true;
> + case IOMMU_CAP_DEFERRED_FLUSH:
> + return true;
> default:
> return false;
> }
> @@ -1069,6 +1080,7 @@ static struct iommu_ops viommu_ops = {
> .map_pages = viommu_map_pages,
> .unmap_pages = viommu_unmap_pages,
> .iova_to_phys = viommu_iova_to_phys,
> + .flush_iotlb_all = viommu_...
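The preview above truncates the callback that gets wired into viommu_ops, so the following is only a rough sketch of what a flush_iotlb_all implementation for drivers/iommu/virtio-iommu.c would typically contain, not the patch itself. The function name is illustrative, and it assumes the driver-internal to_viommu_domain() accessor, the nr_endpoints counter, and the viommu_sync_req() helper that virtio-iommu already uses:

static void viommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Nothing to flush before any endpoint is attached to the domain. */
	if (!vdomain->nr_endpoints)
		return;

	/* Push all pending requests to the device and wait for completion. */
	viommu_sync_req(vdomain->viommu);
}

With IOMMU_CAP_DEFERRED_FLUSH advertised, the DMA layer can batch freed IOVAs in a flush queue and issue a single sync like this when the queue is drained, instead of one sync per unmap.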
2023 Sep 06
1
[PATCH 2/2] iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush
...*entry, *new_entry, *msi = NULL;
> > > > @@ -1049,6 +1058,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
> > > > switch (cap) {
> > > > case IOMMU_CAP_CACHE_COHERENCY:
> > > > return true;
> > > > + case IOMMU_CAP_DEFERRED_FLUSH:
> > > > + return true;
> > > > default:
> > > > return false;
> > > > }
> > > > @@ -1069,6 +1080,7 @@ static struct iommu_ops viommu_ops = {
> > > > .map_pages = viommu_map_pages,
> > > > .unmap...
2023 Sep 06
1
[PATCH 2/2] iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush
...*entry, *new_entry, *msi = NULL;
> > > > @@ -1049,6 +1058,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
> > > > switch (cap) {
> > > > case IOMMU_CAP_CACHE_COHERENCY:
> > > > return true;
> > > > + case IOMMU_CAP_DEFERRED_FLUSH:
> > > > + return true;
> > > > default:
> > > > return false;
> > > > }
> > > > @@ -1069,6 +1080,7 @@ static struct iommu_ops viommu_ops = {
> > > > .map_pages = viommu_map_pages,
> > > > .unmap...
2023 Sep 04
0
[PATCH 1/2] iommu/virtio: Make use of ops->iotlb_sync_map
...1:25PM +0200, Niklas Schnelle wrote:
> Pull out the sync operation from viommu_map_pages() by implementing
> ops->iotlb_sync_map. This allows the common IOMMU code to map multiple
> elements of an sg with a single sync (see iommu_map_sg()). Furthermore,
> it is also a requirement for IOMMU_CAP_DEFERRED_FLUSH.
>
> Link: https://lore.kernel.org/lkml/20230726111433.1105665-1-schnelle@linux.ibm.com/
> Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> ---
> drivers/iommu/virtio-iommu.c | 15 ++++++++++++++-
> 1 file changed, 14 insertions(+), 1 deletion(-)
>
>...
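The preview cuts off before the body of this patch, so here is only a sketch of the approach the description outlines: moving the sync out of viommu_map_pages() into an iotlb_sync_map callback so one sync can cover a whole scatter-gather mapping. Names and the exact prototype are assumptions (older kernels used a void-returning .iotlb_sync_map before it was allowed to fail), again relying on the driver-internal viommu_sync_req() helper:

static int viommu_iotlb_sync_map(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Mappings created before any endpoint attaches need no sync yet. */
	if (!vdomain->nr_endpoints)
		return 0;

	/* One sync covers every map request queued for this IOVA range. */
	return viommu_sync_req(vdomain->viommu);
}

Because the common code calls this once after iommu_map_sg() has mapped all elements, the per-element sync inside viommu_map_pages() can be dropped, which is also what makes deferred flushing in patch 2/2 viable.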