search for: virtio_iommu_t_map

Displaying 20 results from an estimated 63 matches for "virtio_iommu_t_map".

2023 May 15
3
[PATCH v2 0/2] iommu/virtio: Fixes
One fix reported by Akihiko, and another found while going over the driver. Jean-Philippe Brucker (2): iommu/virtio: Detach domain on endpoint release iommu/virtio: Return size mapped for a detached domain drivers/iommu/virtio-iommu.c | 57 ++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 16 deletions(-) -- 2.40.0
2017 Oct 09
0
[virtio-dev] [RFC] virtio-iommu version 0.4
...; } + kfree(req); + vdomain->attached++; vdev->vdomain = vdomain; @@ -550,13 +558,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, { int ret; struct viommu_domain *vdomain = to_viommu_domain(domain); - struct virtio_iommu_req_map req = { - .head.type = VIRTIO_IOMMU_T_MAP, - .address_space = cpu_to_le32(vdomain->id), - .virt_addr = cpu_to_le64(iova), - .phys_addr = cpu_to_le64(paddr), - .size = cpu_to_le64(size), - }; + struct virtio_iommu_req_map *req; pr_debug("map %llu 0x%lx -> 0x%llx (%zu)\n", vdomain->id, iova, paddr, size); @@ -5...
2017 Jun 16
1
[virtio-dev] [RFC PATCH linux] iommu: Add virtio-iommu driver
...ion virtio_iommu_req r; > + > + *tail = sizeof(struct virtio_iommu_req_tail); > + > + switch (req->type) { > + case VIRTIO_IOMMU_T_ATTACH: > + size = sizeof(r.attach); > + break; > + case VIRTIO_IOMMU_T_DETACH: > + size = sizeof(r.detach); > + break; > + case VIRTIO_IOMMU_T_MAP: > + size = sizeof(r.map); > + break; > + case VIRTIO_IOMMU_T_UNMAP: > + size = sizeof(r.unmap); > + break; > + default: > + return -EINVAL; > + } > + > + *head = size - *tail; > + return 0; > +} > + > +static int viommu_receive_resp(struct viommu_dev...
2017 Jun 16
1
[virtio-dev] [RFC PATCH linux] iommu: Add virtio-iommu driver
...ion virtio_iommu_req r; > + > + *tail = sizeof(struct virtio_iommu_req_tail); > + > + switch (req->type) { > + case VIRTIO_IOMMU_T_ATTACH: > + size = sizeof(r.attach); > + break; > + case VIRTIO_IOMMU_T_DETACH: > + size = sizeof(r.detach); > + break; > + case VIRTIO_IOMMU_T_MAP: > + size = sizeof(r.map); > + break; > + case VIRTIO_IOMMU_T_UNMAP: > + size = sizeof(r.unmap); > + break; > + default: > + return -EINVAL; > + } > + > + *head = size - *tail; > + return 0; > +} > + > +static int viommu_receive_resp(struct viommu_dev...
2017 Apr 07
0
[RFC PATCH linux] iommu: Add virtio-iommu driver
...*head, + size_t *tail) +{ + size_t size; + union virtio_iommu_req r; + + *tail = sizeof(struct virtio_iommu_req_tail); + + switch (req->type) { + case VIRTIO_IOMMU_T_ATTACH: + size = sizeof(r.attach); + break; + case VIRTIO_IOMMU_T_DETACH: + size = sizeof(r.detach); + break; + case VIRTIO_IOMMU_T_MAP: + size = sizeof(r.map); + break; + case VIRTIO_IOMMU_T_UNMAP: + size = sizeof(r.unmap); + break; + default: + return -EINVAL; + } + + *head = size - *tail; + return 0; +} + +static int viommu_receive_resp(struct viommu_dev *viommu, int nr_expected) +{ + + unsigned int len; + int nr_received =...
2018 Oct 12
3
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...+ > + spin_lock_irqsave(&vdomain->mappings_lock, flags); > + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); > + while (node) { > + mapping = container_of(node, struct viommu_mapping, iova); > + map = (struct virtio_iommu_req_map) { > + .head.type = VIRTIO_IOMMU_T_MAP, > + .domain = cpu_to_le32(vdomain->id), > + .virt_start = cpu_to_le64(mapping->iova.start), > + .virt_end = cpu_to_le64(mapping->iova.last), > + .phys_start = cpu_to_le64(mapping->paddr), > + .flags = cpu_to_le32(mapping->flags), > + }; > + > +...
2018 Oct 12
3
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...+ > + spin_lock_irqsave(&vdomain->mappings_lock, flags); > + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); > + while (node) { > + mapping = container_of(node, struct viommu_mapping, iova); > + map = (struct virtio_iommu_req_map) { > + .head.type = VIRTIO_IOMMU_T_MAP, > + .domain = cpu_to_le32(vdomain->id), > + .virt_start = cpu_to_le64(mapping->iova.start), > + .virt_end = cpu_to_le64(mapping->iova.last), > + .phys_start = cpu_to_le64(mapping->paddr), > + .flags = cpu_to_le32(mapping->flags), > + }; > + > +...
2018 Nov 22
0
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...uct virtio_iommu_req_map map; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); + while (node) { + mapping = container_of(node, struct viommu_mapping, iova); + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(mapping->iova.start), + .virt_end = cpu_to_le64(mapping->iova.last), + .phys_start = cpu_to_le64(mapping->paddr), + .flags = cpu_to_le32(mapping->flags), + }; + + ret = viommu_send_req_sync(vdomain->v...
2018 Nov 15
0
[PATCH v4 5/7] iommu: Add virtio-iommu driver
...uct virtio_iommu_req_map map; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); + while (node) { + mapping = container_of(node, struct viommu_mapping, iova); + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(mapping->iova.start), + .virt_end = cpu_to_le64(mapping->iova.last), + .phys_start = cpu_to_le64(mapping->paddr), + .flags = cpu_to_le32(mapping->flags), + }; + + ret = viommu_send_req_sync(vdomain->v...
2017 Apr 07
0
[RFC 2/3] virtio-iommu: device probing and operations
...ents, such a request would need to be described by two chains of descriptors, which might be more complex to implement efficiently, but still possible. Both devices and drivers must assume that requests are segmented anyway.) Type may be one of: VIRTIO_IOMMU_T_ATTACH 1 VIRTIO_IOMMU_T_DETACH 2 VIRTIO_IOMMU_T_MAP 3 VIRTIO_IOMMU_T_UNMAP 4 A few general-purpose status codes are defined here. Driver must not assume a specific status to be returned for an invalid request. Except for 0 that always means "success", these values are hints to make troubleshooting easier. VIRTIO_IOMMU_S_OK 0 All g...
2018 Nov 15
1
[PATCH v3 6/7] iommu/virtio: Add probe request
...> /* Max domain ID size */ > __u8 domain_bits; > + __u8 padding[3]; > + /* Probe buffer size */ > + __u32 probe_size; > }; > > /* Request types */ > @@ -32,6 +36,7 @@ struct virtio_iommu_config { > #define VIRTIO_IOMMU_T_DETACH 0x02 > #define VIRTIO_IOMMU_T_MAP 0x03 > #define VIRTIO_IOMMU_T_UNMAP 0x04 > +#define VIRTIO_IOMMU_T_PROBE 0x05 > > /* Status types */ > #define VIRTIO_IOMMU_S_OK 0x00 > @@ -98,4 +103,38 @@ struct virtio_iommu_req_unmap { > struct virtio_iommu_req_tail tail; > }; > > +#define VIRTIO_...
2018 Jun 21
0
[PATCH v2 2/5] iommu: Add virtio-iommu driver
...uct virtio_iommu_req_map map; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); + while (node) { + mapping = container_of(node, struct viommu_mapping, iova); + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(mapping->iova.start), + .virt_end = cpu_to_le64(mapping->iova.last), + .phys_start = cpu_to_le64(mapping->paddr), + .flags = cpu_to_le32(mapping->flags), + }; + + ret = viommu_send_req_sync(vdomain->v...
2018 Oct 12
0
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...uct virtio_iommu_req_map map; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); + while (node) { + mapping = container_of(node, struct viommu_mapping, iova); + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(mapping->iova.start), + .virt_end = cpu_to_le64(mapping->iova.last), + .phys_start = cpu_to_le64(mapping->paddr), + .flags = cpu_to_le32(mapping->flags), + }; + + ret = viommu_send_req_sync(vdomain->v...
2018 Feb 14
0
[PATCH 1/4] iommu: Add virtio-iommu driver
...bottom) +{ + size_t size; + union virtio_iommu_req *r = (void *)req; + + *bottom = sizeof(struct virtio_iommu_req_tail); + + switch (req->type) { + case VIRTIO_IOMMU_T_ATTACH: + size = sizeof(r->attach); + break; + case VIRTIO_IOMMU_T_DETACH: + size = sizeof(r->detach); + break; + case VIRTIO_IOMMU_T_MAP: + size = sizeof(r->map); + break; + case VIRTIO_IOMMU_T_UNMAP: + size = sizeof(r->unmap); + break; + default: + return -EINVAL; + } + + *top = size - *bottom; + return 0; +} + +static int viommu_receive_resp(struct viommu_dev *viommu, int nr_sent, + struct list_head *sent) +{ +...
2018 Nov 22
15
[PATCH v5 0/7] Add virtio-iommu driver
Implement the virtio-iommu driver, following specification v0.9 [1]. Since v4 [2] I fixed the issues reported by Eric, and added Reviewed-by from Eric and Rob. Thanks! I changed the specification to fix one inconsistency discussed in v4. That the device fills the probe buffer with zeroes is now a "SHOULD" instead of a "MAY", since it's the only way for the driver to know
2018 Nov 22
15
[PATCH v5 0/7] Add virtio-iommu driver
Implement the virtio-iommu driver, following specification v0.9 [1]. Since v4 [2] I fixed the issues reported by Eric, and added Reviewed-by from Eric and Rob. Thanks! I changed the specification to fix one inconsistency discussed in v4. That the device fills the probe buffer with zeroes is now a "SHOULD" instead of a "MAY", since it's the only way for the driver to know
2019 May 30
0
[PATCH v8 5/7] iommu: Add virtio-iommu driver
...uct virtio_iommu_req_map map; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); + while (node) { + mapping = container_of(node, struct viommu_mapping, iova); + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(mapping->iova.start), + .virt_end = cpu_to_le64(mapping->iova.last), + .phys_start = cpu_to_le64(mapping->paddr), + .flags = cpu_to_le32(mapping->flags), + }; + + ret = viommu_send_req_sync(vdomain->v...
2017 Apr 12
0
[RFC 0/3] virtio-iommu: a paravirtualized IOMMU
...rspace is running a net driver (e.g. DPDK). It allocates a > buffer with mmap, obtaining virtual address VA. It then send a > VFIO_IOMMU_MAP_DMA request to map VA to an IOVA (possibly VA=IOVA). > b. The mapping request is relayed to the host through virtio > (VIRTIO_IOMMU_T_MAP). > c. The mapping request is relayed to the physical IOMMU through VFIO. > > (2) a. The guest userspace driver can now instruct the device to directly > access the buffer at IOVA > b. IOVA accesses from the device are translated into physical > addresses...
2018 Nov 08
0
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...rqsave(&vdomain->mappings_lock, flags); >> + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); >> + while (node) { >> + mapping = container_of(node, struct viommu_mapping, iova); >> + map = (struct virtio_iommu_req_map) { >> + .head.type = VIRTIO_IOMMU_T_MAP, >> + .domain = cpu_to_le32(vdomain->id), >> + .virt_start = cpu_to_le64(mapping->iova.start), >> + .virt_end = cpu_to_le64(mapping->iova.last), >> + .phys_start = cpu_to_le64(mapping->paddr), >> + .flags = cpu_to_le32(mapping->flags), >&g...
2018 Nov 23
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...+ > + spin_lock_irqsave(&vdomain->mappings_lock, flags); > + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); > + while (node) { > + mapping = container_of(node, struct viommu_mapping, iova); > + map = (struct virtio_iommu_req_map) { > + .head.type = VIRTIO_IOMMU_T_MAP, > + .domain = cpu_to_le32(vdomain->id), > + .virt_start = cpu_to_le64(mapping->iova.start), > + .virt_end = cpu_to_le64(mapping->iova.last), > + .phys_start = cpu_to_le64(mapping->paddr), > + .flags = cpu_to_le32(mapping->flags), > + }; > + > +...