Displaying 20 results from an estimated 44 matches for "virt_end".
2017 Oct 23
3
[RFC] virtio-iommu version 0.5
This is version 0.5 of the virtio-iommu specification, the paravirtualized
IOMMU. This version addresses feedback from v0.4 and adds an event virtqueue.
Please find the specification, LaTeX sources and pdf, at:
git://linux-arm.org/virtio-iommu.git viommu/v0.5
http://linux-arm.org/git?p=virtio-iommu.git;a=blob;f=dist/v0.5/virtio-iommu-v0.5.pdf
A detailed changelog since v0.4 follows. You can find
2017 Oct 23
3
[RFC] virtio-iommu version 0.5
This is version 0.5 of the virtio-iommu specification, the paravirtualized
IOMMU. This version addresses feedback from v0.4 and adds an event virtqueue.
Please find the specification, LaTeX sources and pdf, at:
git://linux-arm.org/virtio-iommu.git viommu/v0.5
http://linux-arm.org/git?p=virtio-iommu.git;a=blob;f=dist/v0.5/virtio-iommu-v0.5.pdf
A detailed changelog since v0.4 follows. You can find
2018 Jun 13
0
[RFC] virtio-iommu version 0.7
...ns}{Device Types / IOMMU Device / Device operations}
@@ -321,12 +312,14 @@ to zero.
The device MUST ignore reserved fields of the head and the tail of a
request.
-If the VIRTIO_IOMMU_F_INPUT_RANGE feature is offered, the device MUST
-truncate the range described by \field{virt_start} and \field{virt_end} in
-requests to fit in the range described by \field{input_range}.
+If the VIRTIO_IOMMU_F_INPUT_RANGE feature is offered and the range
+described by fields \field{virt_start} and \field{virt_end} doesn't fit in
+the range described by \field{input_range}, the device MAY set
+\field{status} to...
2023 May 15
3
[PATCH v2 0/2] iommu/virtio: Fixes
One fix reported by Akihiko, and another found while going over the
driver.
Jean-Philippe Brucker (2):
iommu/virtio: Detach domain on endpoint release
iommu/virtio: Return size mapped for a detached domain
drivers/iommu/virtio-iommu.c | 57 ++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 16 deletions(-)
--
2.40.0
2018 Oct 12
3
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2018 Oct 12
3
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2018 Nov 22
0
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...t(&vdomain->mappings, 0, -1UL);
+ while (node) {
+ mapping = container_of(node, struct viommu_mapping, iova);
+ map = (struct virtio_iommu_req_map) {
+ .head.type = VIRTIO_IOMMU_T_MAP,
+ .domain = cpu_to_le32(vdomain->id),
+ .virt_start = cpu_to_le64(mapping->iova.start),
+ .virt_end = cpu_to_le64(mapping->iova.last),
+ .phys_start = cpu_to_le64(mapping->paddr),
+ .flags = cpu_to_le32(mapping->flags),
+ };
+
+ ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+ if (ret)
+ break;
+
+ node = interval_tree_iter_next(node, 0, -1UL);
+ }
+ sp...
2018 Nov 15
0
[PATCH v4 5/7] iommu: Add virtio-iommu driver
...t(&vdomain->mappings, 0, -1UL);
+ while (node) {
+ mapping = container_of(node, struct viommu_mapping, iova);
+ map = (struct virtio_iommu_req_map) {
+ .head.type = VIRTIO_IOMMU_T_MAP,
+ .domain = cpu_to_le32(vdomain->id),
+ .virt_start = cpu_to_le64(mapping->iova.start),
+ .virt_end = cpu_to_le64(mapping->iova.last),
+ .phys_start = cpu_to_le64(mapping->paddr),
+ .flags = cpu_to_le32(mapping->flags),
+ };
+
+ ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+ if (ret)
+ break;
+
+ node = interval_tree_iter_next(node, 0, -1UL);
+ }
+ sp...
2018 Jun 21
0
[PATCH v2 2/5] iommu: Add virtio-iommu driver
...t(&vdomain->mappings, 0, -1UL);
+ while (node) {
+ mapping = container_of(node, struct viommu_mapping, iova);
+ map = (struct virtio_iommu_req_map) {
+ .head.type = VIRTIO_IOMMU_T_MAP,
+ .domain = cpu_to_le32(vdomain->id),
+ .virt_start = cpu_to_le64(mapping->iova.start),
+ .virt_end = cpu_to_le64(mapping->iova.last),
+ .phys_start = cpu_to_le64(mapping->paddr),
+ .flags = cpu_to_le32(mapping->flags),
+ };
+
+ ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+ if (ret)
+ break;
+
+ node = interval_tree_iter_next(node, 0, -1UL);
+ }
+ sp...
2018 Oct 12
0
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...t(&vdomain->mappings, 0, -1UL);
+ while (node) {
+ mapping = container_of(node, struct viommu_mapping, iova);
+ map = (struct virtio_iommu_req_map) {
+ .head.type = VIRTIO_IOMMU_T_MAP,
+ .domain = cpu_to_le32(vdomain->id),
+ .virt_start = cpu_to_le64(mapping->iova.start),
+ .virt_end = cpu_to_le64(mapping->iova.last),
+ .phys_start = cpu_to_le64(mapping->paddr),
+ .flags = cpu_to_le32(mapping->flags),
+ };
+
+ ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+ if (ret)
+ break;
+
+ node = interval_tree_iter_next(node, 0, -1UL);
+ }
+ sp...
2019 May 30
0
[PATCH v8 5/7] iommu: Add virtio-iommu driver
...t(&vdomain->mappings, 0, -1UL);
+ while (node) {
+ mapping = container_of(node, struct viommu_mapping, iova);
+ map = (struct virtio_iommu_req_map) {
+ .head.type = VIRTIO_IOMMU_T_MAP,
+ .domain = cpu_to_le32(vdomain->id),
+ .virt_start = cpu_to_le64(mapping->iova.start),
+ .virt_end = cpu_to_le64(mapping->iova.last),
+ .phys_start = cpu_to_le64(mapping->paddr),
+ .flags = cpu_to_le32(mapping->flags),
+ };
+
+ ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+ if (ret)
+ break;
+
+ node = interval_tree_iter_next(node, 0, -1UL);
+ }
+ sp...
2018 Nov 08
0
[PATCH v3 5/7] iommu: Add virtio-iommu driver
...{
>> + mapping = container_of(node, struct viommu_mapping, iova);
>> + map = (struct virtio_iommu_req_map) {
>> + .head.type = VIRTIO_IOMMU_T_MAP,
>> + .domain = cpu_to_le32(vdomain->id),
>> + .virt_start = cpu_to_le64(mapping->iova.start),
>> + .virt_end = cpu_to_le64(mapping->iova.last),
>> + .phys_start = cpu_to_le64(mapping->paddr),
>> + .flags = cpu_to_le32(mapping->flags),
>> + };
>> +
>> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
>> + if (ret)
>> + brea...
2018 Nov 23
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2018 Nov 23
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2018 Nov 22
15
[PATCH v5 0/7] Add virtio-iommu driver
Implement the virtio-iommu driver, following specification v0.9 [1].
Since v4 [2] I fixed the issues reported by Eric, and added Reviewed-by
from Eric and Rob. Thanks!
I changed the specification to fix one inconsistency discussed in v4.
That the device fills the probe buffer with zeroes is now a "SHOULD"
instead of a "MAY", since it's the only way for the driver to know
2018 Nov 22
15
[PATCH v5 0/7] Add virtio-iommu driver
Implement the virtio-iommu driver, following specification v0.9 [1].
Since v4 [2] I fixed the issues reported by Eric, and added Reviewed-by
from Eric and Rob. Thanks!
I changed the specification to fix one inconsistency discussed in v4.
That the device fills the probe buffer with zeroes is now a "SHOULD"
instead of a "MAY", since it's the only way for the driver to know
2018 Nov 23
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2018 Nov 23
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2018 Jun 22
1
[PATCH v2 2/5] iommu: Add virtio-iommu driver
...L);
> + while (node) {
> + mapping = container_of(node, struct viommu_mapping, iova);
> + map = (struct virtio_iommu_req_map) {
> + .head.type = VIRTIO_IOMMU_T_MAP,
> + .domain = cpu_to_le32(vdomain->id),
> + .virt_start = cpu_to_le64(mapping->iova.start),
> + .virt_end = cpu_to_le64(mapping->iova.last),
> + .phys_start = cpu_to_le64(mapping->paddr),
> + .flags = cpu_to_le32(mapping->flags),
> + };
> +
> + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> + if (ret)
> + break;
> +
> + node = int...
2019 Feb 21
1
[PATCH v7 0/7] Add virtio-iommu driver
...finalizing the 1.1
> spec so I expect it to take a while.
I read v0.9 of the spec and have some minor comments, hope this is a
good place to send them:
1. In section 2.6.2, one reads
If the VIRTIO_IOMMU_F_INPUT_RANGE feature is offered and the range
described by fields virt_start and virt_end doesn't fit in the range
described by input_range, the device MAY set status to VIRTIO_-
IOMMU_S_RANGE and ignore the request.
Shouldn't it say "If the VIRTIO_IOMMU_F_INPUT_RANGE feature is
negotiated" instead?
2. There's a typo at the end of section 2.6.5:
The VIR...