Displaying 20 results from an estimated 84 matches for "sg_dma_address".
2016 Dec 08
1
[PATCH 1/2] virtio_ring: Do not call dma_map_page if sg is already mapped.
..._sg(const struct vring_virtqueue *vq,
>  	if (!vring_use_dma_api(vq->vq.vdev))
>  		return (dma_addr_t)sg_phys(sg);
>  
> +	/* If the sg is already mapped, return the DMA address */

How come we even reach this code for rpmsg?
Does vring_use_dma_api return true for rpmsg?

> +	if (sg_dma_address(sg)) {
> +		sg->length = sg_dma_len(sg);
> +		return sg_dma_address(sg);
> +	}
> +

Is there a rule that says 0 is not a valid address?

>  	/*
>  	 * We can't use dma_map_sg, because we don't use scatterlists in
>  	 * the way it expects (we don't guarantee that...
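The review question turns on the patch treating a zero sg_dma_address() as "not yet mapped". A minimal sketch of that assumption (the function name is illustrative; note that in current kernels the DMA API reserves DMA_MAPPING_ERROR, not 0, as its invalid value):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch of the patch's fast path. Assumption under review: a DMA
 * address of 0 always means "unmapped", which the DMA API does not
 * guarantee -- DMA_MAPPING_ERROR is the only reserved sentinel.
 */
static dma_addr_t sketch_map_one_sg(struct device *dev,
				    struct scatterlist *sg,
				    enum dma_data_direction dir)
{
	if (sg_dma_address(sg)) {		/* caller pre-mapped this sg */
		sg->length = sg_dma_len(sg);	/* trust the pre-set DMA length */
		return sg_dma_address(sg);
	}
	/* normal path: map the backing page now */
	return dma_map_page(dev, sg_page(sg), sg->offset, sg->length, dir);
}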
2016 Dec 08
3
[PATCH 0/2] Virtio ring works with DMA coherent memory
RPMsg uses dma_alloc_coherent() to allocate memory to be shared with the remote.
In this case, as there are no pages set up by dma_alloc_coherent(),
we cannot get the physical address back from the virtual address; instead,
we set sg_dma_addr to store the DMA address and mark the sg as already DMA
mapped.
When the virtio vring sees that sg_dma_addr is already set, it does not call dma_map_page().
The issue
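A minimal caller-side sketch of the scheme the cover letter describes (the function name is illustrative; dma_alloc_coherent(), sg_init_table() and the sg_dma_* accessors are the real kernel APIs):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Allocate a coherent buffer and pre-mark the sg entry as DMA mapped,
 * so a vring_map_one_sg() patched as above returns the stored address
 * instead of calling dma_map_page() on a nonexistent struct page. */
static int sketch_prepare_coherent_sg(struct device *dev,
				      struct scatterlist *sg, size_t len)
{
	dma_addr_t dma;
	void *va = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);

	if (!va)
		return -ENOMEM;

	sg_init_table(sg, 1);
	sg_dma_address(sg) = dma;	/* the "sg_dma_addr" the cover letter sets */
	sg_dma_len(sg) = len;
	sg->length = len;
	return 0;
}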
2019 Dec 21
0
[PATCH 8/8] DO NOT MERGE: iommu: disable list appending in dma-iommu
...st */
@@ -862,39 +861,16 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		unsigned int s_length = sg_dma_len(s);
 		unsigned int s_iova_len = s->length;
 
+		if (i > 0)
+			cur = sg_next(cur);
+
 		s->offset += s_iova_off;
 		s->length = s_length;
-		sg_dma_address(s) = DMA_MAPPING_ERROR;
-		sg_dma_len(s) = 0;
-
-		/*
-		 * Now fill in the real DMA data. If...
-		 * - there is a valid output segment to append to
-		 * - and this segment starts on an IOVA page boundary
-		 * - but doesn't fall at a segment boundary
-		 * - and wouldn't make the resulti...
2020 Sep 08
2
[PATCH] drm/virtio: drop quirks handling
...kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
@@ -180,9 +175,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 	}
 	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
-		(*ents)[si].addr = cpu_to_le64(use_dma_api
-					       ? sg_dma_address(sg)
-					       : sg_phys(sg));
+		(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
 		(*ents)[si].length = cpu_to_le32(sg->length);
 		(*ents)[si].padding = 0;
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index c93c2db35aaf..1c1d2834547d 100644...
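Dropping the quirk makes the loop rely unconditionally on sg_dma_address(), which is only defined after the list has gone through the DMA API. A minimal sketch of that precondition (dma_dev and the direction are illustrative, not taken from the driver):

/* Must happen before the for_each_sg() loop above ever reads
 * sg_dma_address(): dma_map_sg() returns the number of mapped
 * entries (0 on failure), and an IOMMU may merge entries. */
int mapped = dma_map_sg(dma_dev, shmem->pages->sgl, *nents,
			DMA_TO_DEVICE);
if (mapped == 0)
	return -EFAULT;	/* nothing mapped; addresses would be garbage */
*nents = mapped;	/* account for merged entries */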
2013 Aug 11
2
Fixing nouveau for >4k PAGE_SIZE
...ma_len(sg) >> PAGE_SHIFT;
+		sglen = sg_dma_len(sg) >> shift;
 		end = pte + sglen;
 		if (unlikely(end >= max))
@@ -106,7 +107,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 		len = end - pte;
 
 		for (m = 0; m < len; m++) {
-			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			dma_addr_t addr = sg_dma_address(sg) + (m << shift);
 
 			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
@@ -121,7 +122,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 		}
 		if (m < sglen) {
 			for (; m <...
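The two hunks matter once the CPU page size exceeds the GPU's 4 KiB pages. A worked example of the arithmetic (values illustrative, e.g. a 64 KiB-page ppc64 kernel):

/* PAGE_SHIFT = 16 (64 KiB CPU pages), shift = 12 (4 KiB GPU pages).
 * One sg element covering one CPU page holds
 *     sg_dma_len(sg) >> shift = 65536 >> 12 = 16 GPU PTEs,
 * and consecutive PTEs must advance by 1 << shift = 4 KiB: */
dma_addr_t addr = sg_dma_address(sg) + (m << shift);
/* Using (m << PAGE_SHIFT) here would step 64 KiB per PTE and map
 * memory far beyond the end of the buffer. */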
2016 Nov 22
2
[RFC LINUX PATCH 0/2] Virtio ring works with DMA coherent memory
RPMsg uses dma_alloc_coherent() to allocate memory to be shared with the remote.
In this case, as there are no pages set up by dma_alloc_coherent(),
we cannot get the physical address back from the virtual address; instead,
we set sg_dma_addr to store the DMA address and mark the sg as already DMA
mapped.
When the virtio vring sees that sg_dma_addr is already set, it does not call dma_map_page().
The issue
2016 Dec 06
2
[RFC LINUX PATCH 0/2] Virtio ring works with DMA coherent memory
RPMsg uses dma_alloc_coherent() to allocate memory to be shared with the remote.
In this case, as there are no pages set up by dma_alloc_coherent(),
we cannot get the physical address back from the virtual address; instead,
we set sg_dma_addr to store the DMA address and mark the sg as already DMA
mapped.
When the virtio vring sees that sg_dma_addr is already set, it does not call dma_map_page().
The issue
2020 Feb 05
2
[PATCH 4/4] drm/virtio: move virtio_gpu_mem_entry initialization to new function
..._array(bo->nents, sizeof(struct virtio_gpu_mem_entry),
+			   GFP_KERNEL);
+	if (!bo->ents) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(bo->pages->sgl, sg, bo->nents, si) {
+		bo->ents[si].addr = cpu_to_le64(use_dma_api
+						? sg_dma_address(sg)
+						: sg_phys(sg));
+		bo->ents[si].length = cpu_to_le32(sg->length);
+		bo->ents[si].padding = 0;
+	}
+	return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
@@ -16...
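For reference, the entry layout the loop fills is the device-visible structure from include/uapi/linux/virtio_gpu.h, which is why every store goes through cpu_to_le64()/cpu_to_le32():

struct virtio_gpu_mem_entry {
	__le64 addr;	/* DMA address (or guest-physical, pre-DMA-API) */
	__le32 length;	/* length of the span in bytes */
	__le32 padding;
};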
2013 Aug 11
2
Fixing nouveau for >4k PAGE_SIZE
...uld catch it with a
WARN_ON and maybe simplify the code a bunch while at it...
> > @@ -106,7 +107,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
> >  		len = end - pte;
> >  
> >  		for (m = 0; m < len; m++) {
> > -			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
> > +			dma_addr_t addr = sg_dma_address(sg) + (m << shift);
> >  
> >  			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
> >  			num--;
> > @@ -121,7 +122,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 leng...
2020 Feb 07
1
[PATCH v2 4/4] drm/virtio: move virtio_gpu_mem_entry initialization to new function
...= kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+			     GFP_KERNEL);
+	if (!(*ents)) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(bo->pages->sgl, sg, *nents, si) {
+		(*ents)[si].addr = cpu_to_le64(use_dma_api
+					       ? sg_dma_address(sg)
+					       : sg_phys(sg));
+		(*ents)[si].length = cpu_to_le32(sg->length);
+		(*ents)[si].padding = 0;
+	}
+	return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
@@ -129,...
2013 Aug 11
0
Fixing nouveau for >4k PAGE_SIZE
...> if (unlikely(end >= max))

Please add a WARN_ON(big); in map_sg and map_sg_table if you do this.

> @@ -106,7 +107,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
>  		len = end - pte;
>  
>  		for (m = 0; m < len; m++) {
> -			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
> +			dma_addr_t addr = sg_dma_address(sg) + (m << shift);
>  
>  			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
>  			num--;
> @@ -121,7 +122,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
>  	}
>  	if...
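The suggested guard, as a minimal sketch (placed at the top of map_sg()/map_sg_table(); `big` is the flag the review comment names for large-page mappings, so its exact definition here is assumed):

/* Reviewer's suggestion: the shifted path only handles small
 * (4 KiB) GPU pages, so refuse large-page mappings loudly rather
 * than silently corrupting the page tables. */
if (WARN_ON(big))
	return;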
2016 Dec 06
0
[RFC LINUX PATCH 1/2] virtio_ring: Do not call dma_map_page if sg is already mapped.
.../drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -180,6 +180,12 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
 	if (!vring_use_dma_api(vq->vq.vdev))
 		return (dma_addr_t)sg_phys(sg);
 
+	/* If the sg is already mapped, return the DMA address */
+	if (sg_dma_address(sg)) {
+		sg->length = sg_dma_len(sg);
+		return sg_dma_address(sg);
+	}
+
 	/*
 	 * We can't use dma_map_sg, because we don't use scatterlists in
 	 * the way it expects (we don't guarantee that the scatterlist
--
1.9.1
2016 Dec 08
0
[PATCH 1/2] virtio_ring: Do not call dma_map_page if sg is already mapped.
.../drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -180,6 +180,12 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
 	if (!vring_use_dma_api(vq->vq.vdev))
 		return (dma_addr_t)sg_phys(sg);
 
+	/* If the sg is already mapped, return the DMA address */
+	if (sg_dma_address(sg)) {
+		sg->length = sg_dma_len(sg);
+		return sg_dma_address(sg);
+	}
+
 	/*
 	 * We can't use dma_map_sg, because we don't use scatterlists in
 	 * the way it expects (we don't guarantee that the scatterlist
--
1.9.1
2013 Nov 29
2
Fixing nouveau for >4k PAGE_SIZE
...return;
-	end = pte + sglen;
-	if (unlikely(end >= max))
-		end = max;
-	len = end - pte;
+	/* We don't handle objects that aren't PAGE_SIZE aligned either */
+	if (WARN_ON((offset << 12) & ~PAGE_MASK))
+		return;
-	for (m = 0; m < len; m++) {
-		dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+	/* Iterate sglist elements */
+	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[0];
+		unsigned long m, sglen;
+		dma_addr_t addr;
-		vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
-		num--;
-...
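The rework's iteration pattern, as a standalone sketch (map_one_pte() and the loop variables are illustrative; for_each_sg(), sg_dma_address() and sg_dma_len() are the real scatterlist APIs):

/* Walk each sg element and emit one GPU PTE per (1 << shift)-sized
 * span of its DMA range, instead of assuming every sg entry is
 * exactly one CPU page long. */
struct scatterlist *sg;
unsigned long m;
int i;

for_each_sg(sgl, sg, nents, i) {
	unsigned long sglen = sg_dma_len(sg) >> shift;

	for (m = 0; m < sglen; m++) {
		dma_addr_t addr = sg_dma_address(sg) + (m << shift);

		map_one_pte(addr);	/* hypothetical per-PTE mapper */
	}
}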