Displaying 20 results from an estimated 108 matches for "vring_mapping_error".
2023 May 17
2
[PATCH vhost v9 01/12] virtio_ring: put mapping error check in vring_map_one_sg
This patch puts the DMA address error check in vring_map_one_sg().
The benefits of doing this:
1. makes vring_map_one_sg simpler, with no need to call
vring_mapping_error to check the return value.
2. removes one check of vq->use_dma_api.
Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
---
drivers/virtio/virtio_ring.c | 37 +++++++++++++++++++++---------------
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/drivers/virtio/virtio...
2023 Mar 02
1
[PATCH vhost v1 02/12] virtio_ring: split: separate DMA codes
...],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs)
+{
+ struct scatterlist *sg;
+ unsigned int n;
+
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ return -ENOMEM;
+
+ sg->dma_address = addr;
+ }
+ }
+
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ return -ENOMEM;
+
+ sg...
2023 Mar 21
1
[PATCH vhost v3 01/11] virtio_ring: split: separate dma codes
...unsigned int out_sgs,
+ unsigned int in_sgs)
+{
+ struct scatterlist *sg;
+ unsigned int n;
+
+ if (!vq->use_dma_api)
+ return 0;
+
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ goto err;
+
+ sg->dma_address = addr;
+ }
+ }
+
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ goto err;
+
+ sg->dma_add...
2023 Feb 20
2
[PATCH vhost 01/10] virtio_ring: split: refactor virtqueue_add_split() for premapped
...gt; + unsigned int n;
>
> - START_USE(vq);
> + for (n = 0; n < out_sgs; n++) {
> + for (sg = sgs[n]; sg; sg = sg_next(sg)) {
> + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
> +
> + if (vring_mapping_error(vq, addr))
> + return -ENOMEM;
> +
> + sg->dma_address = addr;
> + }
> + }
> + for (; n < (out_sgs + in_sgs); n++) {
> + for (sg = sgs[n]; sg; sg = sg_next(sg)) {
> +...
2023 Mar 02
1
[PATCH vhost v1 06/12] virtio_ring: packed: separate DMA codes
...ddr_t addr;
@@ -1362,14 +1362,9 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
- goto unmap_release;
-
desc[i].flags = cpu_to_le16(n < out_sgs ?
0 : VRING_DESC_F_WRITE);
- desc[i].addr = cpu_to_le64(addr);
+ desc[i].addr = cpu_to_le64(sg->dma_address);
desc[i].len = cpu_to_le32(sg->length);
i++;
}
@@ -1380,7 +1375,7 @@ static i...
2023 Mar 22
1
[PATCH vhost v4 01/11] virtio_ring: split: separate dma codes
...unsigned int out_sgs,
+ unsigned int in_sgs)
+{
+ struct scatterlist *sg;
+ unsigned int n;
+
+ if (!vq->use_dma_api)
+ return 0;
+
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ goto err;
+
+ sg->dma_address = addr;
+ }
+ }
+
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ goto err;
+
+ sg->dma_add...
2023 Apr 25
1
[PATCH vhost v7 01/11] virtio_ring: split: separate dma codes
...unsigned int out_sgs,
+ unsigned int in_sgs)
+{
+ struct scatterlist *sg;
+ unsigned int n;
+
+ if (!vq->use_dma_api)
+ return 0;
+
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ goto err;
+
+ sg->dma_address = addr;
+ }
+ }
+
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ goto err;
+
+ sg->dma_add...
2018 Mar 16
2
[PATCH RFC 2/2] virtio_ring: support packed ring
...gt;
>>>>> + }
>>>>> + }
>>>>> + for (; n < (out_sgs + in_sgs); n++) {
>>>>> + for (sg = sgs[n]; sg; sg = sg_next(sg)) {
>>>>> + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
>>>>> + if (vring_mapping_error(vq, addr))
>>>>> + goto unmap_release;
>>>>> +
>>>>> + flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT |
>>>>> + VRING_DESC_F_WRITE |
>>>>> + VRING_DESC_F_AVAIL(vq->wrap_counter) |
>>>>>...
2018 Mar 16
2
[PATCH RFC 2/2] virtio_ring: support packed ring
...gt;
>>>>> + }
>>>>> + }
>>>>> + for (; n < (out_sgs + in_sgs); n++) {
>>>>> + for (sg = sgs[n]; sg; sg = sg_next(sg)) {
>>>>> + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
>>>>> + if (vring_mapping_error(vq, addr))
>>>>> + goto unmap_release;
>>>>> +
>>>>> + flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT |
>>>>> + VRING_DESC_F_WRITE |
>>>>> + VRING_DESC_F_AVAIL(vq->wrap_counter) |
>>>>>...
2018 Mar 16
0
[PATCH RFC 2/2] virtio_ring: support packed ring
...> > > + }
> > > > > > + for (; n < (out_sgs + in_sgs); n++) {
> > > > > > + for (sg = sgs[n]; sg; sg = sg_next(sg)) {
> > > > > > + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
> > > > > > + if (vring_mapping_error(vq, addr))
> > > > > > + goto unmap_release;
> > > > > > +
> > > > > > + flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT |
> > > > > > + VRING_DESC_F_WRITE |
> > > > > > + VRING_DESC_F_AVA...
2016 Feb 03
0
[PATCH v7 5/9] virtio_ring: Support DMA APIs
...VICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return 0;
+
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
unsigned int total_sg, gfp_t gfp)
{
@@ -161,7...
2023 Jul 10
10
[PATCH vhost v11 00/10] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with the DMA APIs when the virtio features do not
include VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let the DMA APIs return the physical address by virtio-device. But the DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get the physical address from the virtio-net
driver as the DMA address. But the maintainers of xsk may want to use
2015 Oct 28
0
[PATCH v3 0/3] virtio DMA API core stuff
...us what it was? ...
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -292,7 +292,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
vq, desc, total_sg * sizeof(struct vring_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, vq->vring.desc[head].addr))
+ if (vring_mapping_error(vq, addr))
goto unmap_release;
vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
That wasn't going to be the reason for Christian's failu...
2016 Feb 03
1
[PATCH v7 5/9] virtio_ring: Support DMA APIs
..._unmap_page(vring_dma_dev(vq),
> + virtio64_to_cpu(vq->vq.vdev, desc->addr),
> + virtio32_to_cpu(vq->vq.vdev, desc->len),
> + (flags & VRING_DESC_F_WRITE) ?
> + DMA_FROM_DEVICE : DMA_TO_DEVICE);
> + }
> +}
> +
> +static int vring_mapping_error(const struct vring_virtqueue *vq,
> + dma_addr_t addr)
> +{
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return 0;
> +
> + return dma_mapping_error(vring_dma_dev(vq), addr);
> +}
> +
> static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
>...
2016 Feb 03
1
[PATCH v7 5/9] virtio_ring: Support DMA APIs
..._unmap_page(vring_dma_dev(vq),
> + virtio64_to_cpu(vq->vq.vdev, desc->addr),
> + virtio32_to_cpu(vq->vq.vdev, desc->len),
> + (flags & VRING_DESC_F_WRITE) ?
> + DMA_FROM_DEVICE : DMA_TO_DEVICE);
> + }
> +}
> +
> +static int vring_mapping_error(const struct vring_virtqueue *vq,
> + dma_addr_t addr)
> +{
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return 0;
> +
> + return dma_mapping_error(vring_dma_dev(vq), addr);
> +}
> +
> static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
>...
2015 Oct 28
10
[PATCH v3 0/3] virtio DMA API core stuff
This switches virtio to use the DMA API unconditionally. I'm sure
it breaks things, but it seems to work on x86 using virtio-pci, with
and without Xen, and using both the modern 1.0 variant and the
legacy variant.
Changes from v2:
- Fix really embarrassing bug. This version actually works.
Changes from v1:
- Fix an endian conversion error causing a BUG to hit.
- Fix a DMA ordering issue
2015 Oct 28
10
[PATCH v3 0/3] virtio DMA API core stuff
This switches virtio to use the DMA API unconditionally. I'm sure
it breaks things, but it seems to work on x86 using virtio-pci, with
and without Xen, and using both the modern 1.0 variant and the
legacy variant.
Changes from v2:
- Fix really embarrassing bug. This version actually works.
Changes from v1:
- Fix an endian conversion error causing a BUG to hit.
- Fix a DMA ordering issue
2023 Aug 10
12
[PATCH vhost v13 00/12] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with the DMA APIs when the virtio features do not
include VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let the DMA APIs return the physical address by virtio-device. But the DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get the physical address from the virtio-net
driver as the DMA address. But the maintainers of xsk may want to use
2023 Aug 10
12
[PATCH vhost v13 00/12] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with the DMA APIs when the virtio features do not
include VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let the DMA APIs return the physical address by virtio-device. But the DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get the physical address from the virtio-net
driver as the DMA address. But the maintainers of xsk may want to use
2018 May 29
0
[RFC v5 2/5] virtio_ring: support creating packed ring
...gt; > +
> > + /* Driver ring wrap counter. */
> > + u8 avail_wrap_counter;
> > +
> > + /* Device ring wrap counter. */
> > + u8 used_wrap_counter;
>
> How about just use boolean?
I can change it to bool if you want.
>
[...]
> > -static int vring_mapping_error(const struct vring_virtqueue *vq,
> > - dma_addr_t addr)
> > -{
> > - if (!vring_use_dma_api(vq->vq.vdev))
> > - return 0;
> > -
> > - return dma_mapping_error(vring_dma_dev(vq), addr);
> > -}
>
> It looks to me if you keep vring_mapping...