Displaying 20 results from an estimated 32 matches for "desc_extra".
2023 Mar 22
1
[PATCH vhost v4 02/11] virtio_ring: packed: separate dma codes
...cked(struct virtqueue *_vq,
else
desc[i].flags = flags;
- desc[i].addr = cpu_to_le64(addr);
+ desc[i].addr = cpu_to_le64(vring_sg_address(sg));
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
if (unlikely(vq->use_dma_api)) {
- vq->packed.desc_extra[curr].addr = addr;
+ vq->packed.desc_extra[curr].addr = vring_sg_address(sg);
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
le16_to_cpu(flags);
@@ -1547,26 +1544,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
E...
2023 Mar 02
1
[PATCH vhost v1 06/12] virtio_ring: packed: separate DMA codes
...d(struct vring_virtqueue *vq,
else
desc[i].flags = flags;
- desc[i].addr = cpu_to_le64(addr);
+ desc[i].addr = cpu_to_le64(sg->dma_address);
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
if (unlikely(vq->use_dma_api)) {
- vq->packed.desc_extra[curr].addr = addr;
+ vq->packed.desc_extra[curr].addr = sg->dma_address;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
le16_to_cpu(flags);
@@ -1576,25 +1555,6 @@ static inline int virtqueue_add_vring_packed(struct vring_virtqueue...
2023 Mar 22
1
[PATCH vhost v4 04/11] virtio_ring: split: support premapped
...truct vring_desc_state_packed {
@@ -448,7 +449,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
}
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
- unsigned int i)
+ unsigned int i, bool dma_map_internal)
{
struct vring_desc_extra *extra = vq->split.desc_extra;
u16 flags;
@@ -465,6 +466,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (!dma_map_internal)
+ goto out;
+
dma_unmap_page(vrin...
2023 Mar 21
1
[PATCH vhost v3 04/11] virtio_ring: split: support premapped
...> @@ -448,7 +449,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> }
>
> static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> - unsigned int i)
> + unsigned int i, bool map_inter)
> {
> struct vring_desc_extra *extra = vq->split.desc_extra;
> u16 flags;
> @@ -465,6 +466,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> (flags & VRING_DESC_F_WRITE) ?
> DMA_FROM_DEVICE : DMA_TO_DEVICE);
> } else {
> + if (!map_inter)
> + goto o...
2023 May 26
1
[PATCH] virtio_ring: validate used buffer length
...= false;
+module_param(force_used_validation, bool, 0444);
+
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
@@ -105,6 +108,9 @@ struct vring_virtqueue_split {
struct vring_desc_state_split *desc_state;
struct vring_desc_extra *desc_extra;
+ /* Maximum in buffer length, NULL means no used validation */
+ u32 *buflen;
+
/* DMA address and size information */
dma_addr_t queue_dma_addr;
size_t queue_size_in_bytes;
@@ -145,6 +151,9 @@ struct vring_virtqueue_packed {
struct vring_desc_state_packed *desc_state;
str...
2023 Mar 21
11
[PATCH vhost v3 00/11] virtio core prepares for AF_XDP
XDP socket(AF_XDP) is an excellent bypass kernel network framework. The zero
copy feature of xsk (XDP socket) needs to be supported by the driver. The
performance of zero copy is very good.
ENV: Qemu with vhost.
vhost cpu | Guest APP CPU |Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 07
2
[PATCH vhost v1 03/12] virtio_ring: split: introduce virtqueue_add_split_premapped()
...st struct vring_virtqueue *vq,
> }
>
> static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> - unsigned int i)
> + unsigned int i, bool dma_map)
> {
> struct vring_desc_extra *extra = vq->split.desc_extra;
> u16 flags;
> @@ -457,6 +458,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> (flags & VRING_DESC_F_WRITE) ?
> DMA_FROM_DEVICE : DMA_TO_DEVI...
2023 Mar 22
11
[PATCH vhost v4 00/11] virtio core prepares for AF_XDP
XDP socket(AF_XDP) is an excellent bypass kernel network framework. The zero
copy feature of xsk (XDP socket) needs to be supported by the driver. The
performance of zero copy is very good.
ENV: Qemu with vhost.
vhost cpu | Guest APP CPU |Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 24
11
[PATCH vhost v5 00/11] virtio core prepares for AF_XDP
XDP socket(AF_XDP) is an excellent bypass kernel network framework. The zero
copy feature of xsk (XDP socket) needs to be supported by the driver. The
performance of zero copy is very good.
ENV: Qemu with vhost.
vhost cpu | Guest APP CPU |Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 27
11
[PATCH vhost v6 00/11] virtio core prepares for AF_XDP
XDP socket(AF_XDP) is an excellent bypass kernel network framework. The zero
copy feature of xsk (XDP socket) needs to be supported by the driver. The
performance of zero copy is very good.
ENV: Qemu with vhost.
vhost cpu | Guest APP CPU |Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 May 09
12
[PATCH vhost v8 00/12] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use
2023 May 31
1
[PATCH] virtio_ring: validate used buffer length
...then don't validate
at all - driver that did not add any inbufs is not going to look
at length.
Allowing passing NULL as length and skipping validation
if len = 0 might also be a good idea.
> >
> >
> > > >
> > > > > > Second what's wrong with dma_desc_extra that we already maintain?
> > > > > > Third motivation - it's part and parcel of the hardening effort yes?
> > > > >
> > > > > They are different. dma_desc_extra is for a descriptor ring, but this
> > > > > is for a used ring. Techn...
2023 Apr 25
12
[PATCH vhost v7 00/11] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use
2023 May 31
1
[PATCH] virtio_ring: validate used buffer length
...parameter?
> >
> > If we enable it unconditionally for modern devices, it may break some
> > buggy modern device (vsock without a fix as an example).
> >
> > >
> > >
> > > > >
> > > > > > > Second what's wrong with dma_desc_extra that we already maintain?
> > > > > > > Third motivation - it's part and parcel of the hardening effort yes?
> > > > > >
> > > > > > They are different. dma_desc_extra is for a descriptor ring, but this
> > > > > > is fo...
2023 Jun 01
1
[PATCH] virtio_ring: validate used buffer length
...a driver detects a malicious device, is it
really good to try to keep working or warn and disable the device? Or
maybe we can have an option for the user to decide?
>
>
> > >
> > >
> > > > >
> > > > > > > Second what's wrong with dma_desc_extra that we already maintain?
> > > > > > > Third motivation - it's part and parcel of the hardening effort yes?
> > > > > >
> > > > > > They are different. dma_desc_extra is for a descriptor ring, but this
> > > > > > is fo...
2023 Mar 07
1
[PATCH vhost v1 03/12] virtio_ring: split: introduce virtqueue_add_split_premapped()
...bool dma_map; /* Addr is mapped by virtio core or not. */
>
> This will stress the cache, can we pack the boolean into indir_desc?
Rethink about this, I think we cannot pack this into indir_desc.
Because we may save ctx to indir_desc.
We can save this info to vring_desc_extra.addr, null means not dma mapped by
virtio core.
Thanks.
>
> > };
> >
> > struct vring_desc_state_packed {
> > @@ -440,7 +441,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> > }
> >
> > static unsigned int vring_...
2023 Feb 20
1
[PATCH vhost 04/10] virtio_ring: split: introduce virtqueue_add_split_premapped()
...> }
> >
> > static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> > - unsigned int i)
> > + unsigned int i, bool premapped)
> > {
> > struct vring_desc_extra *extra = vq->split.desc_extra;
> > u16 flags;
> > @@ -457,6 +458,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> > (flags & VRING_DESC_F_WRITE) ?
> > DMA_FROM_...
2023 Jun 01
1
[PATCH] virtio_ring: validate used buffer length
...If we enable it unconditionally for modern devices, it may break some
> > > buggy modern device (vsock without a fix as an example).
> > >
> > > >
> > > >
> > > > > >
> > > > > > > > Second what's wrong with dma_desc_extra that we already maintain?
> > > > > > > > Third motivation - it's part and parcel of the hardening effort yes?
> > > > > > >
> > > > > > > They are different. dma_desc_extra is for a descriptor ring, but this
> > > >...
2023 Jun 22
1
[PATCH vhost v10 05/10] virtio_ring: split-detach: support return dma info to driver
...needs_kick;
> }
>
> -static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> - void **ctx)
> +static void detach_cursor_init_split(struct vring_virtqueue *vq,
> + struct virtqueue_detach_cursor *cursor, u16 head)
> +{
> + struct vring_desc_extra *extra;
> +
> + extra = &vq->split.desc_extra[head];
> +
> + /* Clear data ptr. */
> + vq->split.desc_state[head].data = NULL;
> +
> + cursor->head = head;
> + cursor->done = 0;
> +
> + if (extra->flags & VRING_DESC_F_INDIRECT) {
> + cursor-&...
2023 Feb 14
11
[PATCH vhost 00/10] virtio core prepares for AF_XDP
XDP socket(AF_XDP) is an excellent bypass kernel network framework. The zero
copy feature of xsk (XDP socket) needs to be supported by the driver. The
performance of zero copy is very good.
ENV: Qemu with vhost.
vhost cpu | Guest APP CPU |Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%