Displaying 20 results from an estimated 23 matches for "virtnet_rq_free_unused_buf".
2023 Jan 13
2
[PATCH net-next 2/2] virtio_net: Reuse buffer free function
virtnet_rq_free_unused_buf() helper function to free the buffer
already exists. Avoid code duplication by reusing existing function.
Signed-off-by: Parav Pandit <parav at nvidia.com>
---
drivers/net/virtio_net.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/drivers/net/virtio_net.c b/driv...
2023 May 05
2
[PATCH v4] virtio_net: suppress cpu stall when free_unused_bufs
...e case when the virtqueue is very large?
Thanks
> }
>
> for (i = 0; i < vi->max_queue_pairs; i++) {
> struct virtqueue *vq = vi->rq[i].vq;
> while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> virtnet_rq_free_unused_buf(vq, buf);
> + cond_resched();
> }
> }
>
> --
> 2.20.1
>
2023 May 07
1
[PATCH v4] virtio_net: suppress cpu stall when free_unused_bufs
...k of these queues.
> > }
> >
> > for (i = 0; i < vi->max_queue_pairs; i++) {
> > struct virtqueue *vq = vi->rq[i].vq;
> > while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> > virtnet_rq_free_unused_buf(vq, buf);
> > + cond_resched();
> > }
> > }
> >
> > --
> > 2.20.1
> >
2023 Jan 13
3
[PATCH net-next 0/2] Small packet processing handling changes
Hi,
These two changes improve the small packet handling.
Patch summary:
patch-1 fixes the length check by considering Ethernet 60B frame size
patch-2 avoids code duplication by reusing the existing buffer free helper
Please review.
Parav Pandit (2):
virtio_net: Fix short frame length check
virtio_net: Reuse buffer free function
drivers/net/virtio_net.c | 10 ++--------
1 file changed, 2
2023 Jun 22
1
[PATCH vhost v10 10/10] virtio_net: support dma premapped
...ch_unused_buf(sq->vq, sq->premapped)) != NULL)
> + virtnet_sq_free_unused_buf(sq->vq, buf);
> }
>
> for (i = 0; i < vi->max_queue_pairs; i++) {
> - struct virtqueue *vq = vi->rq[i].vq;
> - while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> - virtnet_rq_free_unused_buf(vq, buf);
> + struct receive_queue *rq = &vi->rq[i];
> +
> + while ((buf = virtnet_detach_unused_buf(rq->vq, rq->premapped)) != NULL)
> + virtnet_rq_free_unused_buf(rq->vq, buf);
> }
> }
>
> @@ -3658,6 +3765,18 @@ static int virtnet_find_vqs(struct v...
2023 Aug 21
3
[PATCH net-next v3] virtio_net: Introduce skb_vnet_common_hdr to avoid typecasting
...rtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -303,6 +303,14 @@ struct padded_vnet_hdr {
char padding[12];
};
+struct virtio_net_common_hdr {
+ union {
+ struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
+ struct virtio_net_hdr_v1_hash hash_v1_hdr;
+ };
+};
+
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
@@ -344,9 +352,10 @@ static int rxq2vq(int rxq)
return rxq * 2;
}
-static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_com...
2023 Aug 17
1
[PATCH net-next v2] virtio_net: Introduce skb_vnet_common_hdr to avoid typecasting
...12 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -303,6 +303,13 @@ struct padded_vnet_hdr {
char padding[12];
};
+struct virtio_net_common_hdr {
+ union {
+ struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
+ struct virtio_net_hdr_v1_hash hash_v1_hdr;
+ };
+};
+
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
@@ -344,9 +351,10 @@ static int rxq2vq(int rxq)
return rxq * 2;
}
-static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_com...
2023 Apr 03
1
[PATCH net-next 3/8] virtio_net: introduce virtnet_xdp_handler() to separate the logic of run xdp
...'d prefer this to be done on top unless it is a must. But I don't see
any advantage of introducing this, it's partial mapping of XDP action
and it needs to be extended when XDP action is extended. (And we've
already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> +
> static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>
> @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> return ret;
> }
>
> +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, s...
2023 May 04
0
[PATCH v4] virtio_net: suppress cpu stall when free_unused_bufs
...eue_detach_unused_buf(vq)) != NULL)
> virtnet_sq_free_unused_buf(vq, buf);
> + cond_resched();
> }
>
> for (i = 0; i < vi->max_queue_pairs; i++) {
> struct virtqueue *vq = vi->rq[i].vq;
> while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> virtnet_rq_free_unused_buf(vq, buf);
> + cond_resched();
> }
> }
>
> --
> 2.20.1
2023 Aug 17
1
[PATCH net-next v2] virtio_net: Introduce skb_vnet_common_hdr to avoid typecasting
...; + struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
> + struct virtio_net_hdr_v1_hash hash_v1_hdr;
> + };
> +};
Perhaps even add in struct virtio_net_hdr. As that is the original of
the three structs, and all the initial fields overlap.
> +
> static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>
> @@ -344,9 +351,10 @@ static int rxq2vq(int rxq)
> return rxq * 2;
> }
>
> -static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)...
2023 Apr 04
1
[PATCH net-next 3/8] virtio_net: introduce virtnet_xdp_handler() to separate the logic of run xdp
...extend when XDP action is extended. At least I have not thought of this
> > situation.
>
> What's the advantages of such indirection compared to using XDP action directly?
>
> Thanks
>
> >
> >
> > >
> > > > +
> > > > static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >
> > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > return ret;
> > > >...
2023 Apr 04
1
[PATCH net-next 3/8] virtio_net: introduce virtnet_xdp_handler() to separate the logic of run xdp
...thought of this
> > > situation.
> >
> > What's the advantages of such indirection compared to using XDP action directly?
> >
> > Thanks
> >
> > >
> > >
> > > >
> > > > > +
> > > > > static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > >
> > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > > return ret...
2023 Apr 04
1
[PATCH net-next 3/8] virtio_net: introduce virtnet_xdp_handler() to separate the logic of run xdp
...vantages of such indirection compared to using XDP action directly?
> > > >
> > > > Thanks
> > > >
> > > > >
> > > > >
> > > > > >
> > > > > > > +
> > > > > > > static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > >
> > > > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > >...
2023 Mar 06
4
[PATCH net 0/2] add checking sq is full inside xdp xmit
If the queue of xdp xmit is not an independent queue, then when the xdp
xmit used all the desc, the xmit from the __dev_queue_xmit() may encounter
the following error.
net ens4: Unexpected TXQ (0) queue failure: -28
This patch adds a check whether sq is full in XDP Xmit.
Thanks.
Xuan Zhuo (2):
virtio_net: separate the logic of checking whether sq is full
virtio_net: add checking sq is full
2023 Aug 17
1
[PATCH net-next v2] virtio_net: Introduce skb_vnet_common_hdr to avoid typecasting
...>> +};
>
> Perhaps even add in struct virtio_net_hdr. As that is the original of
> the three structs, and all the initial fields overlap.
>
But I didn't use virtio_net_hdr in this patch; is it redundant to put it
here? What do you think?
>> +
>> static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
>> static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>>
>> @@ -344,9 +351,10 @@ static int rxq2vq(int rxq)
>> return rxq * 2;
>> }
>>
>> -static inline struct virtio_net_hdr_mrg_rxbuf *sk...
2023 Jul 10
10
[PATCH vhost v11 00/10] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use
2023 Aug 10
12
[PATCH vhost v13 00/12] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use
2023 Aug 10
12
[PATCH vhost v13 00/12] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use
2023 Jun 02
12
[PATCH vhost v10 00/10] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use
2023 Jun 02
12
[PATCH vhost v10 00/10] virtio core prepares for AF_XDP
## About DMA APIs
Now, virtio may not work with DMA APIs when virtio features do not have
VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let DMA APIs return phy address by virtio-device. But DMA APIs just
work with the "real" devices.
2. I tried to let xsk support callbacks to get phy address from virtio-net
driver as the dma address. But the maintainers of xsk may want to use