Xuan Zhuo
2023-Apr-03 04:12 UTC
[PATCH net-next 3/8] virtio_net: introduce virtnet_xdp_handler() to seprate the logic of run xdp
On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang at redhat.com> wrote:
> On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo at linux.alibaba.com> wrote:
> >
> > At present, we have two similar logic to perform the XDP prog.
> >
> > Therefore, this PATCH separates the code of executing XDP, which is
> > conducive to later maintenance.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> >  1 file changed, 75 insertions(+), 67 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index bb426958cdd4..72b9d6ee4024 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> >         char padding[12];
> >  };
> >
> > +enum {
> > +       /* xdp pass */
> > +       VIRTNET_XDP_RES_PASS,
> > +       /* drop packet. the caller needs to release the page. */
> > +       VIRTNET_XDP_RES_DROP,
> > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > +       VIRTNET_XDP_RES_CONSUMED,
> > +};
>
> I'd prefer this to be done on top unless it is a must. But I don't see
> any advantage of introducing this, it's partial mapping of XDP action
> and it needs to be extended when XDP action is extended. (And we've
> already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)

No, these are the three states of buffer after XDP processing.

* PASS: goto make skb
* DROP: we should release buffer
* CONSUMED: xdp prog used the buffer, we do nothing

The latter two are not particularly related to XDP ACTION. And it does not need
to extend when XDP action is extended. At least I have not thought of this
situation.

>
> > +
> >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >
> > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >         return ret;
> >  }
> >
> > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > +                              struct net_device *dev,
> > +                              unsigned int *xdp_xmit,
> > +                              struct virtnet_rq_stats *stats)
> > +{
> > +       struct xdp_frame *xdpf;
> > +       int err;
> > +       u32 act;
> > +
> > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > +       stats->xdp_packets++;
> > +
> > +       switch (act) {
> > +       case XDP_PASS:
> > +               return VIRTNET_XDP_RES_PASS;
> > +
> > +       case XDP_TX:
> > +               stats->xdp_tx++;
> > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > +               if (unlikely(!xdpf))
> > +                       return VIRTNET_XDP_RES_DROP;
> > +
> > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > +               if (unlikely(!err)) {
> > +                       xdp_return_frame_rx_napi(xdpf);
> > +               } else if (unlikely(err < 0)) {
> > +                       trace_xdp_exception(dev, xdp_prog, act);
> > +                       return VIRTNET_XDP_RES_DROP;
> > +               }
> > +
> > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > +               return VIRTNET_XDP_RES_CONSUMED;
> > +
> > +       case XDP_REDIRECT:
> > +               stats->xdp_redirects++;
> > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > +               if (err)
> > +                       return VIRTNET_XDP_RES_DROP;
> > +
> > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > +               return VIRTNET_XDP_RES_CONSUMED;
> > +
> > +       default:
> > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > +               fallthrough;
> > +       case XDP_ABORTED:
> > +               trace_xdp_exception(dev, xdp_prog, act);
> > +               fallthrough;
> > +       case XDP_DROP:
> > +               return VIRTNET_XDP_RES_DROP;
> > +       }
> > +}
> > +
> >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> >  {
> >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> >         struct page *page = virt_to_head_page(buf);
> >         unsigned int delta = 0;
> >         struct page *xdp_page;
> > -       int err;
> >         unsigned int metasize = 0;
> >
> >         len -= vi->hdr_len;
> > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> >         xdp_prog = rcu_dereference(rq->xdp_prog);
> >         if (xdp_prog) {
> >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > -               struct xdp_frame *xdpf;
> >                 struct xdp_buff xdp;
> >                 void *orig_data;
> >                 u32 act;
> > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> >                                  xdp_headroom, len, true);
> >                 orig_data = xdp.data;
> > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > -               stats->xdp_packets++;
> > +
> > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> >
> >                 switch (act) {
> > -               case XDP_PASS:
> > +               case VIRTNET_XDP_RES_PASS:
> >                         /* Recalculate length in case bpf program changed it */
> >                         delta = orig_data - xdp.data;
> >                         len = xdp.data_end - xdp.data;
> >                         metasize = xdp.data - xdp.data_meta;
> >                         break;
> > -               case XDP_TX:
> > -                       stats->xdp_tx++;
> > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > -                       if (unlikely(!xdpf))
> > -                               goto err_xdp;
> > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > -                       if (unlikely(!err)) {
> > -                               xdp_return_frame_rx_napi(xdpf);
> > -                       } else if (unlikely(err < 0)) {
> > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                               goto err_xdp;
> > -                       }
> > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > -                       rcu_read_unlock();
> > -                       goto xdp_xmit;
> > -               case XDP_REDIRECT:
> > -                       stats->xdp_redirects++;
> > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > -                       if (err)
> > -                               goto err_xdp;
> > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > +
> > +               case VIRTNET_XDP_RES_CONSUMED:
> >                         rcu_read_unlock();
> >                         goto xdp_xmit;
> > -               default:
> > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > -                       fallthrough;
> > -               case XDP_ABORTED:
> > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                       goto err_xdp;
> > -               case XDP_DROP:
> > +
> > +               case VIRTNET_XDP_RES_DROP:
> >                         goto err_xdp;
> >                 }
> >         }
> > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> >         if (xdp_prog) {
> >                 unsigned int xdp_frags_truesz = 0;
> >                 struct skb_shared_info *shinfo;
> > -               struct xdp_frame *xdpf;
> >                 struct page *xdp_page;
> >                 struct xdp_buff xdp;
> >                 void *data;
> > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> >                 if (unlikely(err))
> >                         goto err_xdp_frags;
> >
> > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > -               stats->xdp_packets++;
> > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> >
> >                 switch (act) {
> > -               case XDP_PASS:
> > +               case VIRTNET_XDP_RES_PASS:
> >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> >                         if (unlikely(!head_skb))
> >                                 goto err_xdp_frags;
> >
> >                         rcu_read_unlock();
> >                         return head_skb;
> > -               case XDP_TX:
> > -                       stats->xdp_tx++;
> > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > -                       if (unlikely(!xdpf)) {
> > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
>
> Nit: This debug is lost after the conversion.

Will fix.

Thanks.

>
> Thanks
>
> > -                               goto err_xdp_frags;
> > -                       }
> > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > -                       if (unlikely(!err)) {
> > -                               xdp_return_frame_rx_napi(xdpf);
> > -                       } else if (unlikely(err < 0)) {
> > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                               goto err_xdp_frags;
> > -                       }
> > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > -                       rcu_read_unlock();
> > -                       goto xdp_xmit;
> > -               case XDP_REDIRECT:
> > -                       stats->xdp_redirects++;
> > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > -                       if (err)
> > -                               goto err_xdp_frags;
> > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > +
> > +               case VIRTNET_XDP_RES_CONSUMED:
> >                         rcu_read_unlock();
> >                         goto xdp_xmit;
> > -               default:
> > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > -                       fallthrough;
> > -               case XDP_ABORTED:
> > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                       fallthrough;
> > -               case XDP_DROP:
> > +
> > +               case VIRTNET_XDP_RES_DROP:
> >                         goto err_xdp_frags;
> >                 }
> > err_xdp_frags:
> > --
> > 2.32.0.3.g01195cf9f
> >
>
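To make the buffer-ownership contract described above concrete, here is a minimal caller-side sketch. It is hypothetical (not part of the posted patch), the function name virtnet_rx_xdp_example() is invented, and real callers such as receive_small() also handle headroom adjustment, statistics and RCU.

static struct sk_buff *virtnet_rx_xdp_example(struct net_device *dev,
                                              struct bpf_prog *xdp_prog,
                                              struct xdp_buff *xdp,
                                              struct page *page,
                                              unsigned int *xdp_xmit,
                                              struct virtnet_rq_stats *stats)
{
        switch (virtnet_xdp_handler(xdp_prog, xdp, dev, xdp_xmit, stats)) {
        case VIRTNET_XDP_RES_PASS:
                /* The program let the packet through: the buffer still
                 * belongs to the driver, so build an skb around it
                 * (headroom/length fixups omitted in this sketch). */
                return build_skb(xdp->data_hard_start, PAGE_SIZE);
        case VIRTNET_XDP_RES_CONSUMED:
                /* XDP_TX or XDP_REDIRECT took ownership: nothing to free. */
                return NULL;
        case VIRTNET_XDP_RES_DROP:
        default:
                /* XDP_DROP or an error inside the handler: the caller
                 * releases the page. */
                put_page(page);
                return NULL;
        }
}

This is the three-way split that receive_small() and receive_mergeable() perform in the patch quoted above.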
Jason Wang
2023-Apr-04 05:04 UTC
[PATCH net-next 3/8] virtio_net: introduce virtnet_xdp_handler() to seprate the logic of run xdp
On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo at linux.alibaba.com> wrote:
>
> On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang at redhat.com> wrote:
> > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo at linux.alibaba.com> wrote:
> > >
> > > At present, we have two similar logic to perform the XDP prog.
> > >
> > > Therefore, this PATCH separates the code of executing XDP, which is
> > > conducive to later maintenance.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index bb426958cdd4..72b9d6ee4024 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > >         char padding[12];
> > >  };
> > >
> > > +enum {
> > > +       /* xdp pass */
> > > +       VIRTNET_XDP_RES_PASS,
> > > +       /* drop packet. the caller needs to release the page. */
> > > +       VIRTNET_XDP_RES_DROP,
> > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > +       VIRTNET_XDP_RES_CONSUMED,
> > > +};
> >
> > I'd prefer this to be done on top unless it is a must. But I don't see
> > any advantage of introducing this, it's partial mapping of XDP action
> > and it needs to be extended when XDP action is extended. (And we've
> > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
>
> No, these are the three states of buffer after XDP processing.
>
> * PASS: goto make skb

XDP_PASS goes for this.

> * DROP: we should release buffer

XDP_DROP and error conditions go with this.

> * CONSUMED: xdp prog used the buffer, we do nothing

XDP_TX/XDP_REDIRECT goes for this.

So the virtnet_xdp_handler() just maps XDP ACTION plus the error
conditions to the above three states.

We can simply map error to XDP_DROP like:

        case XDP_TX:
                stats->xdp_tx++;
                xdpf = xdp_convert_buff_to_frame(xdp);
                if (unlikely(!xdpf))
                        return XDP_DROP;

A good side effect is that it avoids passing the xdp_xmit pointer to
the function.

>
> The latter two are not particularly related to XDP ACTION. And it does not need
> to extend when XDP action is extended. At least I have not thought of this
> situation.

What's the advantage of such an indirection compared to using the XDP
action directly?

Thanks

>
> >
> > > +
> > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >
> > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > >         return ret;
> > >  }
> > >
> > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > +                              struct net_device *dev,
> > > +                              unsigned int *xdp_xmit,
> > > +                              struct virtnet_rq_stats *stats)
> > > +{
> > > +       struct xdp_frame *xdpf;
> > > +       int err;
> > > +       u32 act;
> > > +
> > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > +       stats->xdp_packets++;
> > > +
> > > +       switch (act) {
> > > +       case XDP_PASS:
> > > +               return VIRTNET_XDP_RES_PASS;
> > > +
> > > +       case XDP_TX:
> > > +               stats->xdp_tx++;
> > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > +               if (unlikely(!xdpf))
> > > +                       return VIRTNET_XDP_RES_DROP;
> > > +
> > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > +               if (unlikely(!err)) {
> > > +                       xdp_return_frame_rx_napi(xdpf);
> > > +               } else if (unlikely(err < 0)) {
> > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > +                       return VIRTNET_XDP_RES_DROP;
> > > +               }
> > > +
> > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > +
> > > +       case XDP_REDIRECT:
> > > +               stats->xdp_redirects++;
> > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > +               if (err)
> > > +                       return VIRTNET_XDP_RES_DROP;
> > > +
> > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > +
> > > +       default:
> > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > +               fallthrough;
> > > +       case XDP_ABORTED:
> > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > +               fallthrough;
> > > +       case XDP_DROP:
> > > +               return VIRTNET_XDP_RES_DROP;
> > > +       }
> > > +}
> > > +
> > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > >  {
> > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > >         struct page *page = virt_to_head_page(buf);
> > >         unsigned int delta = 0;
> > >         struct page *xdp_page;
> > > -       int err;
> > >         unsigned int metasize = 0;
> > >
> > >         len -= vi->hdr_len;
> > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > >         if (xdp_prog) {
> > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > -               struct xdp_frame *xdpf;
> > >                 struct xdp_buff xdp;
> > >                 void *orig_data;
> > >                 u32 act;
> > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > >                                  xdp_headroom, len, true);
> > >                 orig_data = xdp.data;
> > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > -               stats->xdp_packets++;
> > > +
> > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > >
> > >                 switch (act) {
> > > -               case XDP_PASS:
> > > +               case VIRTNET_XDP_RES_PASS:
> > >                         /* Recalculate length in case bpf program changed it */
> > >                         delta = orig_data - xdp.data;
> > >                         len = xdp.data_end - xdp.data;
> > >                         metasize = xdp.data - xdp.data_meta;
> > >                         break;
> > > -               case XDP_TX:
> > > -                       stats->xdp_tx++;
> > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > -                       if (unlikely(!xdpf))
> > > -                               goto err_xdp;
> > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > -                       if (unlikely(!err)) {
> > > -                               xdp_return_frame_rx_napi(xdpf);
> > > -                       } else if (unlikely(err < 0)) {
> > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                               goto err_xdp;
> > > -                       }
> > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > -                       rcu_read_unlock();
> > > -                       goto xdp_xmit;
> > > -               case XDP_REDIRECT:
> > > -                       stats->xdp_redirects++;
> > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > -                       if (err)
> > > -                               goto err_xdp;
> > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > +
> > > +               case VIRTNET_XDP_RES_CONSUMED:
> > >                         rcu_read_unlock();
> > >                         goto xdp_xmit;
> > > -               default:
> > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > -                       fallthrough;
> > > -               case XDP_ABORTED:
> > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                       goto err_xdp;
> > > -               case XDP_DROP:
> > > +
> > > +               case VIRTNET_XDP_RES_DROP:
> > >                         goto err_xdp;
> > >                 }
> > >         }
> > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > >         if (xdp_prog) {
> > >                 unsigned int xdp_frags_truesz = 0;
> > >                 struct skb_shared_info *shinfo;
> > > -               struct xdp_frame *xdpf;
> > >                 struct page *xdp_page;
> > >                 struct xdp_buff xdp;
> > >                 void *data;
> > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > >                 if (unlikely(err))
> > >                         goto err_xdp_frags;
> > >
> > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > -               stats->xdp_packets++;
> > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > >
> > >                 switch (act) {
> > > -               case XDP_PASS:
> > > +               case VIRTNET_XDP_RES_PASS:
> > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > >                         if (unlikely(!head_skb))
> > >                                 goto err_xdp_frags;
> > >
> > >                         rcu_read_unlock();
> > >                         return head_skb;
> > > -               case XDP_TX:
> > > -                       stats->xdp_tx++;
> > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > -                       if (unlikely(!xdpf)) {
> > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> >
> > Nit: This debug is lost after the conversion.
>
> Will fix.
>
> Thanks.
>
> >
> > Thanks
> >
> > > -                               goto err_xdp_frags;
> > > -                       }
> > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > -                       if (unlikely(!err)) {
> > > -                               xdp_return_frame_rx_napi(xdpf);
> > > -                       } else if (unlikely(err < 0)) {
> > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                               goto err_xdp_frags;
> > > -                       }
> > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > -                       rcu_read_unlock();
> > > -                       goto xdp_xmit;
> > > -               case XDP_REDIRECT:
> > > -                       stats->xdp_redirects++;
> > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > -                       if (err)
> > > -                               goto err_xdp_frags;
> > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > +
> > > +               case VIRTNET_XDP_RES_CONSUMED:
> > >                         rcu_read_unlock();
> > >                         goto xdp_xmit;
> > > -               default:
> > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > -                       fallthrough;
> > > -               case XDP_ABORTED:
> > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                       fallthrough;
> > > -               case XDP_DROP:
> > > +
> > > +               case VIRTNET_XDP_RES_DROP:
> > >                         goto err_xdp_frags;
> > >                 }
> > > err_xdp_frags:
> > > --
> > > 2.32.0.3.g01195cf9f
> > >
> >
>
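For comparison, here is a sketch of what Jason's suggestion could look like, extrapolated from the snippet quoted above. It is an assumption rather than code posted in this thread, and the name virtnet_xdp_handler_alt() is invented: the handler returns the XDP action itself, folds every failure into XDP_DROP, and leaves the VIRTIO_XDP_TX/VIRTIO_XDP_REDIR bookkeeping to the caller, so the xdp_xmit pointer no longer has to be passed in.

static u32 virtnet_xdp_handler_alt(struct bpf_prog *xdp_prog,
                                   struct xdp_buff *xdp,
                                   struct net_device *dev,
                                   struct virtnet_rq_stats *stats)
{
        struct xdp_frame *xdpf;
        u32 act;
        int err;

        act = bpf_prog_run_xdp(xdp_prog, xdp);
        stats->xdp_packets++;

        switch (act) {
        case XDP_PASS:
                return XDP_PASS;

        case XDP_TX:
                stats->xdp_tx++;
                xdpf = xdp_convert_buff_to_frame(xdp);
                if (unlikely(!xdpf))
                        return XDP_DROP;

                err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                if (unlikely(!err)) {
                        xdp_return_frame_rx_napi(xdpf);
                } else if (unlikely(err < 0)) {
                        trace_xdp_exception(dev, xdp_prog, act);
                        return XDP_DROP;
                }
                /* Caller sets *xdp_xmit |= VIRTIO_XDP_TX on this return. */
                return XDP_TX;

        case XDP_REDIRECT:
                stats->xdp_redirects++;
                err = xdp_do_redirect(dev, xdp, xdp_prog);
                if (err)
                        return XDP_DROP;
                /* Caller sets *xdp_xmit |= VIRTIO_XDP_REDIR on this return. */
                return XDP_REDIRECT;

        default:
                bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                return XDP_DROP;
        }
}

The buffer-ownership rules are the same either way; the open question in the thread is only whether the intermediate VIRTNET_XDP_RES_* states are worth the extra indirection.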