Displaying 20 results from an estimated 20 matches for "xdp_set_data_meta_invalid".
2017 Dec 31
1
[bpf-next V3 PATCH 11/14] virtio_net: setup xdp_rxq_info
...,6 +116,8 @@ struct receive_queue {
/* Name of this receive queue: input.$index */
char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
};
struct virtnet_info {
@@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data = xdp.data_hard_start + xdp_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -692,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data = data + vi->hdr_len;
xdp_set_data_meta_inv...
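The pattern these virtio_net patches add is a per-queue struct xdp_rxq_info plus one extra assignment in the per-packet XDP setup. A minimal sketch of the receive_small() fast path, reconstructed from the excerpt above; surrounding driver logic and error handling are omitted, and the registration call is an assumption based on the virtnet_open() hunk headers:

    struct xdp_buff xdp;

    /* Assumed: in virtnet_open(), each queue registers its rxq info once:
     *   xdp_rxq_info_reg(&rq->xdp_rxq, dev, index);
     */

    /* Per packet: the frame layout is [headroom | packet]. */
    xdp.data_hard_start = buf;
    xdp.data = xdp.data_hard_start + xdp_headroom;
    xdp_set_data_meta_invalid(&xdp);  /* driver does not expose data_meta */
    xdp.data_end = xdp.data + len;
    xdp.rxq = &rq->xdp_rxq;           /* the new per-queue rxq info */

    act = bpf_prog_run_xdp(xdp_prog, &xdp);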
2018 Jan 03
0
[bpf-next V4 PATCH 11/14] virtio_net: setup xdp_rxq_info
...,6 +116,8 @@ struct receive_queue {
/* Name of this receive queue: input.$index */
char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
};
struct virtnet_info {
@@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data = xdp.data_hard_start + xdp_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -692,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data = data + vi->hdr_len;
xdp_set_data_meta_inv...
2018 Sep 06
2
[PATCH net-next 06/11] tuntap: split out XDP logic
...ce(tun->xdp_prog);
> - if (xdp_prog && !*skb_xdp) {
> + if (xdp_prog) {
> struct xdp_buff xdp;
> - void *orig_data;
> u32 act;
>
> xdp.data_hard_start = buf;
> @@ -1695,33 +1732,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
> xdp_set_data_meta_invalid(&xdp);
> xdp.data_end = xdp.data + len;
> xdp.rxq = &tfile->xdp_rxq;
> - orig_data = xdp.data;
> - act = bpf_prog_run_xdp(xdp_prog, &xdp);
> -
> - switch (act) {
> - case XDP_REDIRECT:
> - err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
>...
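The split-out helper itself is not shown in this excerpt. A plausible reconstruction, based on the removed inline switch above and on the tun_do_xdp() call quoted from patch 09/11 below; treat the body as a sketch, not the actual patch (XDP_TX handling is omitted for brevity):

    /* Sketch only: run the XDP program and handle its verdict.
     * The signature matches the tun_do_xdp() call seen elsewhere in
     * the series; the exact body in the real patch may differ.
     */
    static u32 tun_do_xdp(struct tun_struct *tun, struct tun_file *tfile,
                          struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
                          int *err)
    {
            u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

            switch (act) {
            case XDP_REDIRECT:
                    *err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
                    break;
            case XDP_PASS:
            case XDP_DROP:
                    break;
            default:
                    bpf_warn_invalid_xdp_action(act);
                    trace_xdp_exception(tun->dev, xdp_prog, act);
                    act = XDP_DROP;
                    break;
            }
            return act;
    }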
2017 Dec 13
0
[bpf-next V1-RFC PATCH 11/14] virtio_net: setup xdp_rxq_info
...,6 +116,8 @@ struct receive_queue {
/* Name of this receive queue: input.$index */
char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
};
struct virtnet_info {
@@ -556,6 +559,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data = xdp.data_hard_start + xdp_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1229,6 +1233,13 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_...
2018 Sep 06
0
[PATCH net-next 06/11] tuntap: split out XDP logic
...rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
- if (xdp_prog && !*skb_xdp) {
+ if (xdp_prog) {
struct xdp_buff xdp;
- void *orig_data;
u32 act;
xdp.data_hard_start = buf;
@@ -1695,33 +1732,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &tfile->xdp_rxq;
- orig_data = xdp.data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- switch (act) {
- case XDP_REDIRECT:
- err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
- xdp_do_flush_map();
- if (err)
- g...
2017 Dec 22
1
[bpf-next V2 PATCH 11/14] virtio_net: setup xdp_rxq_info
...,6 +116,8 @@ struct receive_queue {
/* Name of this receive queue: input.$index */
char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
};
struct virtnet_info {
@@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data = xdp.data_hard_start + xdp_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1225,13 +1229,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static int virtnet_open(struct net_device *dev)
{
struct...
2018 Sep 06
0
[PATCH net-next 09/11] tuntap: accept an array of XDP buffs through sendmsg()
...bpf_prog *xdp_prog;
+ struct sk_buff *skb = NULL;
+ u32 rxhash = 0, act;
+ int buflen = *(int *)xdp->data_hard_start;
+ int err = 0;
+ bool skb_xdp = false;
+
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ if (gso->gso_type) {
+ skb_xdp = true;
+ goto build;
+ }
+ xdp_set_data_meta_invalid(xdp);
+ xdp->rxq = &tfile->xdp_rxq;
+ act = tun_do_xdp(tun, tfile, xdp_prog, xdp, &err);
+ if (err)
+ goto out;
+ if (act == XDP_REDIRECT)
+ *flush = true;
+ if (act != XDP_PASS)
+ goto out;
+ }
+
+build:
+ skb = build_skb(xdp->data_hard_start, buflen);
+ if (!skb) {
+...
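The truncated tail of this excerpt falls back to building an skb around the XDP buffer, both for the gso/skb_xdp path and after XDP_PASS. A minimal sketch of that standard fallback, using only the usual build_skb() conventions; the offsets are illustrative:

    /* Sketch: turn the XDP frame back into an skb. */
    skb = build_skb(xdp->data_hard_start, buflen);
    if (!skb) {
            err = -ENOMEM;
            goto out;
    }
    skb_reserve(skb, xdp->data - xdp->data_hard_start); /* skip headroom */
    skb_put(skb, xdp->data_end - xdp->data);            /* payload bytes */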
2018 Sep 07
0
[PATCH net-next 06/11] tuntap: split out XDP logic
...prog && !*skb_xdp) {
>> + if (xdp_prog) {
>> struct xdp_buff xdp;
>> - void *orig_data;
>> u32 act;
>>
>> xdp.data_hard_start = buf;
>> @@ -1695,33 +1732,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
>> xdp_set_data_meta_invalid(&xdp);
>> xdp.data_end = xdp.data + len;
>> xdp.rxq = &tfile->xdp_rxq;
>> - orig_data = xdp.data;
>> - act = bpf_prog_run_xdp(xdp_prog, &xdp);
>> -
>> - switch (act) {
>> - case XDP_REDIRECT:
>> - err = xdp_do_redirect(tun...
2018 Sep 06
1
[PATCH net-next 09/11] tuntap: accept an array of XDP buffs through sendmsg()
...2 rxhash = 0, act;
> + int buflen = *(int *)xdp->data_hard_start;
> + int err = 0;
> + bool skb_xdp = false;
> +
> + xdp_prog = rcu_dereference(tun->xdp_prog);
> + if (xdp_prog) {
> + if (gso->gso_type) {
> + skb_xdp = true;
> + goto build;
> + }
> + xdp_set_data_meta_invalid(xdp);
> + xdp->rxq = &tfile->xdp_rxq;
> + act = tun_do_xdp(tun, tfile, xdp_prog, xdp, &err);
> + if (err)
> + goto out;
> + if (act == XDP_REDIRECT)
> + *flush = true;
> + if (act != XDP_PASS)
> + goto out;
> + }
> +
> +build:
> + skb = b...
2018 May 21
0
[RFC PATCH net-next 12/12] vhost_net: batch submitting XDP buffers to underlayer sockets
...ret = total_len;
+ if (ctl && ((ctl->type & 0xF) == TUN_MSG_PTR)) {
+ int n = ctl->type >> 16;
+
+ preempt_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < n; i++) {
+ struct xdp_buff *x = (struct xdp_buff *)ctl->ptr;
+ struct xdp_buff *xdp = &x[i];
+
+ xdp_set_data_meta_invalid(xdp);
+ xdp->rxq = &tfile->xdp_rxq;
+ tun_xdp_one(tun, tfile, xdp);
+ }
+
+ xdp_do_flush_map();
+ tun_xdp_flush(tun->dev);
+
+ rcu_read_unlock();
+ preempt_enable();
+
+ ret = total_len;
goto out;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 0d84de6.....
2018 Mar 01
0
[PATCH net-next 1/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...g headroom but reserve enough space to push
* the descriptor on if we get an XDP_TX return code.
*/
- data = page_address(xdp_page) + offset;
+ data = page_address(page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
xdp_set_data_meta_invalid(&xdp);
@@ -736,9 +755,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- if (act != XDP_PASS)
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
switch (act) {
case XDP_PASS:
/* recalculate offset to accou...
2018 Mar 01
1
[PATCH net-next 1/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...to push
> * the descriptor on if we get an XDP_TX return code.
> */
> - data = page_address(xdp_page) + offset;
> + data = page_address(page) + offset;
> xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
> xdp.data = data + vi->hdr_len;
> xdp_set_data_meta_invalid(&xdp);
> @@ -736,9 +755,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>
> act = bpf_prog_run_xdp(xdp_prog, &xdp);
>
> - if (act != XDP_PASS)
> - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
> -
> switch (act) {
> case...
2018 May 21
20
[RFC PATCH net-next 00/12] XDP batching for TUN/vhost_net
Hi all:
We do not support XDP batching for TUN since it can only receive one
packet at a time from vhost_net. This series tries to remove this
limitation by:
- introduce a TUN-specific msg_control that can hold a pointer to an
array of XDP buffs (see the struct sketch after this list)
- try copy and build XDP buff in vhost_net
- store XDP buffs in an array and submit them once for every N packets
from vhost_net
- since TUN can only
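The vhost_net excerpt from patch 12/12 further up shows how that msg_control is consumed: `ctl->type & 0xF` selects the message kind and `ctl->type >> 16` carries the buffer count. A rough sketch of such a control structure; the field names are illustrative, and only TUN_MSG_PTR and the bit layout are taken from the excerpt:

    /* Sketch of a TUN-specific msg_control payload as this RFC uses it:
     * the low bits of 'type' select the kind (TUN_MSG_PTR), the high
     * 16 bits carry the number of XDP buffs behind 'ptr'.
     */
    struct tun_msg_ctl {
            int type;   /* (n << 16) | TUN_MSG_PTR */
            void *ptr;  /* array of n struct xdp_buff */
    };

vhost_net fills the array, packs the count into type, and issues a single sendmsg(); TUN then walks the array as in the patch 12/12 excerpt above.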
2018 Mar 01
7
[PATCH net-next 0/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
Hi:
This series tries to re-enable XDP_REDIRECT for mergeable buffer which
was removed by commit 7324f5399b06 ("virtio_net: disable
XDP_REDIRECT in receive_mergeable() case"). Main concerns are:
- not enough tailroom was reserved, which breaks cpumap
- complex logic like EWMA and linearizing during XDP processing
Fix those by:
- reserve enough tailroom during refill (see the sketch after this list)
- disable EWMA
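The tailroom point is the crux: cpumap, the target of XDP_REDIRECT, rebuilds the packet with build_skb(), which needs room for struct skb_shared_info at the end of the buffer. A minimal sketch of the sizing the refill path has to do when XDP is active; the variable names are illustrative, not the exact patch:

    /* Sketch: reserve headroom for the XDP program and tailroom for
     * skb_shared_info, so a redirected frame survives cpumap's
     * build_skb().
     */
    unsigned int headroom = VIRTIO_XDP_HEADROOM;
    unsigned int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    unsigned int buf_len  = headroom + packet_len + tailroom;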
2018 Sep 06
22
[PATCH net-next 00/11] Vhost_net TX batching
Hi all:
This series tries to batch submitting packets to the underlying socket
through msg_control during sendmsg(). This is done by:
1) Doing userspace copy inside vhost_net
2) Build XDP buff
3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once
through msg_control during sendmsg().
4) Underlying sockets can use XDP buffs directly when XDP is enabled,
or build skb based on XDP
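A compressed sketch of that flow; VHOST_NET_BATCH, TUN_MSG_PTR and the type encoding come from the series, while the helper names are hypothetical:

    /* Sketch only: vhost_net accumulates up to VHOST_NET_BATCH XDP
     * buffs, then hands the whole array to the socket in one sendmsg().
     */
    struct xdp_buff xdp[VHOST_NET_BATCH];
    struct tun_msg_ctl ctl;
    int n = 0;

    while (n < VHOST_NET_BATCH && tx_desc_available())  /* hypothetical */
            build_xdp_buff_from_desc(&xdp[n++]);        /* hypothetical */

    ctl.type = (n << 16) | TUN_MSG_PTR;   /* count + kind, per the RFC */
    ctl.ptr  = xdp;
    msg.msg_control = &ctl;
    sock->ops->sendmsg(sock, &msg, total_len);  /* one call, n packets */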
2018 Sep 12
14
[PATCH net-next V2 00/11] vhost_net TX batching
Hi all:
This series tries to batch submitting packets to the underlying socket
through msg_control during sendmsg(). This is done by:
1) Doing userspace copy inside vhost_net
2) Build XDP buff
3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once
through msg_control during sendmsg().
4) Underlying sockets can use XDP buffs directly when XDP is enabled,
or build skb based on XDP