search for: config_net_rx_busy_pol

Displaying 20 results from an estimated 84 matches for "config_net_rx_busy_pol".

2014 Jul 20
1
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...age.h> > +#include <net/busy_poll.h> > > static int napi_weight = NAPI_POLL_WEIGHT; > module_param(napi_weight, int, 0444); > @@ -94,8 +95,143 @@ struct receive_queue { > > /* Name of this receive queue: input.$index */ > char name[40]; > + > +#ifdef CONFIG_NET_RX_BUSY_POLL > + unsigned int state; > +#define VIRTNET_RQ_STATE_IDLE 0 > +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns this RQ */ > +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ > +#define VIRTNET_RQ_STATE_DISABLED 4 /* RQ is disabled */ > +#...
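
For context on the hunk above: the V2 patch guards each receive queue with a small state machine (idle / NAPI / poll / disabled) so the NAPI handler, the refill work, and the busy-polling path never drain the same ring at once. Below is a minimal sketch of how such arbitration usually looks; the struct, lock, and helper names are hypothetical, not the ones from the patch.

    #include <linux/spinlock.h>

    /* Sketch only: arbitrate one receive queue between NAPI and busy polling
     * using the state values from the quoted hunk. All names are hypothetical. */
    #define VIRTNET_RQ_STATE_IDLE     0
    #define VIRTNET_RQ_STATE_NAPI     1   /* NAPI or refill owns this RQ */
    #define VIRTNET_RQ_STATE_POLL     2   /* poll owns this RQ */
    #define VIRTNET_RQ_STATE_DISABLED 4   /* RQ is disabled */

    struct example_rq {
            spinlock_t lock;
            unsigned int state;
    };

    static bool example_rq_lock_napi(struct example_rq *rq)
    {
            bool owned = false;

            spin_lock(&rq->lock);
            if (rq->state == VIRTNET_RQ_STATE_IDLE) {
                    rq->state = VIRTNET_RQ_STATE_NAPI;
                    owned = true;
            }
            spin_unlock(&rq->lock);
            return owned;   /* false: a poller owns the RQ, or it is disabled */
    }

    static void example_rq_unlock_napi(struct example_rq *rq)
    {
            spin_lock(&rq->lock);
            rq->state = VIRTNET_RQ_STATE_IDLE;
            spin_unlock(&rq->lock);
    }

The v3 posting (July 23 cover letter below) later drops this per-queue state machine entirely and reuses NAPI's own state instead.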
2014 Jul 16
2
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...> > +#include <net/busy_poll.h> > > static int napi_weight = NAPI_POLL_WEIGHT; > module_param(napi_weight, int, 0444); > @@ -94,8 +95,143 @@ struct receive_queue { > > /* Name of this receive queue: input.$index */ > char name[40]; > + > +#ifdef CONFIG_NET_RX_BUSY_POLL > + unsigned int state; > +#define VIRTNET_RQ_STATE_IDLE 0 > +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns this RQ */ > +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ > +#define VIRTNET_RQ_STATE_DISABLED 4 /* RQ is disabled */ > +#...
2014 Jul 15
3
[PATCH net-next] virtio-net: rx busy polling support
...clude <linux/cpu.h> #include <linux/average.h> +#include <net/busy_poll.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -94,8 +95,144 @@ struct receive_queue { /* Name of this receive queue: input.$index */ char name[40]; + +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define VIRTNET_RQ_STATE_IDLE 0 +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns this RQ */ +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ +#define VIRTNET_RQ_STATE_DISABLED 4 /* RQ is disabled */ +#define VIRTNET_RQ_OWNED (VIRTN...
2014 Jul 16
9
[PATCH net-next V2 0/3] rx busy polling support for virtio-net
Hi all: This series introduces support for rx busy polling. This is useful for reducing latency for a kvm guest. Patches 1-2 introduce helpers which are used for rx busy polling. Patch 3 implements the main function. The test was done between a kvm guest and an external host. The two hosts were connected through 40gb mlx4 cards. With both busy_poll and busy_read set to 50 in the guest, 1
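
busy_poll and busy_read here are the standard busy-polling sysctls (net.core.busy_poll and net.core.busy_read, in microseconds), set to 50 inside the guest; nothing virtio-specific is tuned. As a per-socket alternative to the global sysctls, the same value can be applied with SO_BUSY_POLL. A minimal userspace sketch, not part of the series:

    #include <stdio.h>
    #include <sys/socket.h>

    /* Opt one socket into busy polling for up to 50 microseconds per receive,
     * the same value the cover letter sets via the sysctls. Raising it above
     * net.core.busy_read may require CAP_NET_ADMIN. */
    static int enable_busy_poll(int fd)
    {
            int usec = 50;

            if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec)) < 0) {
                    perror("setsockopt(SO_BUSY_POLL)");
                    return -1;
            }
            return 0;
    }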
2014 Jul 16
0
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...clude <linux/cpu.h> #include <linux/average.h> +#include <net/busy_poll.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -94,8 +95,143 @@ struct receive_queue { /* Name of this receive queue: input.$index */ char name[40]; + +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define VIRTNET_RQ_STATE_IDLE 0 +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns this RQ */ +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ +#define VIRTNET_RQ_STATE_DISABLED 4 /* RQ is disabled */ +#define VIRTNET_RQ_OWNED (VIRTN...
2014 Jul 17
2
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...; static int napi_weight = NAPI_POLL_WEIGHT; >>> module_param(napi_weight, int, 0444); >>> @@ -94,8 +95,143 @@ struct receive_queue { >>> /* Name of this receive queue: input.$index */ >>> char name[40]; >>> + >>> +#ifdef CONFIG_NET_RX_BUSY_POLL >>> + unsigned int state; >>> +#define VIRTNET_RQ_STATE_IDLE 0 >>> +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns >>> this RQ */ >>> +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ >>> +#define...
2014 Jul 17
0
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...<net/busy_poll.h> >> static int napi_weight = NAPI_POLL_WEIGHT; >> module_param(napi_weight, int, 0444); >> @@ -94,8 +95,143 @@ struct receive_queue { >> /* Name of this receive queue: input.$index */ >> char name[40]; >> + >> +#ifdef CONFIG_NET_RX_BUSY_POLL >> + unsigned int state; >> +#define VIRTNET_RQ_STATE_IDLE 0 >> +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns >> this RQ */ >> +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ >> +#define VIRTNET_RQ_STATE_DISABLE...
2014 Jul 17
0
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...eight = NAPI_POLL_WEIGHT; >>>> module_param(napi_weight, int, 0444); >>>> @@ -94,8 +95,143 @@ struct receive_queue { >>>> /* Name of this receive queue: input.$index */ >>>> char name[40]; >>>> + >>>> +#ifdef CONFIG_NET_RX_BUSY_POLL >>>> + unsigned int state; >>>> +#define VIRTNET_RQ_STATE_IDLE 0 >>>> +#define VIRTNET_RQ_STATE_NAPI 1 /* NAPI or refill owns >>>> this RQ */ >>>> +#define VIRTNET_RQ_STATE_POLL 2 /* poll owns this RQ */ &g...
2014 Jul 23
3
[PATCH v3 net-next 0/2] rx busy polling support for virtio-net
Hi all: This series introduces support for rx busy polling. This is useful for reducing latency for a kvm guest. Instead of introducing new states and spinlocks, this series re-uses NAPI state to synchronize between NAPI and busy polling. This greatly simplifies the code and reduces the spinlock overhead in the normal NAPI fast path. The test was done between a kvm guest and an
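
The "reuse NAPI state" idea from this cover letter can be sketched as follows: busy polling claims the queue by taking NAPI's scheduling bit and backs off if the regular NAPI handler already owns it. This is only an illustrative sketch of that description, not the patch itself; example_receive() is a hypothetical stand-in for the driver's receive routine.

    #include <linux/netdevice.h>
    #include <net/busy_poll.h>

    /* Hypothetical receive routine: drain up to 'budget' packets from the ring. */
    static int example_receive(struct napi_struct *napi, int budget);

    static int example_busy_poll(struct napi_struct *napi)
    {
            int received;

            /* Atomically claim NAPI_STATE_SCHED; failure means the interrupt-driven
             * NAPI handler (or another poller) currently owns the queue. */
            if (!napi_schedule_prep(napi))
                    return LL_FLUSH_BUSY;

            received = example_receive(napi, 4 /* small poll budget */);

            /* Drop ownership without touching the poll list; the normal NAPI path
             * keeps working as before, with no extra spinlock anywhere. */
            clear_bit(NAPI_STATE_SCHED, &napi->state);

            return received;
    }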
2014 Dec 17
2
[PATCH 01/10] core: Split out UFO6 support
..._UFO6) && !(features & NETIF_F_GEN_CSUM) && > + !(features & NETIF_F_IPV6_CSUM)) { > + netdev_dbg(dev, > + "Dropping NETIF_F_UFO6 since no checksum offload features.\n"); > + features &= ~NETIF_F_UFO6; > } > > + > #ifdef CONFIG_NET_RX_BUSY_POLL > if (dev->netdev_ops->ndo_busy_poll) > features |= NETIF_F_BUSY_POLL; > diff --git a/net/core/ethtool.c b/net/core/ethtool.c > index 06dfb29..93eff41 100644 > --- a/net/core/ethtool.c > +++ b/net/core/ethtool.c > @@ -223,7 +223,7 @@ static netdev_features_t ethtool...
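
The CONFIG_NET_RX_BUSY_POLL hunk quoted above is the core-side half of the contract: NETIF_F_BUSY_POLL is advertised only when the driver supplies an ndo_busy_poll callback. The driver-side half is simply wiring such a callback into net_device_ops; a sketch with hypothetical names (not from this patch):

    #include <linux/netdevice.h>
    #include <net/busy_poll.h>

    #ifdef CONFIG_NET_RX_BUSY_POLL
    /* Hypothetical callback: drain a few packets without waiting for an
     * interrupt. Returning LL_FLUSH_BUSY means the queue is owned elsewhere. */
    static int example_busy_poll(struct napi_struct *napi)
    {
            return LL_FLUSH_BUSY;
    }
    #endif

    static const struct net_device_ops example_netdev_ops = {
    #ifdef CONFIG_NET_RX_BUSY_POLL
            /* The presence of this hook is what makes the core set NETIF_F_BUSY_POLL. */
            .ndo_busy_poll  = example_busy_poll,
    #endif
    };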
2014 Oct 15
2
[RFC PATCH net-next 5/6] virtio-net: enable tx interrupt
...if (unlikely(virtqueue_poll(sq->vq, r)) && + napi_schedule_prep(napi)) { + virtqueue_disable_cb(sq->vq); + __napi_schedule(napi); + goto again; + } + } else { + __netif_tx_unlock(txq); + } + + netif_wake_subqueue(vi->dev, vq2txq(sq->vq)); + return sent; +} + #ifdef CONFIG_NET_RX_BUSY_POLL + /* must be called with local_bh_disable()d */ static int virtnet_busy_poll(struct napi_struct *napi) { @@ -822,36 +881,12 @@ static int virtnet_open(struct net_device *dev) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_nap...
2014 Oct 14
4
[PATCH RFC] virtio_net: enable tx interrupt
...if (unlikely(virtqueue_poll(sq->vq, r)) && + napi_schedule_prep(napi)) { + virtqueue_disable_cb(sq->vq); + __napi_schedule(napi); + goto again; + } + } else { + __netif_tx_unlock(txq); + } + + netif_wake_subqueue(vi->dev, vq2txq(sq->vq)); + return sent; +} + #ifdef CONFIG_NET_RX_BUSY_POLL /* must be called with local_bh_disable()d */ static int virtnet_busy_poll(struct napi_struct *napi) @@ -814,30 +870,12 @@ static int virtnet_open(struct net_device *dev) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_napi_ena...
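
The heart of the hunk above is the usual virtio race-closing idiom: re-enable callbacks with virtqueue_enable_cb_prepare(), then use virtqueue_poll() to check whether completions arrived before the callback was armed. A minimal sketch of that idiom follows; example_reclaim() is a hypothetical stand-in for the tx reclaim routine, and the RFC additionally hands leftover work to the tx NAPI handler via napi_schedule_prep()/__napi_schedule() rather than simply looping.

    #include <linux/virtio.h>

    /* Hypothetical helper: free buffers the device has already consumed. */
    static void example_reclaim(struct virtqueue *vq);

    static void example_clean_tx(struct virtqueue *vq)
    {
            unsigned int r;

    again:
            example_reclaim(vq);

            /* Re-arm the completion callback and remember the ring position... */
            r = virtqueue_enable_cb_prepare(vq);

            /* ...then check whether more completions slipped in meanwhile. If so,
             * turn callbacks back off and reclaim again instead of waiting for an
             * interrupt about work we already know exists. */
            if (unlikely(virtqueue_poll(vq, r))) {
                    virtqueue_disable_cb(vq);
                    goto again;
            }
    }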