Displaying 20 results from an estimated 35 matches for "rx_irq".
2013 Feb 15
1
[PATCH 7/8] netback: split event channels support
...t only use one event channel to do tx / rx notification.
This may cause unnecessary wake-ups of the processing routines. This patch adds a new
feature called feature-split-event-channels to netback, enabling it to handle
Tx and Rx events separately.
Netback will use tx_irq to notify the guest of tx completion and rx_irq for rx
notification.
If the frontend doesn't support this feature, tx_irq = rx_irq.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
drivers/net/xen-netback/common.h | 10 +++--
drivers/net/xen-netback/interface.c | 78 ++++++++++++++++++++++++++++-------
drivers/net/xen-netbac...
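To make the split concrete, here is a minimal C sketch of the binding logic described above, assuming the era's bind_interdomain_evtchn_to_irqhandler() signature (which takes the remote domain id directly). It is illustrative only: the struct, field, and handler names are placeholders, not the identifiers used in the actual netback patch. When the frontend negotiates the feature, each direction gets its own event channel, handler, and IRQ; otherwise one shared handler is bound and tx_irq == rx_irq.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <xen/events.h>   /* bind_interdomain_evtchn_to_irqhandler() */

/* Placeholder backend state and handlers, for illustration only. */
struct example_vif {
	domid_t domid;
	struct net_device *dev;
	unsigned int tx_irq, rx_irq;
};

static irqreturn_t example_interrupt(int irq, void *dev_id)    { return IRQ_HANDLED; }
static irqreturn_t example_tx_interrupt(int irq, void *dev_id) { return IRQ_HANDLED; }
static irqreturn_t example_rx_interrupt(int irq, void *dev_id) { return IRQ_HANDLED; }

static int example_connect_irqs(struct example_vif *vif,
				unsigned int tx_evtchn, unsigned int rx_evtchn)
{
	int err;

	if (tx_evtchn == rx_evtchn) {
		/* Frontend did not negotiate split event channels:
		 * one handler services both tx and rx notifications. */
		err = bind_interdomain_evtchn_to_irqhandler(vif->domid, tx_evtchn,
							    example_interrupt, 0,
							    vif->dev->name, vif);
		if (err < 0)
			return err;
		vif->tx_irq = vif->rx_irq = err;
		return 0;
	}

	/* Split mode: a dedicated handler (and IRQ) per direction. */
	err = bind_interdomain_evtchn_to_irqhandler(vif->domid, tx_evtchn,
						    example_tx_interrupt, 0,
						    "vif-tx", vif);
	if (err < 0)
		return err;
	vif->tx_irq = err;

	err = bind_interdomain_evtchn_to_irqhandler(vif->domid, rx_evtchn,
						    example_rx_interrupt, 0,
						    "vif-rx", vif);
	if (err < 0) {
		unbind_from_irqhandler(vif->tx_irq, vif);
		return err;
	}
	vif->rx_irq = err;
	return 0;
}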
2013 May 21
1
[PATCH net-next V2 2/2] xen-netfront: split event channels support for Xen frontend driver
...net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -85,7 +85,15 @@ struct netfront_info {
struct napi_struct napi;
- unsigned int evtchn;
+ /* Split event channels support, tx_* == rx_* when using
+ * single event channel.
+ */
+ unsigned int tx_evtchn, rx_evtchn;
+ unsigned int tx_irq, rx_irq;
+ /* Only used when split event channels support is enabled */
+ char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+ char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+
struct xenbus_device *xbdev;
spinlock_t tx_lock;
@@ -330,7 +338,7 @@ no_skb:
push:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&...
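A companion sketch for the frontend side of the hunk above, again with placeholder names rather than the patch's real identifiers: the DEVNAME-tx / DEVNAME-rx strings exist so each IRQ shows up with a distinguishable name (e.g. in /proc/interrupts), and each event channel is bound to its own handler via bind_evtchn_to_irqhandler(). In the single-channel fallback the driver would instead bind one handler and set tx_irq == rx_irq, mirroring the backend.

#include <linux/interrupt.h>
#include <linux/netdevice.h>   /* IFNAMSIZ, struct net_device */
#include <xen/events.h>        /* bind_evtchn_to_irqhandler() */

/* Illustrative subset of the fields added in the hunk above. */
struct example_front {
	struct net_device *netdev;
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	char tx_irq_name[IFNAMSIZ + 4];   /* "DEVNAME-tx" */
	char rx_irq_name[IFNAMSIZ + 4];   /* "DEVNAME-rx" */
};

static irqreturn_t example_tx_interrupt(int irq, void *dev_id) { return IRQ_HANDLED; }
static irqreturn_t example_rx_interrupt(int irq, void *dev_id) { return IRQ_HANDLED; }

static int example_setup_split_irqs(struct example_front *info)
{
	int err;

	/* Name each IRQ after the device so the two lines are easy
	 * to tell apart. */
	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
		 "%s-tx", info->netdev->name);
	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
		 "%s-rx", info->netdev->name);

	err = bind_evtchn_to_irqhandler(info->tx_evtchn, example_tx_interrupt,
					0, info->tx_irq_name, info);
	if (err < 0)
		return err;
	info->tx_irq = err;

	err = bind_evtchn_to_irqhandler(info->rx_evtchn, example_rx_interrupt,
					0, info->rx_irq_name, info);
	if (err < 0) {
		unbind_from_irqhandler(info->tx_irq, info);
		return err;
	}
	info->rx_irq = err;
	return 0;
}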
2017 Apr 21
3
[PATCH net-next v2 2/5] virtio-net: transmit napi
...affinity regardless of whether the
>> optimization is used.
>
>
> Yes, I noticed this in the past too.
>
>> Though this is not limited to napi-tx, it is more
>> pronounced in that mode than without napi.
>>
>> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>>
>> upstream:
>>
>> 1,1,1: 28985 Mbps, 278 Gcyc
>> 1,0,2: 30067 Mbps, 402 Gcyc
>>
>> napi tx:
>>
>> 1,1,1: 34492 Mbps, 269 Gcyc
>> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>> 1,0,1: 36269 Mbps, 394 Gcyc
>> 1,0,0: 34674 Mbps,...
2017 Apr 20
2
[PATCH net-next v2 2/5] virtio-net: transmit napi
...s always a win over keeping it off, even without irq
affinity.
The cycle cost is significant without affinity regardless of whether the
optimization is used. Though this is not limited to napi-tx, it is more
pronounced in that mode than without napi.
1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
upstream:
1,1,1: 28985 Mbps, 278 Gcyc
1,0,2: 30067 Mbps, 402 Gcyc
napi tx:
1,1,1: 34492 Mbps, 269 Gcyc
1,0,2: 36527 Mbps, 537 Gcyc (!)
1,0,1: 36269 Mbps, 394 Gcyc
1,0,0: 34674 Mbps, 402 Gcyc
This is a particularly strong example. It is also representative
of most RR tests. It is less...
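To make the affinity tuples concrete: 1,0,2 means the benchmark process runs on CPU 1 while rx_irq and tx_irq are steered to CPUs 0 and 2. A small userspace C sketch of how such a configuration is typically set up (root required; the IRQ numbers below are placeholders that would come from /proc/interrupts on the test machine):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

/* Pin the calling process to one CPU. */
static int pin_self_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	return sched_setaffinity(0, sizeof(set), &set);
}

/* Steer an interrupt to one CPU via its smp_affinity mask. */
static int pin_irq_to_cpu(int irq, int cpu)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", irq);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%x\n", 1u << cpu);   /* single-CPU hex bitmask */
	return fclose(f);
}

int main(void)
{
	/* Placeholder IRQ numbers; look up the virtio rx/tx queue
	 * interrupts in /proc/interrupts on the test machine. */
	int rx_irq = 25, tx_irq = 26;

	if (pin_self_to_cpu(1) || pin_irq_to_cpu(rx_irq, 0) ||
	    pin_irq_to_cpu(tx_irq, 2)) {
		perror("affinity setup");
		return EXIT_FAILURE;
	}
	/* ... run netperf TCP_RR from here, inheriting the CPU mask ... */
	return EXIT_SUCCESS;
}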
2017 Apr 24
2
[PATCH net-next v2 2/5] virtio-net: transmit napi
...> >
>> >
>> > Yes, I noticed this in the past too.
>> >
>> >> Though this is not limited to napi-tx, it is more
>> >> pronounced in that mode than without napi.
>> >>
>> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>> >>
>> >> upstream:
>> >>
>> >> 1,1,1: 28985 Mbps, 278 Gcyc
>> >> 1,0,2: 30067 Mbps, 402 Gcyc
>> >>
>> >> napi tx:
>> >>
>> >> 1,1,1: 34492 Mbps, 269 Gcyc
>> >> 1,0,2:...
2017 Apr 21
0
[PATCH net-next v2 2/5] virtio-net: transmit napi
...The cycle cost is significant without affinity regardless of whether the
> optimization is used.
Yes, I noticed this in the past too.
> Though this is not limited to napi-tx, it is more
> pronounced in that mode than without napi.
>
> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>
> upstream:
>
> 1,1,1: 28985 Mbps, 278 Gcyc
> 1,0,2: 30067 Mbps, 402 Gcyc
>
> napi tx:
>
> 1,1,1: 34492 Mbps, 269 Gcyc
> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> 1,0,1: 36269 Mbps, 394 Gcyc
> 1,0,0: 34674 Mbps, 402 Gcyc
>
> This is a particularly stron...
2017 Apr 24
0
[PATCH net-next v2 2/5] virtio-net: transmit napi
...> optimization is used.
> >
> >
> > Yes, I noticed this in the past too.
> >
> >> Though this is not limited to napi-tx, it is more
> >> pronounced in that mode than without napi.
> >>
> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
> >>
> >> upstream:
> >>
> >> 1,1,1: 28985 Mbps, 278 Gcyc
> >> 1,0,2: 30067 Mbps, 402 Gcyc
> >>
> >> napi tx:
> >>
> >> 1,1,1: 34492 Mbps, 269 Gcyc
> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> >>...
2017 Apr 24
0
[PATCH net-next v2 2/5] virtio-net: transmit napi
...> > Yes, I noticed this in the past too.
> >> >
> >> >> Though this is not limited to napi-tx, it is more
> >> >> pronounced in that mode than without napi.
> >> >>
> >> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
> >> >>
> >> >> upstream:
> >> >>
> >> >> 1,1,1: 28985 Mbps, 278 Gcyc
> >> >> 1,0,2: 30067 Mbps, 402 Gcyc
> >> >>
> >> >> napi tx:
> >> >>
> >> >> 1,1,1:...
2017 Apr 18
2
[PATCH net-next v2 2/5] virtio-net: transmit napi
From: Willem de Bruijn <willemb at google.com>
Convert virtio-net to a standard napi tx completion path. This enables
better TCP pacing using TCP small queues and increases single stream
throughput.
The virtio-net driver currently cleans tx descriptors on transmission
of new packets in ndo_start_xmit. Latency therefore depends on new traffic and
is unbounded. To avoid deadlock when a socket reaches
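As a rough sketch of the "standard napi tx completion path" the commit message refers to (not virtio-net's actual code; the hw_* helpers are hypothetical stand-ins for the driver's real descriptor-reclaim and interrupt mask/unmask primitives): the tx interrupt does no cleanup itself, it only schedules NAPI, and the poll callback reclaims completed descriptors in bounded batches, re-arming the interrupt once the queue is drained.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical per-queue context; the hw_* helpers are
 * driver-specific and not shown. */
struct example_txq {
	struct napi_struct napi;
	struct net_device *dev;
};

static int  hw_reclaim_tx(struct example_txq *txq, int budget); /* frees up to budget completed skbs */
static void hw_mask_tx_irq(struct example_txq *txq);
static void hw_unmask_tx_irq(struct example_txq *txq);

static irqreturn_t example_tx_intr(int irq, void *data)
{
	struct example_txq *txq = data;

	/* Defer all work to the poll loop. */
	hw_mask_tx_irq(txq);
	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int example_tx_poll(struct napi_struct *napi, int budget)
{
	struct example_txq *txq = container_of(napi, struct example_txq, napi);
	int done = hw_reclaim_tx(txq, budget);

	/* Stop polling and re-arm the interrupt only once the work
	 * fits within this budget. */
	if (done < budget && napi_complete_done(napi, done))
		hw_unmask_tx_irq(txq);

	return done;
}

Deferring completion to a budgeted poll loop is what lets TCP small queues pace the socket, which is where the single-stream throughput gain described above comes from.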
2019 Nov 22
0
[PATCH net-next v2] drivers: net: virtio_net: Implement a dev_watchdog handler
...ntel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 0940a0da16f2..28465fc76dda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -70,7 +70,7 @@ static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
-static void ixgb_tx_timeout(struct net_device *dev);
+static void ixgb_tx_timeout(struct net_device *dev, int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void...
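For reference, the change being applied across these drivers is to the .ndo_tx_timeout hook, which gains a queue index so the handler knows which tx queue the watchdog flagged. A hedged sketch of a handler using the signature shown in this diff (int txqueue in this revision; a later result below shows the unsigned int variant), with illustrative names and the recovery step left driver-specific:

#include <linux/netdevice.h>

static void example_tx_timeout(struct net_device *dev, int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "tx timeout on queue %d (last trans_start %lu)\n",
		    txqueue, txq->trans_start);
	/* Driver-specific recovery, e.g. schedule a reset work item. */
}

static const struct net_device_ops example_netdev_ops = {
	/* Compiles only against a tree carrying the updated
	 * ndo_tx_timeout prototype from this series. */
	.ndo_tx_timeout = example_tx_timeout,
};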
2019 Nov 23
1
[PATCH net-next v2] drivers: net: virtio_net: Implement a dev_watchdog handler
...rivers/net/ethernet/intel/ixgb/ixgb_main.c
> index 0940a0da16f2..28465fc76dda 100644
> --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
> +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
> @@ -70,7 +70,7 @@ static int ixgb_clean(struct napi_struct *, int);
> static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
> static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
>
> -static void ixgb_tx_timeout(struct net_device *dev);
> +static void ixgb_tx_timeout(struct net_device *dev, int txqueue);
> static void ixgb_tx_timeout_task(struct work_struct...
2019 Nov 22
4
[PATCH net-next v2] drivers: net: virtio_net: Implement a dev_watchdog handler
Hi Michael,
On Fri, Nov 22, 2019 at 07:31, Michael S. Tsirkin
<mst at redhat.com> wrote:
>
> On Thu, Nov 21, 2019 at 10:36:36PM -0300, Julio Faracco wrote:
> > The virtio_net driver is not handling TX error events reported by
> > dev_watchdog. This event is raised when a transmission queue is having
> > trouble transmitting packets. This could happen for any
2019 Nov 24
0
[PATCH net-next v2] drivers: net: virtio_net: Implement a dev_watchdog handler
...igb/igb_main.c
index ed7e667d7eb2..0eb8470050c2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -146,7 +146,7 @@ static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net...
2019 Nov 24
3
[PATCH net-next v2] drivers: net: virtio_net: Implement a dev_watchdog handler
...igb/igb_main.c
index ed7e667d7eb2..0eb8470050c2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -146,7 +146,7 @@ static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net...
2019 Nov 24
0
[PATCH net-next v2] drivers: net: virtio_net: Implement a dev_watchdog handler
...igb/igb_main.c
index ed7e667d7eb2..0eb8470050c2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -146,7 +146,7 @@ static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(s...