Displaying 11 results from an estimated 11 matches for "num_rx_queu".
2009 Jan 27 · 5 replies · [PATCH 2/2] Add VMDq support to ixgbe
...;protocol = eth_type_trans(skb, adapter->netdev);
#ifndef IXGBE_NO_LRO
        if (ixgbe_lro_ring_queue(rx_ring->lrolist,
                                 adapter, skb, staterr, rx_ring, rx_desc) == 0) {
@@ -1475,6 +1531,8 @@ static irqreturn_t ixgbe_msix_clean_rx(i
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
+               if (!rx_ring->active)
+                       continue;
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
#ifndef CONFIG_IXGBE_NAPI
@@ -1501,6 +1559,8 @@ static irqreturn_t ixgbe_msix_clean_rx...
2014 Jan 16 · 0 replies · [PATCH net-next v3 4/5] net-sysfs: add support for device-specific rx queue sysfs attributes
...rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;
@@ -2374,7 +2389,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
@@ -2393,7 +2408,7 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
                                           from_dev->real_num_tx_queues);
        if (err)
                return err;
-#ifdef CONF...
2014 Jan 16 · 0 replies · [PATCH net-next v4 4/6] net-sysfs: add support for device-specific rx queue sysfs attributes
...rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;
@@ -2374,7 +2389,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
@@ -2393,7 +2408,7 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
                                           from_dev->real_num_tx_queues);
        if (err)
                return err;
-#ifdef CONF...
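
The hunk in the two postings above shows a standard kernel header idiom: the real netif_set_real_num_rx_queues() is declared only when the relevant config option is enabled, while a static inline stub keeps callers building when it is not. A minimal sketch of the idiom (simplified from the excerpt, not the verbatim netdevice.h text):

    #ifdef CONFIG_SYSFS
    int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
    #else
    /* Feature compiled out: accept the request and report success
     * so call sites need no #ifdefs of their own. */
    static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                                   unsigned int rxq)
    {
            return 0;
    }
    #endif

The point of the patch is that the declaration was previously gated on CONFIG_RPS, but once rx queue sysfs attributes exist the function is needed whenever sysfs is built in.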
2014 Jan 16 · 2 replies · [PATCH net-next v3 4/5] net-sysfs: add support for device-specific rx queue sysfs attributes
...um_queues(struct net_device *to_dev,
> #endif
> }
>
> +#ifdef CONFIG_SYSFS
> +static inline unsigned int get_netdev_rx_queue_index(
> +              struct netdev_rx_queue *queue)
> +{
> +      struct net_device *dev = queue->dev;
> +      int i;
> +
> +      for (i = 0; i < dev->num_rx_queues; i++)
> +              if (queue == &dev->_rx[i])
> +                      break;
Why write a loop when you can do:
        i = queue - dev->_rx;
Ben.
> +      BUG_ON(i >= dev->num_rx_queues);
> +
> +      return i;
> +}
> +#endif
--
Ben Hutchings, Staff Engineer, Solarflare
Not speaking for my employe...
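
Ben's suggestion relies on C pointer arithmetic: subtracting two pointers into the same array yields the element index directly, in O(1), instead of scanning. A standalone illustration of the idea (hypothetical struct name, not the kernel type):

    #include <assert.h>
    #include <stdio.h>

    struct rx_queue { int id; };

    /* Pointer subtraction between same-typed pointers returns an
     * element count, not a byte offset, so this is the index. */
    static unsigned int queue_index(const struct rx_queue *base,
                                    const struct rx_queue *q,
                                    unsigned int n)
    {
            unsigned int i = q - base;

            assert(i < n);  /* mirrors the BUG_ON() in the patch */
            return i;
    }

    int main(void)
    {
            struct rx_queue rx[4];

            printf("%u\n", queue_index(rx, &rx[2], 4));  /* prints 2 */
            return 0;
    }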
2014 Jan 16 · 6 replies · [PATCH net-next v3 1/5] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
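
The fallback described here can be pictured as a loop that requests the largest order first and halves the request on failure. A simplified sketch of that shape (MAX_TRIED_ORDER is a hypothetical cap, and the opportunistic flags mirror common kernel practice rather than the exact patch):

    struct page *page = NULL;
    unsigned int size = 0;
    int order;

    /* Try high-order pages first, falling back toward order 0.
     * Higher-order attempts are marked "don't warn, don't retry
     * hard" so failure is cheap and quiet. */
    for (order = MAX_TRIED_ORDER; order >= 0; order--) {
            gfp_t gfp = gfp_mask;

            if (order)
                    gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
            page = alloc_pages(gfp, order);
            if (page) {
                    size = PAGE_SIZE << order;
                    break;
            }
    }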
2013 Aug 23 · 1 reply · [PATCH] VMXNET3: Add support for virtual IOMMU
...rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
        rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
        rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
                        i, adapter->num_rx_queues);
        devRead->rssConfDesc.confVer = 1;
-       devRead->rssConfDesc.confLen = sizeof(*rssConf);
-       devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+       devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+       devRead->rssConfDesc.confPA =
+               cpu_to_le64(adapter->rss_co...
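
Two distinct fixes are visible in this hunk: fields shared with the device must be stored in the device's little-endian layout (hence cpu_to_le32()/cpu_to_le64()), and the address handed to the device must come from the DMA API, so that a (virtual) IOMMU can translate it, rather than from virt_to_phys(). A schematic of the addressing half (the field and device pointers are illustrative, not the exact driver code):

    /* Before: a raw physical address, which bypasses any IOMMU. */
    desc->confPA = virt_to_phys(rssConf);

    /* After (sketch): obtain a DMA address via the DMA API and
     * store it in device byte order. */
    dma_addr_t pa = dma_map_single(&pdev->dev, rssConf,
                                   sizeof(*rssConf), DMA_TO_DEVICE);
    desc->confPA = cpu_to_le64(pa);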
2013 Aug 20 · 2 replies · [PATCH] VMXNET3: Add support for virtual IOMMU
...rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
        rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
        rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2205,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
                        i, adapter->num_rx_queues);
        devRead->rssConfDesc.confVer = 1;
-       devRead->rssConfDesc.confLen = sizeof(*rssConf);
-       devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+       devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+       devRead->rssConfDesc.confPA =
+               cpu_to_le64(adapter->rss_co...
2014 Jan 16 · 13 replies · [PATCH net-next v4 1/6] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
2014 Jan 17 · 7 replies · [PATCH net-next v5 0/6] virtio-net: mergeable rx buffer size auto-tuning
The virtio-net device currently uses aligned MTU-sized mergeable receive
packet buffers. Network throughput for workloads with large average
packet size can be improved by posting larger receive packet buffers.
However, due to SKB truesize effects, posting large (e.g., PAGE_SIZE)
buffers reduces the throughput of workloads that do not benefit from GRO
and have no large inbound packets.
This
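
The auto-tuning idea is to size posted buffers from a running estimate of recent packet lengths instead of a fixed MTU- or PAGE_SIZE-based choice, so truesize stays proportional to actual traffic. A minimal sketch of such an estimator (plain C with a hypothetical 1/8 weight; the kernel series uses its own EWMA helpers):

    /* Exponentially weighted moving average of rx packet length,
     * clamped to [min_len, max_len] when sizing the next buffer. */
    static unsigned int avg_len;

    static void record_rx_len(unsigned int len)
    {
            /* new = old + (sample - old) / 8 */
            avg_len += ((int)len - (int)avg_len) / 8;
    }

    static unsigned int next_buf_len(unsigned int min_len,
                                     unsigned int max_len)
    {
            unsigned int len = avg_len;

            if (len < min_len)
                    len = min_len;
            else if (len > max_len)
                    len = max_len;
            return len;
    }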
2018 Aug 07 · 1 reply · [PATCH net-next] net: allow to call netif_reset_xps_queues() under cpu_read_lock
...aps, *new_dev_maps = NULL;
@@ -2275,6 +2277,9 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
                return -EINVAL;
        }
+       if (!cpuslocked)
+               cpus_read_lock();
+
        mutex_lock(&xps_map_mutex);
        if (is_rxqs_map) {
                maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
@@ -2317,9 +2322,9 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        if (!new_dev_maps)
                goto out_no_new_maps;
-       static_key_slow_inc(&xps_needed);
+       static_key_slow_inc_cpuslocked(&xps_needed);
        if (is_rxqs_map)
-               static_key_slow_inc(&xps_rxqs_n...
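
The cpuslocked flag added here is the usual pattern for a function reachable both from contexts that already hold the CPU hotplug read lock and from contexts that do not: take the lock only when the caller has not, and switch to the _cpuslocked static-key variants that expect the lock to be held. A distilled sketch of the calling convention (hypothetical function names, kernel context assumed):

    /* do_work_locked() assumes cpus_read_lock() is already held. */
    static int do_work_locked(void);

    static int do_work(bool cpuslocked)
    {
            int ret;

            if (!cpuslocked)
                    cpus_read_lock();
            ret = do_work_locked();
            if (!cpuslocked)
                    cpus_read_unlock();
            return ret;
    }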
2014 Jan 17 · 7 replies · [PATCH net-next v6 0/6] virtio-net: mergeable rx buffer size auto-tuning
The virtio-net device currently uses aligned MTU-sized mergeable receive
packet buffers. Network throughput for workloads with large average
packet size can be improved by posting larger receive packet buffers.
However, due to SKB truesize effects, posting large (e.g., PAGE_SIZE)
buffers reduces the throughput of workloads that do not benefit from GRO
and have no large inbound packets.
This