Displaying 20 results from an estimated 105 matches for "good_copy_len".
2016 Mar 18
2
[net-next v2] virtio_net: replace netdev_alloc_skb_ip_align() with napi_alloc_skb()
...00644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -260,7 +260,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
if (unlikely(!skb))
return NULL;
@@ -541,7 +541,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
struct virtio_net_hdr_mrg_rxbuf *hdr;
int err;
- skb = __netdev_alloc_skb_ip_align(vi->dev,...
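A minimal sketch of the allocation pattern this patch moves to, with the hypothetical helper copy_small_packet() standing in for the relevant part of page_to_skb(): in NAPI poll context, napi_alloc_skb() can draw from the per-CPU NAPI cache without the IRQ-safe path netdev_alloc_skb_ip_align() needs, while still leaving aligned headroom.

static struct sk_buff *copy_small_packet(struct napi_struct *napi,
					 const void *p, unsigned int len)
{
	struct sk_buff *skb;

	/* NAPI variant: runs in softirq context, no IRQ disabling needed. */
	skb = napi_alloc_skb(napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Assumes len <= GOOD_COPY_LEN; copy the small payload in. */
	memcpy(skb_put(skb, len), p, len);
	return skb;
}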
2012 Dec 26
5
[RFC PATCH] virtio-net: reset virtqueue affinity when doing cpu hotplug
...8;
module_param(napi_weight, int, 0444);
@@ -34,6 +35,8 @@ static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
+static bool cpu_hotplug = false;
+
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
@@ -1041,6 +1044,26 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
vi->affinity_hint_set = false;
}
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch(action) {
+ case CPU_ONLINE:
+ case CPU_ONLI...
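A sketch of how the truncated callback plausibly continues, using the legacy (pre-4.10) hotplug notifier API of that era; the embedded notifier_block field (nb) and the reuse of virtnet_set_affinity() are assumptions based on the visible context, not the RFC's exact code.

static int virtnet_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DEAD:
		/* The online CPU set changed: recompute virtqueue affinity. */
		virtnet_set_affinity(vi, true);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}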
2016 Mar 17
3
[PATCH net-next] virtio_net: replace netdev_alloc_skb_ip_align() with napi_alloc_skb()
...00644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -260,7 +260,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
if (unlikely(!skb))
return NULL;
--
1.8.3.1
2019 May 31
0
[PATCH v3 1/5] vsock/virtio: limit the memory used per-socket
...b6d8..4fd4987511a9 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -27,6 +27,9 @@
/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)
+/* Threshold for detecting small packets to copy */
+#define GOOD_COPY_LEN 128
+
static const struct virtio_transport *virtio_transport_get_ops(void)
{
const struct vsock_transport *t = vsock_core_get_transport();
@@ -65,6 +68,9 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
pkt->buf = kmalloc(len, GFP_KERNEL);
if (!pkt->buf)
goto o...
2019 Jul 17
0
[PATCH v4 1/5] vsock/virtio: limit the memory used per-socket
...65c5..095221f94786 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -26,6 +26,9 @@
/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)
+/* Threshold for detecting small packets to copy */
+#define GOOD_COPY_LEN 128
+
static const struct virtio_transport *virtio_transport_get_ops(void)
{
const struct vsock_transport *t = vsock_core_get_transport();
@@ -64,6 +67,9 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
pkt->buf = kmalloc(len, GFP_KERNEL);
if (!pkt->buf)
goto o...
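A hypothetical sketch of the idea behind these vsock patches: if the received payload is below GOOD_COPY_LEN, copy it into an exactly-sized buffer and hand the original full-size RX buffer straight back to the virtqueue, so per-socket memory accounting charges only what is actually used. The helper name small_pkt_clone is illustrative, not the patch's API.

static void *small_pkt_clone(const void *buf, size_t len)
{
	if (len > GOOD_COPY_LEN)
		return NULL;	/* large packet: keep the original buffer */

	/* Small packet: duplicate it so the big buffer can be recycled. */
	return kmemdup(buf, len, GFP_KERNEL);
}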
2019 May 15
2
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...the skb.
>>
>> Yes and the point is if the packet is smaller than 128 bytes the pages will
>> be recycled.
>>
>>
> So it avoids the overhead of allocating a large buffer. I got it.
>
> Just a curiosity, why is the threshold 128 bytes?
From its name (GOOD_COPY_LEN), I think it is just a value that won't lose
much performance, e.g. the size of two cachelines.
Thanks
>
>>> Do you suggest to implement something similar, or for now we can use my
>>> approach and if we will merge the datapath we can reuse the virtio-net
>>> approach...
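A sketch of the trade-off discussed in this thread, with hypothetical names (build_rx_skb is not the driver's page_to_skb): payloads up to GOOD_COPY_LEN, i.e. two 64-byte cachelines, are cheap to memcpy, which frees the backing page for reuse; larger payloads are attached as page fragments to avoid the copy.

static struct sk_buff *build_rx_skb(struct net_device *dev, struct page *page,
				    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb;

	if (len <= GOOD_COPY_LEN) {
		/* Small packet: copy it so the page can back new RX buffers. */
		skb = netdev_alloc_skb_ip_align(dev, GOOD_COPY_LEN);
		if (unlikely(!skb))
			return NULL;
		memcpy(skb_put(skb, len), page_address(page) + offset, len);
		/* caller recycles the page */
	} else {
		/* Large packet: skip the copy, hand the page to the skb. */
		skb = netdev_alloc_skb_ip_align(dev, 0);
		if (unlikely(!skb))
			return NULL;
		skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);
	}
	return skb;
}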
2013 Nov 20
1
[PATCH net 1/3] virtio-net: drop the rest of buffers when we can't allocate skb
...t; struct page *page, unsigned int offset,
> > @@ -237,8 +248,13 @@ static struct sk_buff *page_to_skb(struct
> > receive_queue *rq,
> >
> > /* copy small packet so we can reuse these pages for small data */
> > skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
> > - if (unlikely(!skb))
> > + if (unlikely(!skb)) {
> > + if (vi->mergeable_rx_bufs) {
> > + hdr = (struct skb_vnet_hdr *)p;
> > + drop_mergeable_buffer(rq, hdr->mhdr.num_buffers);
> > + }
> > return NULL;
> > + }
> >
> &...
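A sketch of the fix under review, with assumed buffer handling: when skb allocation fails for a mergeable-buffer packet, the remaining num_buffers - 1 descriptors of that packet are still queued and must be popped and freed, or they would be misparsed as the start of new packets.

static void drop_mergeable_buffer(struct receive_queue *rq, int num_buf)
{
	unsigned int len;
	void *buf;

	while (--num_buf) {
		/* Pop the next buffer chained to this packet. */
		buf = virtqueue_get_buf(rq->vq, &len);
		if (!buf)
			break;	/* device sent fewer buffers than announced */
		put_page(virt_to_head_page(buf));	/* assumes page-backed buffers */
	}
}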
2013 Dec 26
3
[PATCH net-next 3/3] net: auto-tune mergeable rx buffer size for improved performance
On 12/17/2013 08:16 AM, Michael Dalton wrote:
> Commit 2613af0ed18a ("virtio_net: migrate mergeable rx buffers to page frag
> allocators") changed the mergeable receive buffer size from PAGE_SIZE to
> MTU-size, introducing a single-stream regression for benchmarks with large
> average packet size. There is no single optimal buffer size for all
> workloads. For workloads
2013 Oct 28
8
[PATCH net-next] virtio_net: migrate mergeable rx buffers to page frag allocators
...truct skb_vnet_hdr *hdr;
- unsigned int copy, hdr_len, offset;
+ unsigned int copy, hdr_len, hdr_padded_len;
char *p;
- p = page_address(page);
+ p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +244,17 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
if (vi->mergeable_rx_bufs) {
hdr_len = sizeof hdr->mhdr;
- offset = hdr_len;
+ hdr_padded_len = sizeof hdr->mhdr;
} else {
hdr_len = sizeof hdr->hdr;
- offset = sizeof(struct padded_vnet_...
2013 Nov 12
12
[PATCH net-next 1/4] virtio-net: mergeable buffer size should include virtio-net header
...PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
+ sizeof(struct virtio_net_hdr_mrg_rxbuf), \
+ L1_CACHE_BYTES))
#define GOOD_COPY_LEN 128
#define VIRTNET_DRIVER_VERSION "1.0.0"
@@ -314,10 +317,10 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
head_skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
- if (unlikely(len > MAX_PACKET_LEN)) {
+ if (unlikely(len...
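With a common 64-byte L1 cacheline, the new macro works out as follows (a worked example, not part of the patch):

/* Assuming L1_CACHE_BYTES == 64:
 *   GOOD_PACKET_LEN  = ETH_HLEN(14) + VLAN_HLEN(4) + ETH_DATA_LEN(1500) = 1518
 *   header           = sizeof(struct virtio_net_hdr_mrg_rxbuf)          =   12
 *   sum                                                                 = 1530
 *   MERGE_BUFFER_LEN = ALIGN(1530, 64)                                  = 1536
 * i.e. each mergeable receive buffer now has room for the virtio-net
 * header in addition to a full MTU-sized frame.
 */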
2023 May 01
1
[RFC PATCH net 2/3] virtio-net: allow usage of vrings smaller than MAX_SKB_FRAGS + 2
...mutt-send-email-mst at kernel.org/
First, up to 4k should not be a problem. Even jumbo frames, e.g. 9k,
are highly likely to succeed. And at probe time, which is often boot,
even 64k isn't a problem ...
Hmm. We could allocate large buffers at probe time. Reuse them and
copy data over.
IOW reusing the GOOD_COPY_LEN flow for this case. Not yet sure how I feel
about this. OTOH it removes the need for the whole feature-blocking
approach, does it not?
WDYT?
> > > + /* How many ring descriptors we may need to transmit a single packet */
> > > + u16 single_pkt_max_descs;
> > >...
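A hypothetical sketch of the idea floated above: preallocate a few large linear buffers at probe time and, when a packet would need more descriptors than the small ring can hold, copy it into one of them so it occupies a single descriptor. The names and the bounce-pool layout are assumptions, not the proposed patch.

struct tx_bounce {
	void	*buf;		/* preallocated at probe time, e.g. 64k */
	bool	in_use;
};

static void *tx_linearize(struct tx_bounce *pool, int pool_size,
			  struct sk_buff *skb)
{
	int i;

	for (i = 0; i < pool_size; i++) {
		if (pool[i].in_use)
			continue;
		pool[i].in_use = true;
		/* Flatten all fragments into one contiguous buffer. */
		skb_copy_bits(skb, 0, pool[i].buf, skb->len);
		return pool[i].buf;
	}
	return NULL;	/* no bounce buffer free: caller must stop the queue */
}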
2013 Nov 13
4
[PATCH net-next 4/4] virtio-net: auto-tune mergeable rx buffer size for improved performance
...IXME: MTU in config. */
> #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
> -#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
> - sizeof(struct virtio_net_hdr_mrg_rxbuf), \
> - L1_CACHE_BYTES))
> #define GOOD_COPY_LEN 128
> +#define RECEIVE_AVG_WEIGHT 64
Maybe we can make this a module parameter.
>
> #define VIRTNET_DRIVER_VERSION "1.0.0"
>
> @@ -79,6 +78,9 @@ struct receive_queue {
> /* Chain pages by the private ptr. */
> struct page *pages;
>
> + /* Average...
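A sketch of the auto-tuning idea under review: keep an exponentially weighted moving average of received packet sizes per queue and size future buffers from it. With RECEIVE_AVG_WEIGHT == 64, each sample moves the average by 1/64th of the difference. The real patch uses the kernel's EWMA helpers; this inline integer form is just for illustration.

#define RECEIVE_AVG_WEIGHT 64

static unsigned int update_avg_pkt_len(unsigned int avg, unsigned int sample)
{
	/* avg += (sample - avg) / weight, in integer arithmetic */
	return avg - avg / RECEIVE_AVG_WEIGHT + sample / RECEIVE_AVG_WEIGHT;
}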