Displaying 14 results from an estimated 14 matches for "1594,6".
2014 Jan 16
0
[PATCH net-next v4 6/6] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...n + clamp_t(unsigned int, ewma_read(&rq->mrg_avg_pkt_len),
- GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
- len = ALIGN(len, MERGEABLE_BUFFER_ALIGN);
+ len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
return -ENOMEM;
@@ -1594,6 +1601,33 @@ err:
return ret;
}
+#ifdef CONFIG_SYSFS
+static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute, char *buf)
+{
+ struct virtnet_info *vi = netdev_priv(queue->dev);
+ unsigned int queue_index = get_netdev_rx_queue_ind...
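The hunk above replaces an open-coded clamp/align computation with a get_mergeable_buf_len() helper. Below is a minimal sketch of what such a helper could look like, reconstructed only from the removed lines shown; GOOD_PACKET_LEN, MERGEABLE_BUFFER_ALIGN and the EWMA field come from the excerpt, while the hdr_len definition is an assumption for the example.

/* Sketch, not the actual patch: size a receive buffer from the per-queue
 * EWMA of packet length, clamped and aligned as in the removed lines above.
 * hdr_len is assumed to be the mergeable rx-buffer header size. */
static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	/* Clamp the running average into [GOOD_PACKET_LEN, PAGE_SIZE - hdr_len]. */
	len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
				GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
	/* Round up so posted buffers stay aligned for the device. */
	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}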
2014 Jan 16
0
[PATCH net-next v3 5/5] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...virtnet_info *vi)
napi_weight);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
+ seqcount_init(&vi->rq[i].sysfs_seq);
ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
}
@@ -1594,6 +1615,39 @@ err:
return ret;
}
+#ifdef CONFIG_SYSFS
+static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute, char *buf)
+{
+ struct virtnet_info *vi = netdev_priv(queue->dev);
+ unsigned int queue_index = get_netdev_rx_queue_ind...
2014 Jan 16
2
[PATCH net-next v3 5/5] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...eight);
>
> sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
> + seqcount_init(&vi->rq[i].sysfs_seq);
> ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
> sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
> }
> @@ -1594,6 +1615,39 @@ err:
> return ret;
> }
>
> +#ifdef CONFIG_SYSFS
> +static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
> + struct rx_queue_attribute *attribute, char *buf)
> +{
> + struct virtnet_info *vi = netdev_priv(queue->dev);
> + unsig...
2011 Nov 21
0
[PATCH] xsm/flask: fix resource list range checks
...);
+
int security_ocontext_add(char *ocontext, unsigned long low,
unsigned long high, u32 sid);
diff --git a/xen/xsm/flask/ss/services.c b/xen/xsm/flask/ss/services.c
index 6e72800..b880762 100644
--- a/xen/xsm/flask/ss/services.c
+++ b/xen/xsm/flask/ss/services.c
@@ -1594,6 +1594,53 @@ out:
return rc;
}
+int security_iterate_iomem_sids(unsigned long start, unsigned long end,
+ security_iterate_fn fn, void *data)
+{
+ struct ocontext *c;
+ int rc = 0;
+
+ POLICY_RDLOCK;
+
+ c = policydb.ocontexts[OCON_IOMEM];
+ wh...
2014 Jan 16
6
[PATCH net-next v3 1/5] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
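The fallback behaviour described in this commit message can be sketched roughly as follows. This is only an illustration of the pattern, not the actual skb_page_frag_refill() body; the helper name and max_order parameter are invented for the example.

/* Illustrative only: probe for a large allocation opportunistically, then
 * fall back to successively smaller orders, finishing with a plain order-0
 * allocation that uses the caller's original gfp flags. */
static struct page *frag_alloc_with_fallback(gfp_t gfp, unsigned int max_order,
					     unsigned int *chosen_order)
{
	unsigned int order;
	struct page *page;

	for (order = max_order; order > 0; order--) {
		/* No reclaim, no retries, no warning: give up quickly if
		 * no high-order page is readily available. */
		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN |
				   __GFP_NORETRY, order);
		if (page) {
			*chosen_order = order;
			return page;
		}
	}
	/* Last resort: an order-0 page, which may block if gfp allows it. */
	*chosen_order = 0;
	return alloc_pages(gfp, 0);
}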
2014 Jan 16
13
[PATCH net-next v4 1/6] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
2011 Aug 31
1
[PATCH] hivex: Add byte runs for nodes and values
...t; (* all errors are turned into exceptions *)
| RErrDispose -> pr "unit"
| RHive -> pr "t"
+ | RSize -> pr "size"
| RNode -> pr "node"
| RNodeNotFound -> pr "node"
| RNodeList -> pr "node array"
@@ -1594,6 +1649,8 @@ and generate_ocaml_prototype ?(is_external = false) name style =
| RString -> pr "string"
| RStringList -> pr "string array"
| RLenType -> pr "hive_type * int"
+ | RLenNode -> pr "node"
+ | RLenValue -> pr "val...
2014 Jan 17
7
[PATCH net-next v5 0/6] virtio-net: mergeable rx buffer size auto-tuning
The virtio-net device currently uses aligned MTU-sized mergeable receive
packet buffers. Network throughput for workloads with large average
packet size can be improved by posting larger receive packet buffers.
However, due to SKB truesize effects, posting large (e.g., PAGE_SIZE)
buffers reduces the throughput of workloads that do not benefit from GRO
and have no large inbound packets.
This
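The auto-tuning referred to above is driven by a per-receive-queue estimate of recent packet sizes; the ewma_init()/ewma_read() calls in the excerpts earlier on this page suggest the kernel's linux/average.h EWMA helpers are used for that. A rough sketch of the bookkeeping side follows; the field and constant names are taken from the excerpts, while the surrounding structure and update hook are assumptions.

#include <linux/average.h>

/* Sketch: one running average of packet length per receive queue. */
struct receive_queue_sketch {
	struct ewma mrg_avg_pkt_len;
};

static void rq_init_avg(struct receive_queue_sketch *rq)
{
	/* Factor 1, weight RECEIVE_AVG_WEIGHT, as seen in the v3/v4 excerpts. */
	ewma_init(&rq->mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
}

static void rq_note_packet(struct receive_queue_sketch *rq, unsigned int len)
{
	/* Fold each received packet's length into the running average;
	 * get_mergeable_buf_len()-style code then sizes future buffers
	 * from ewma_read() of this average. */
	ewma_add(&rq->mrg_avg_pkt_len, len);
}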
2009 Mar 27
42
[PATCH 00/42] ocfs2: Add reflink file support. V1
Hi all,
I have finally finished v1 of reflink for ocfs2. It still has some
bugs that I am investigating, but the scheme is mostly there, so I'd
like to send it out for review first. Tristan and I will continue to
work on the stability of the code.
For general information about reflink, please see
http://oss.oracle.com/osswiki/OCFS2/DesignDocs/Reflink.
For the design doc, please
2009 Apr 03
42
[PATCH 00/42] ocfs2: Add reflink file support. V2
Hi all,
Changes from v1 to v2: bug fixes and improved metadata/credits
reservation.
For general information about reflink, please see
http://oss.oracle.com/osswiki/OCFS2/DesignDocs/Reflink.
For the design doc, please see
http://oss.oracle.com/osswiki/OCFS2/DesignDocs/RefcountTrees
http://oss.oracle.com/osswiki/OCFS2/DesignDocs/ReflinkOperation