Displaying 9 results from an estimated 9 matches for "copylen".
2014 Oct 23
0
[PATCH RFC] tun: fix sparse warnings for virtio headers
...virtio16_to_cpu(false, gso.hdr_len) < ETH_HLEN)))
return -EINVAL;
}
@@ -1065,7 +1065,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+ copylen = gso.hdr_len ? __virtio16_to_cpu(false, gso.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
linear = copylen;
@@ -1075,10 +1075,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *...
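The hunk above converts gso.hdr_len with __virtio16_to_cpu() before using it, because virtio header fields carry a device byte order rather than being plain host integers, which is what sparse warns about. Below is a minimal userspace sketch of that conversion; virtio16_to_cpu(), le16_to_host() and the GOODCOPY_LEN value are simplified stand-ins, not the kernel definitions.

/*
 * Simplified stand-in for the kernel's __virtio16_to_cpu(): a __virtio16
 * field is little-endian for modern devices and native-endian for legacy
 * ones, so it must be converted before being compared or assigned to a
 * host integer such as copylen in tun_get_user().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GOODCOPY_LEN 128          /* assumed value, for this sketch only */

typedef uint16_t virtio16;        /* stand-in for the kernel's __virtio16 */

/* Interpret a 16-bit value as little-endian, whatever the host order is. */
static uint16_t le16_to_host(uint16_t v)
{
    const uint8_t *p = (const uint8_t *)&v;
    return (uint16_t)(p[0] | (p[1] << 8));
}

static uint16_t virtio16_to_cpu(bool little_endian, virtio16 val)
{
    if (little_endian)
        return le16_to_host(val); /* modern virtio: field is little-endian */
    return val;                   /* legacy virtio: already host order here */
}

int main(void)
{
    virtio16 hdr_len = 64;        /* pretend this came from a virtio_net header */
    size_t copylen;

    /* Mirrors the fixed line in the hunk above: convert before using. */
    copylen = hdr_len ? virtio16_to_cpu(false, hdr_len) : GOODCOPY_LEN;
    printf("copylen = %zu\n", copylen);
    return 0;
}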
2003 Jan 03
0
[Fwd: Re: rsync windows -> unix still hanging :(]
...writeBufCnt = 0;
> writeBuf = (char*)malloc(MAX_MAP_SIZE);
> if (!writeBuf) out_of_memory("write_file");
> }
> ret = len;
> do {
> if ( buf && writeBufCnt < writeBufSize ) {
> size_t copyLen = len;
> if ( copyLen > writeBufSize - writeBufCnt ) {
> copyLen = writeBufSize - writeBufCnt;
> }
> memcpy(writeBuf + writeBufCnt, buf, copyLen);
> writeBufCnt += copyLen;
> buf += copyLen;
>...
2002 Dec 09
2
Rsync performance increase through buffering
...BufCnt;
+
+ if ( !writeBuf ) {
+ writeBufSize = MAX_MAP_SIZE;
+ writeBufCnt = 0;
+ writeBuf = (char*)malloc(MAX_MAP_SIZE);
+ if (!writeBuf) out_of_memory("write_file");
+ }
+ ret = len;
+ do {
+ if ( buf && writeBufCnt < writeBufSize ) {
+ size_t copyLen = len;
+ if ( copyLen > writeBufSize - writeBufCnt ) {
+ copyLen = writeBufSize - writeBufCnt;
+ }
+ memcpy(writeBuf + writeBufCnt, buf, copyLen);
+ writeBufCnt += copyLen;
+ buf += copyLen;
+ len -= copyLen;
+ }
+ if ( !buf || writeBufCnt == writeBufSize ) {
+ int...
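Both excerpts above show the same buffering change to rsync's write_file(): instead of issuing a write() per chunk, data is staged in writeBuf and flushed only when the buffer fills. A standalone sketch of that pattern, using illustrative names (buffered_write, flush_write_buf, BUF_SIZE) rather than the actual rsync ones:

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BUF_SIZE (256 * 1024)       /* stand-in for rsync's MAX_MAP_SIZE */

static char *writeBuf;              /* lazily allocated staging buffer */
static size_t writeBufCnt;          /* bytes currently staged */

/* Push everything staged so far out to fd. */
int flush_write_buf(int fd)
{
    size_t off = 0;

    while (off < writeBufCnt) {
        ssize_t n = write(fd, writeBuf + off, writeBufCnt - off);
        if (n < 0)
            return -1;
        off += (size_t)n;
    }
    writeBufCnt = 0;
    return 0;
}

/* Buffered replacement for a plain write(): copy into writeBuf, flush when full. */
ssize_t buffered_write(int fd, const char *buf, size_t len)
{
    ssize_t ret = (ssize_t)len;

    if (!writeBuf) {
        writeBuf = malloc(BUF_SIZE);
        if (!writeBuf)
            return -1;
        writeBufCnt = 0;
    }

    while (len > 0) {
        size_t copyLen = len;

        if (copyLen > BUF_SIZE - writeBufCnt)
            copyLen = BUF_SIZE - writeBufCnt;
        memcpy(writeBuf + writeBufCnt, buf, copyLen);
        writeBufCnt += copyLen;
        buf += copyLen;
        len -= copyLen;

        if (writeBufCnt == BUF_SIZE && flush_write_buf(fd) < 0)
            return -1;
    }
    return ret;
}

A caller would route file output through buffered_write() and flush one final time before closing the descriptor; the gain comes from turning many small write() calls into a few large ones.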
2018 Sep 06
1
[PATCH net-next 08/11] tun: switch to new type of msg_control
..., struct msghdr *m,
> if (unlikely(len < ETH_HLEN))
> goto err;
>
> - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
> + if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
> struct iov_iter i;
>
> copylen = vnet_hdr.hdr_len ?
> @@ -724,11 +724,11 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
> tap = rcu_dereference(q->tap);
> /* copy skb_ubuf_info for callback when skb has no error */
> if (zerocopy) {
> - skb_shinfo(skb)->destructor_arg = m->ms...
2018 Sep 06
0
[PATCH net-next 08/11] tun: switch to new type of msg_control
...ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
if (unlikely(len < ETH_HLEN))
goto err;
- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
struct iov_iter i;
copylen = vnet_hdr.hdr_len ?
@@ -724,11 +724,11 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = m->msg_control;
+ skb_shinfo(...
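Both messages above concern the same change: tap_get_user() stops reaching into m->msg_control directly and instead uses the control payload extracted earlier. The sketch below shows the general shape of such a tagged msg_control wrapper; the type, field and function names are illustrative, not the kernel's.

/*
 * Illustrative sketch (not the kernel definitions): msg_control carries a
 * small tagged wrapper instead of a bare zerocopy pointer, so the same
 * field can later hold other payloads such as a batch of XDP buffers.
 */
#include <stddef.h>

enum msg_ctl_type {
    MSG_CTL_UBUF,              /* ptr is the zerocopy ubuf_info state */
    MSG_CTL_XDP_BATCH,         /* ptr is an array of XDP buffers */
};

struct msg_ctl {
    enum msg_ctl_type type;    /* what ptr points at */
    unsigned short num;        /* entry count for the batch case */
    void *ptr;                 /* the payload itself */
};

/*
 * Receiver side, mirroring the patched check above: the payload is taken
 * from the wrapper once, instead of poking at m->msg_control directly in
 * every branch.
 */
void *extract_zerocopy_state(const struct msg_ctl *ctl)
{
    if (ctl && ctl->type == MSG_CTL_UBUF)
        return ctl->ptr;
    return NULL;
}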
2018 Sep 06
22
[PATCH net-next 00/11] Vhost_net TX batching
Hi all:
This series tries to batch the submission of packets to the underlying
socket through msg_control during sendmsg(). This is done by:
1) Doing the userspace copy inside vhost_net
2) Building XDP buffs
3) Batching at most 64 (VHOST_NET_BATCH) XDP buffs and submitting them in
one go through msg_control during sendmsg()
4) Letting the underlying sockets use the XDP buffs directly when XDP is
enabled, or build skbs based on the XDP buffs
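A rough sender-side sketch of the batching described in this cover letter, under the assumption that a batch is an array of buffer pointers flushed in a single submission; submit_batch() stands in for the sendmsg() call that carries the array via msg_control, and everything except VHOST_NET_BATCH is illustrative:

#include <stddef.h>

#define VHOST_NET_BATCH 64          /* batch size from the cover letter */

struct pkt_buf {                    /* illustrative stand-in for an XDP buff */
    void *data;
    size_t len;
};

/* Hypothetical: hand the whole batch to the socket in one sendmsg(). */
void submit_batch(struct pkt_buf *batch, size_t n)
{
    (void)batch;
    (void)n;
}

/* Queue one packet; flush with a single submission when the batch is full. */
void queue_packet(struct pkt_buf *batch, size_t *count, struct pkt_buf pkt)
{
    batch[(*count)++] = pkt;
    if (*count == VHOST_NET_BATCH) {
        submit_batch(batch, *count);
        *count = 0;
    }
}

Point 4) of the cover letter is the receiving side of the same contract: the socket either feeds those buffers straight into its XDP path or builds skbs from them.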
2018 Sep 12
14
[PATCH net-next V2 00/11] vhost_net TX batching
Hi all:
This series tries to batch the submission of packets to the underlying
socket through msg_control during sendmsg(). This is done by:
1) Doing the userspace copy inside vhost_net
2) Building XDP buffs
3) Batching at most 64 (VHOST_NET_BATCH) XDP buffs and submitting them in
one go through msg_control during sendmsg()
4) Letting the underlying sockets use the XDP buffs directly when XDP is
enabled, or build skbs based on the XDP buffs