search for: zero_copy

Displaying 14 results from an estimated 24 matches for "zero_copy".

2019 May 16
2
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...58:36PM +0200, Stefano Garzarella wrote: > +struct virtio_vsock_buf { Please add a comment describing the purpose of this struct and to differentiate its use from struct virtio_vsock_pkt. > +static struct virtio_vsock_buf * > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > +{ > + struct virtio_vsock_buf *buf; > + > + if (pkt->len == 0) > + return NULL; > + > + buf = kzalloc(sizeof(*buf), GFP_KERNEL); > + if (!buf) > + return NULL; > + > + /* If the buffer in the virtio_vsock_pkt is full, we can move it to > + * the new vir...
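The excerpt above cuts the helper off mid-body. Below is a minimal sketch of how the rest of virtio_transport_alloc_buf plausibly continues, based only on the logic visible in the excerpts (move the buffer when it is completely full, otherwise copy the valid bytes); the fields of struct virtio_vsock_buf (a list node, addr, len) are assumptions for illustration, not quoted from the patch.

/* Sketch only -- field names below are assumed, not quoted from the patch. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct virtio_vsock_buf {
	struct list_head list;	/* assumed: queued on the socket's rx list */
	void *addr;		/* assumed: payload, possibly taken over from the pkt */
	u32 len;
};

static struct virtio_vsock_buf *
virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy)
{
	struct virtio_vsock_buf *buf;

	if (pkt->len == 0)
		return NULL;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	if (zero_copy && pkt->len == pkt->buf_len) {
		/* The packet's buffer is completely full: steal it instead of
		 * copying, since the new owner cannot use more memory than was
		 * already allocated for the packet.
		 */
		buf->addr = pkt->buf;
		pkt->buf = NULL;
	} else {
		/* Partially filled buffer: copy only the valid bytes so the
		 * per-socket accounting reflects the real payload size.
		 */
		buf->addr = kmalloc(pkt->len, GFP_KERNEL);
		if (!buf->addr) {
			kfree(buf);
			return NULL;
		}
		memcpy(buf->addr, pkt->buf, pkt->len);
	}
	buf->len = pkt->len;

	return buf;
}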
2019 May 15
2
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...>>> @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, >>>>> return NULL; >>>>> } >>>>> +static struct virtio_vsock_buf * >>>>> +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) >>>>> +{ >>>>> + struct virtio_vsock_buf *buf; >>>>> + >>>>> + if (pkt->len == 0) >>>>> + return NULL; >>>>> + >>>>> + buf = kzalloc(sizeof(*buf), GFP_KERNEL); >>>>> + if (!b...
2019 May 14
3
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...err) >>> goto out; >>> @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, >>> return NULL; >>> } >>> +static struct virtio_vsock_buf * >>> +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) >>> +{ >>> + struct virtio_vsock_buf *buf; >>> + >>> + if (pkt->len == 0) >>> + return NULL; >>> + >>> + buf = kzalloc(sizeof(*buf), GFP_KERNEL); >>> + if (!buf) >>> + return NULL; >>> + >>> + /...
2019 May 13
2
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...->buf, info->msg, len); > if (err) > goto out; > @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, > return NULL; > } > > +static struct virtio_vsock_buf * > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > +{ > + struct virtio_vsock_buf *buf; > + > + if (pkt->len == 0) > + return NULL; > + > + buf = kzalloc(sizeof(*buf), GFP_KERNEL); > + if (!buf) > + return NULL; > + > + /* If the buffer in the virtio_vsock_pkt is full, we can move it to > + * the new vir...
2019 May 17
0
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...irtio_vsock_buf { > > Please add a comment describing the purpose of this struct and to > differentiate its use from struct virtio_vsock_pkt. > Sure, I'll fix it. > > +static struct virtio_vsock_buf * > > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > > +{ > > + struct virtio_vsock_buf *buf; > > + > > + if (pkt->len == 0) > > + return NULL; > > + > > + buf = kzalloc(sizeof(*buf), GFP_KERNEL); > > + if (!buf) > > + return NULL; > > + > > + /* If the buffer in the virtio_vso...
2019 May 28
0
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...> > On Mon, May 13, 2019 at 05:58:53PM +0800, Jason Wang wrote: > > > > > On 2019/5/10 8:58 PM, Stefano Garzarella wrote: > > > > > > +static struct virtio_vsock_buf * > > > > > > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > > > > > > +{ > > > > > > + struct virtio_vsock_buf *buf; > > > > > > + > > > > > > + if (pkt->len == 0) > > > > > > + return NULL; > > > > > > + > > > > > > + buf = k...
2014 Feb 26
2
[PATCH net] vhost: net: switch to use data copy if pending DMAs exceed the limit
...a locks. >> >> Haven't thought this deeply, but another possible solution is to rcuify >> destructor_arg and assign it to NULL during vhost_net removal. > > Xen handles it with a timer: for skbs that have been out for delivery for a > while, netback exchanges the zero_copy skb's pages with dom0's pages. > > But there is still a race between another host process handling the skb > and netback exchanging its pages (this problem has been confirmed by testing), > > and Xen hasn't solved this problem yet, because if anyone wants to solve > this problem...
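The "rcuify destructor_arg" suggestion quoted above can be illustrated with a small sketch: protect the pointer stored in destructor_arg with RCU so that a zero-copy completion arriving after vhost_net teardown sees NULL instead of a freed context. This is purely a conceptual illustration under assumed names (zc_ctx, zc_skb_state, zc_complete, zc_teardown); it is not the vhost-net or netback code.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct zc_ctx {
	atomic_t pending;		/* assumed bookkeeping: in-flight zero-copy buffers */
};

struct zc_skb_state {
	struct zc_ctx __rcu *ctx;	/* what destructor_arg would point at */
};

/* Completion path: may run long after the device has gone away. */
static void zc_complete(struct zc_skb_state *st)
{
	struct zc_ctx *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(st->ctx);
	if (ctx)			/* still alive: account the completion */
		atomic_dec(&ctx->pending);
	rcu_read_unlock();		/* NULL means teardown already happened */
}

/* Teardown path: detach the context, wait for readers, then free it. */
static void zc_teardown(struct zc_skb_state *st, struct zc_ctx *ctx)
{
	rcu_assign_pointer(st->ctx, NULL);
	synchronize_rcu();		/* late completions now observe NULL */
	kfree(ctx);
}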
2019 May 12
1
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...g(pkt->buf, info->msg, len); > if (err) > goto out; > @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, > return NULL; > } > > +static struct virtio_vsock_buf * > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > +{ > + struct virtio_vsock_buf *buf; > + > + if (pkt->len == 0) > + return NULL; > + > + buf = kzalloc(sizeof(*buf), GFP_KERNEL); > + if (!buf) > + return NULL; > + > + /* If the buffer in the virtio_vsock_pkt is full, we can move it to > + * the new vir...
2014 Feb 26
2
[PATCH net] vhost: net: switch to use data copy if pending DMAs exceed the limit
On 02/25/2014 09:57 PM, Michael S. Tsirkin wrote: > On Tue, Feb 25, 2014 at 02:53:58PM +0800, Jason Wang wrote: >> We used to stop the handling of tx when the number of pending DMAs >> exceeds VHOST_MAX_PEND. This is used to reduce the memory occupation >> of both host and guest. But it was too aggressive in some cases, since >> any delay or blocking of a single packet
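The change described in this thread -- keep transmitting but fall back to copying once too many zero-copy DMAs are outstanding, instead of stopping tx at VHOST_MAX_PEND -- amounts to a per-packet decision like the sketch below. The threshold value and helper name are assumptions for illustration, not the actual drivers/vhost/net.c code.

/* Policy sketch only; GOODCOPY_LEN and tx_use_zerocopy are assumed names. */
#define VHOST_MAX_PEND	128	/* cap on outstanding zero-copy DMAs */
#define GOODCOPY_LEN	256	/* hypothetical small-packet threshold */

static bool tx_use_zerocopy(unsigned int pending_dmas, size_t len)
{
	/* Old behaviour: stop handling tx once pending_dmas hits the limit.
	 * New behaviour: keep going, but copy the data instead of pinning
	 * guest pages, so one slow DMA completion cannot stall the whole ring.
	 */
	return len >= GOODCOPY_LEN && pending_dmas < VHOST_MAX_PEND;
}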
2019 May 13
0
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...); > > if (err) > > goto out; > > @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, > > return NULL; > > } > > +static struct virtio_vsock_buf * > > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > > +{ > > + struct virtio_vsock_buf *buf; > > + > > + if (pkt->len == 0) > > + return NULL; > > + > > + buf = kzalloc(sizeof(*buf), GFP_KERNEL); > > + if (!buf) > > + return NULL; > > + > > + /* If the buffer in the virtio_vso...
2014 Feb 26
0
[PATCH net] vhost: net: switch to use data copy if pending DMAs exceed the limit
...; >>Haven't thought this deeply, but another possible solution is to rcuify > >>destructor_arg and assign it to NULL during vhost_net removal. > > > >Xen handles it with a timer: for skbs that have been out for delivery for a > >while, netback exchanges the zero_copy skb's pages with dom0's pages. > > > >But there is still a race between another host process handling the skb > >and netback exchanging its pages (this problem has been confirmed by testing), > > > >and Xen hasn't solved this problem yet, because if anyone wants to s...
2014 Feb 26
0
[PATCH net] vhost: net: switch to use data copy if pending DMAs exceed the limit
...destroy them may need extra locks. > > Haven't thought this deeply, but another possible solution is to rcuify > destructor_arg and assign it to NULL during vhost_net removal. Xen handles it with a timer: for skbs that have been out for delivery for a while, netback exchanges the zero_copy skb's pages with dom0's pages. But there is still a race between another host process handling the skb and netback exchanging its pages (this problem has been confirmed by testing), and Xen hasn't solved this problem yet, because if anyone wants to solve this problem completely, a page lock is...
2019 May 14
0
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...gt; > > > @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, > > > > return NULL; > > > > } > > > > +static struct virtio_vsock_buf * > > > > +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) > > > > +{ > > > > + struct virtio_vsock_buf *buf; > > > > + > > > > + if (pkt->len == 0) > > > > + return NULL; > > > > + > > > > + buf = kzalloc(sizeof(*buf), GFP_KERNEL); > > > > + if (!buf) >...
2019 May 10
0
[PATCH v2 1/8] vsock/virtio: limit the memory used per-socket
...buf_len = len; + err = memcpy_from_msg(pkt->buf, info->msg, len); if (err) goto out; @@ -86,6 +89,46 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, return NULL; } +static struct virtio_vsock_buf * +virtio_transport_alloc_buf(struct virtio_vsock_pkt *pkt, bool zero_copy) +{ + struct virtio_vsock_buf *buf; + + if (pkt->len == 0) + return NULL; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return NULL; + + /* If the buffer in the virtio_vsock_pkt is full, we can move it to + * the new virtio_vsock_buf avoiding the copy, because we are sure that +...