Displaying 20 results from an estimated 434 matches for "uio_maxiov".
2014 May 13
2
[PATCH v1] vhost: avoid large order allocations
A test case which generates memory pressure while performing guest administration
fails with vhost triggering a "page allocation failure" and the guest not starting up.
After some analysis we discovered the allocation order of vhost to be responsible
for this behaviour. Thus we suggest patch 1/1, which dynamically allocates the
required memory. Please see its description for details.
Thanks,
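A minimal sketch of one standard remedy for this kind of failure (a general illustration, not necessarily what patch 1/1 does): let a large, long-lived allocation fall back to vmalloc when a physically contiguous kmalloc of that size cannot be satisfied. kvmalloc()/kvfree() are later kernel helpers used here purely to show the idea; example_alloc_state() is a hypothetical name.

#include <linux/mm.h>
#include <linux/slab.h>

/* Illustration only: avoid a failing high-order kmalloc by letting the
 * allocator fall back to vmalloc for a large structure. */
static void *example_alloc_state(size_t size)
{
	return kvmalloc(size, GFP_KERNEL);
}

static void example_free_state(void *p)
{
	kvfree(p);
}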
2014 May 13
0
[PATCH v1] vhost: avoid large order allocations
...index be414d2..e3a9a68 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -374,7 +374,7 @@ static void handle_tx(struct vhost_net *net)
> break;
>
> head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
> - ARRAY_SIZE(vq->iov),
> + UIO_MAXIOV,
> &out, &in,
> NULL, NULL);
> /* On error, stop handling until the next kick. */
> @@ -506,7 +506,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
> goto err;
> }
> r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
> -...
2010 Sep 14
1
[PATCH] vhost: max s/g to match qemu
Qemu supports up to UIO_MAXIOV s/g so we have to match that because guest
drivers may rely on this.
Allocate indirect and log arrays dynamically to avoid using too much contiguous
memory, and make the length of the hdr array match the header length, since each
iovec entry has at least one byte.
Test with copying large files w/ and...
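A rough sketch of the shape this description suggests (illustrative names and types, not the patch itself): the indirect and log arrays get UIO_MAXIOV entries each, allocated at init time rather than embedded, while the hdr array only needs one iovec per header byte.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uio.h>

#define EXAMPLE_VNET_HDR_LEN 12	/* e.g. a merged-rxbuf virtio-net header */

struct example_vq {
	struct iovec *indirect;			/* UIO_MAXIOV entries, allocated below */
	u64 *log;				/* UIO_MAXIOV entries; u64 stands in for the real log type */
	struct iovec hdr[EXAMPLE_VNET_HDR_LEN];	/* at most one entry per header byte */
};

static int example_vq_init(struct example_vq *vq)
{
	vq->indirect = kmalloc_array(UIO_MAXIOV, sizeof(*vq->indirect), GFP_KERNEL);
	vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), GFP_KERNEL);
	if (!vq->indirect || !vq->log) {
		kfree(vq->indirect);
		kfree(vq->log);
		return -ENOMEM;
	}
	return 0;
}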
2014 May 13
2
[PATCH v1] vhost: avoid large order allocations
...ivers/vhost/net.c b/drivers/vhost/net.c
index be414d2..e3a9a68 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -374,7 +374,7 @@ static void handle_tx(struct vhost_net *net)
break;
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
- ARRAY_SIZE(vq->iov),
+ UIO_MAXIOV,
&out, &in,
NULL, NULL);
/* On error, stop handling until the next kick. */
@@ -506,7 +506,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
goto err;
}
r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
- ARRAY_SIZE(vq->iov) - seg, &out...
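One plausible reading of the two ARRAY_SIZE(vq->iov) -> UIO_MAXIOV hunks (an inference from the diff, not stated in the excerpt): once vq->iov becomes a pointer to a separately allocated buffer instead of an embedded array, the kernel's ARRAY_SIZE() macro can no longer be applied to it, so the capacity has to be passed explicitly.

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/uio.h>		/* UIO_MAXIOV, struct iovec */

static struct iovec embedded[UIO_MAXIOV];

static size_t example_capacity(void)
{
	/* Works only for a true array; expands to 1024 here. Applied to a
	 * pointer, __must_be_array() makes the build fail, hence passing
	 * UIO_MAXIOV explicitly once the array is allocated dynamically. */
	return ARRAY_SIZE(embedded);
}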
2013 Sep 04
2
[PATCH V3 4/6] vhost_net: determine whether or not to use zerocopy at one time
...t;hdr, s), hdr_size);
> break;
> }
> - zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
> - nvq->upend_idx != nvq->done_idx);
> +
> + zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
> + && (nvq->upend_idx + 1) % UIO_MAXIOV !=
> + nvq->done_idx
Thinking about this, this looks strange.
The original idea was that once we start doing zcopy, we keep
using the heads ring even for short packets until no zcopy is outstanding.
What's the logic behind (nvq->upend_idx + 1) % UIO_MAXIOV != nvq->done_id...
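For readers following the thread, a standalone sketch of what the questioned expression computes (plain C, not the driver): upend_idx and done_idx walk a UIO_MAXIOV-slot ring of outstanding zerocopy buffers, and the expression is the usual "one free slot left" ring-full test.

#include <stdbool.h>

#define SLOTS 1024	/* stands in for UIO_MAXIOV */

/* Standalone illustration: true while advancing upend_idx by one slot
 * would not collide with done_idx, i.e. the ring is not yet full. */
static bool ring_has_room(int upend_idx, int done_idx)
{
	return (upend_idx + 1) % SLOTS != done_idx;
}

Under that reading, the new check gates zerocopy on the ring having room rather than on whether any zerocopy is already outstanding, which is exactly what the quoted question is probing.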
2017 Sep 28
9
[PATCH net-next] vhost_net: do not stall on zerocopy depletion
...rs/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -436,8 +436,8 @@ static bool vhost_exceeds_maxpend(struct vhost_net *net)
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
- return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
- == nvq->done_idx;
+ return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
+ min(VHOST_MAX_PEND, vq->num >> 2);
}
/* Expects to be always run from workqueue - which acts as
@@ -480,12 +480,6 @@ static void handle_tx(struct vhost_net *net)
if (zcopy...
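A standalone illustration of the replacement check above (not the driver code): the left-hand side counts in-flight zerocopy slots on the UIO_MAXIOV ring, handling wrap-around, and the right-hand side caps them at a quarter of the virtqueue size, never exceeding VHOST_MAX_PEND.

#define SLOTS 1024	/* stands in for UIO_MAXIOV */

/* In-flight entries between done_idx and upend_idx, modulo the ring size. */
static int outstanding(int upend_idx, int done_idx)
{
	return (upend_idx + SLOTS - done_idx) % SLOTS;
}

/* Example: upend_idx = 5, done_idx = 1020 gives
 * (5 + 1024 - 1020) % 1024 = 9 entries still pending across the wrap. */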
2020 Jun 03
1
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...t; /* last used idx for outstanding DMA zerocopy buffers */
> int upend_idx;
> /* For TX, first used idx for DMA done zerocopy buffers
> - * For RX, number of batched heads
> + * For RX, number of batched bufs
> */
> int done_idx;
> + /* Outstanding user bufs. UIO_MAXIOV in length. */
> + /* TODO: we can make this smaller for sure. */
> + struct vhost_buf *bufs;
> /* Number of XDP frames batched */
> int batched_xdp;
> /* an array of userspace buffers info */
> @@ -271,6 +274,8 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)...
2017 Sep 22
0
[PATCH net-next RFC 5/5] vhost_net: basic tx virtqueue batched processing
...&msg)) {
- vq_err(vq, "Unexpected header len for TX: "
- "%zd expected %zd\n",
- len, hdr_size);
- break;
- }
- len = msg_data_left(&msg);
-
- zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
- && (nvq->upend_idx + 1) % UIO_MAXIOV !=
- nvq->done_idx
- && vhost_net_tx_select_zcopy(net);
-
- /* use msg_control to pass vhost zerocopy ubuf info to skb */
- if (zcopy_used) {
- struct ubuf_info *ubuf;
- ubuf = nvq->ubuf_info + nvq->upend_idx;
-
- vq->heads[nvq->upend_idx].id = cpu_to_v...
2020 Jun 05
2
[PATCH RFC 03/13] vhost: batching fetches
...tic int vhost_test_open(struct inode *inode, struct file *f)
>>> dev = &n->dev;
>>> vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
>>> n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
>>> - vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
>>> + vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV + 64,
>>> VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
>>> f->private_data = n;
>>> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
>>> index 8f9a07282625..ac...
2013 Sep 02
8
[PATCH V3 0/6] vhost code cleanup and minor enhancement
...sending test.
Please review.
Changes from V2:
- Typo fixes and code style fix
- Add performance gain in the commit log of patch 2/6
- Retest and update the result in patch 6/6
Changes from V1:
- Fix the zerocopy enabling check by changing the check of upend_idx != done_idx
to (upend_idx + 1) % UIO_MAXIOV == done_idx.
- Switch to use put_user() in __vhost_add_used_n() if there's only one used
- Keep the max pending check based on Michael's suggestion.
Jason Wang (6):
vhost_net: make vhost_zerocopy_signal_used() return void
vhost_net: use vhost_add_used_and_signal_n() in
vhost_zeroco...
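A compact contrast of the two checks named in the first "Changes from V1" item above (illustration only; SLOTS stands in for UIO_MAXIOV): with upend_idx == done_idx meaning nothing outstanding, the old expression tests whether the ring is non-empty, while the new one tests whether it is completely full.

#include <stdbool.h>

#define SLOTS 1024

static bool ring_non_empty(int upend_idx, int done_idx)
{
	return upend_idx != done_idx;			/* some zerocopy still pending */
}

static bool ring_full(int upend_idx, int done_idx)
{
	return (upend_idx + 1) % SLOTS == done_idx;	/* no free slot left */
}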
2020 Jun 02
0
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...,9 +112,12 @@ struct vhost_net_virtqueue {
/* last used idx for outstanding DMA zerocopy buffers */
int upend_idx;
/* For TX, first used idx for DMA done zerocopy buffers
- * For RX, number of batched heads
+ * For RX, number of batched bufs
*/
int done_idx;
+ /* Outstanding user bufs. UIO_MAXIOV in length. */
+ /* TODO: we can make this smaller for sure. */
+ struct vhost_buf *bufs;
/* Number of XDP frames batched */
int batched_xdp;
/* an array of userspace buffers info */
@@ -271,6 +274,8 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
int i;
for (i = 0; i <...
2013 Apr 27
0
[PATCH] vhost: Move vhost-net zerocopy support fields to net.c
...e(ubufs);
+}
+
+int vhost_net_set_ubuf_info(struct vhost_net *n)
+{
+ bool zcopy;
+ int i;
+
+ for (i = 0; i < n->dev.nvqs; ++i) {
+ zcopy = vhost_zcopy_mask & (0x1 << i);
+ if (!zcopy)
+ continue;
+ n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
+ UIO_MAXIOV, GFP_KERNEL);
+ if (!n->vqs[i].ubuf_info)
+ goto err;
+ }
+ return 0;
+
+err:
+ while (i--) {
+ zcopy = vhost_zcopy_mask & (0x1 << i);
+ if (!zcopy)
+ continue;
+ kfree(n->vqs[i].ubuf_info);
+ }
+ return -ENOMEM;
+}
+
+void vhost_net_reset_ubuf_info(struct vhost_net *n)
+{...
2013 Aug 30
12
[PATCH V2 0/6] vhost code cleanup and minor enhancement
...unify and simplify vhost code, especially for
zerocopy. With this series, a 5% - 10% improvement in per-CPU throughput was
seen during the netperf guest sending test.
Please review.
Changes from V1:
- Fix the zerocopy enabling check by changing the check of upend_idx != done_idx
to (upend_idx + 1) % UIO_MAXIOV == done_idx.
- Switch to use put_user() in __vhost_add_used_n() if there's only one used
- Keep the max pending check based on Michael's suggestion.
Jason Wang (6):
vhost_net: make vhost_zerocopy_signal_used() return void
vhost_net: use vhost_add_used_and_signal_n() in
vhost_zeroc...