search for: wmem

Displaying 12 results from an estimated 61 matches for "wmem".

2013 Apr 11
1
[PATCH] vhost_net: remove tx polling state
...on.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -242,7 +210,7 @@ static void handle_tx(struct vhost_net *net)
 		.msg_flags = MSG_DONTWAIT,
 	};
 	size_t len, total_len = 0;
-	int err, wmem;
+	int err;
 	size_t hdr_size;
 	struct socket *sock;
 	struct vhost_ubuf_ref *uninitialized_var(ubufs);
@@ -253,19 +221,9 @@ static void handle_tx(struct vhost_net *net)
 	if (!sock)
 		return;
-	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
-	if (wmem >= sock->sk->sk_sndbuf)...
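
The check this patch deletes throttled transmission whenever the bytes already queued in the socket's send buffer (sk_wmem_alloc) reached the buffer's limit (sk_sndbuf). The same idea can be observed from userspace; below is a minimal sketch, assuming Linux's SIOCOUTQ ioctl and the SO_SNDBUF socket option. The sndbuf_full() helper is illustrative, not part of the patch.

/* Minimal userspace sketch of the backpressure test the patch removes:
 * compare bytes still queued for transmit against the send buffer size.
 * SIOCOUTQ reports untransmitted bytes for a Linux TCP/UDP socket. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>

/* Illustrative helper: returns 1 if the send queue is at its limit. */
static int sndbuf_full(int fd)
{
        int queued = 0, sndbuf = 0;
        socklen_t len = sizeof(sndbuf);

        if (ioctl(fd, SIOCOUTQ, &queued) < 0)
                return 0;       /* treat errors as "not full" */
        if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len) < 0)
                return 0;
        return queued >= sndbuf;
}
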
2013 Mar 07
3
[PATCH] vhost_net: remove tx polling state
...ic void handle_tx(struct vhost_net *net)
 {
 	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+	struct vhost_poll *poll = net->poll + VHOST_NET_VQ_TX;
 	unsigned out, in, s;
 	int head;
 	struct msghdr msg = {
@@ -256,7 +225,7 @@ static void handle_tx(struct vhost_net *net)
 	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
 	if (wmem >= sock->sk->sk_sndbuf) {
 		mutex_lock(&vq->mutex);
-		tx_poll_start(net, sock);
+		vhost_poll_start(poll, sock->file);
 		mutex_unlock(&vq->mutex);
 		return;
 	}
@@ -265,7 +234,7 @@ static void handle_tx(stru...
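
Dropping the driver-private tx polling state is safe only if vhost_poll_start() may be called while polling is already active. Below is a hedged sketch of the guard that makes the call idempotent, modeled on the generic helper in drivers/vhost/vhost.c; this is an approximate shape, and details vary across kernel versions.

/* Approximate shape of vhost_poll_start(): returning early when
 * poll->wqh is already set lets callers skip START/STOP bookkeeping. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        unsigned long mask;

        if (poll->wqh)          /* already registered: nothing to do */
                return 0;

        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_queue(poll);
        if (mask & POLLERR) {
                vhost_poll_stop(poll);
                return -EINVAL;
        }
        return 0;
}
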
2006 Jan 03
3
ip_queue module issue
...-lib/lib \
  --with-libnet-includes=/usr/local/snort-lib/include \
  --with-libnet-libraries=/usr/local/snort-lib/lib \
  --with-libipq-includes=/usr/local/iptables/include \
  --with-libipq-libraries=/usr/local/iptables/lib \
  --enable-inline

cat /proc/net/netlink
sk       Eth Pid Groups   Rmem Wmem Dump     Locks
c11c8040 0   0   00000000 0    0    00000000 2
c7ec0140 3   0   00000000 0    0    00000000 7
c11c8780 4   0   00000000 0    0    00000000 2
c7e74c40 5   0   00000000 0    0    00000000 2

Starting SNORT now: /usr/local/snort/bin/snort...
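
The Rmem and Wmem columns above are the receive and send buffer bytes currently charged to each netlink socket. A small sketch for reading them programmatically, assuming the column layout of the output quoted above (newer kernels append more columns, which the format string skips):

/* Print each netlink socket's protocol, portid, Rmem and Wmem
 * from /proc/net/netlink, per the column layout quoted above. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/net/netlink", "r");
        char line[256], sk[32], groups[32];
        int eth, pid;
        long rmem, wmem;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (!fgets(line, sizeof(line), f)) {    /* skip header row */
                fclose(f);
                return 1;
        }
        while (fscanf(f, "%31s %d %d %31s %ld %ld%*[^\n]",
                      sk, &eth, &pid, groups, &rmem, &wmem) == 6)
                printf("proto %-3d pid %-6d rmem %-6ld wmem %ld\n",
                       eth, pid, rmem, wmem);
        fclose(f);
        return 0;
}
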
2012 Dec 27
3
[PATCH 1/2] vhost_net: correct error handling in vhost_net_set_backend()
Fix the leaking of oldubufs and fd refcnt when failing to initialize the used ring.

Signed-off-by: Jason Wang <jasowang at redhat.com>
---
 drivers/vhost/net.c | 14 +++++++++++---
 1 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b2..629d6b5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -834,8 +834,10 @@ static
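
The leak class being fixed is the usual one: a failure partway through setup returns without dropping references taken earlier. Kernel code normally handles this with an unwind ladder; here is a generic hedged sketch, where set_backend(), get_socket(), ubufs_alloc(), init_used_ring() and friends are illustrative names, not taken from the patch.

/* Generic unwind-ladder sketch for the leak class fixed above: every
 * resource acquired before a failure point is released, in reverse
 * order, on the error path. All names here are illustrative. */
static int set_backend(int fd)
{
        struct socket *sock;
        struct ubufs *ubufs;
        int err;

        sock = get_socket(fd);          /* takes a reference on fd */
        if (IS_ERR(sock))
                return PTR_ERR(sock);

        ubufs = ubufs_alloc();
        if (!ubufs) {
                err = -ENOMEM;
                goto err_ubufs;
        }

        err = init_used_ring();
        if (err)
                goto err_used;          /* the path that used to leak */

        return 0;

err_used:
        ubufs_free(ubufs);
err_ubufs:
        sockfd_put(sock);               /* drop the fd refcnt */
        return err;
}
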
2009 Nov 03
11
[PATCHv7 3/3] vhost_net: a kernel-level virtio server
..._virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+	unsigned head, out, in, s;
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+		.msg_iov = vq->iov,
+		.msg_flags = MSG_DONTWAIT,
+	};
+	size_t len, total_len = 0;
+	int err, wmem;
+	size_t hdr_size;
+	struct socket *sock = rcu_dereference(vq->private_data);
+	if (!sock)
+		return;
+
+	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
+	if (wmem >= sock->sk->sk_sndbuf)
+		return;
+
+	use_mm(net->dev.mm);
+	mutex_lock(&vq->mutex);
+	vhost_no_notify...
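
handle_tx() above sends with MSG_DONTWAIT, so a full send buffer surfaces as EAGAIN instead of blocking the vhost thread; the wmem early-return exists to avoid even attempting doomed sends. A minimal userspace sketch of the same non-blocking send shape (try_send() is an illustrative helper, not from the patch):

/* Userspace sketch of the non-blocking send shape in handle_tx():
 * MSG_DONTWAIT turns a full send buffer into EAGAIN, which the caller
 * treats as "stop and poll for POLLOUT", not as a hard error. */
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int try_send(int fd, struct iovec *iov, size_t iovcnt)
{
        struct msghdr msg = {
                .msg_iov = iov,
                .msg_iovlen = iovcnt,
        };
        ssize_t n = sendmsg(fd, &msg, MSG_DONTWAIT);

        if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
                return 0;       /* buffer full: wait for POLLOUT */
        if (n < 0)
                return -errno;  /* real error */
        return 1;               /* progress made */
}
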
2013 Jan 06
2
[PATCH V3 0/2] handle polling errors
This is an updated version of the previous series, fixing the handling of polling errors in vhost/vhost_net. Currently, vhost and vhost_net ignore polling errors, which can crash the kernel when it tries to remove itself from the waitqueue after a polling failure. Fix this by checking poll->wqh before the removal and reporting an error when polling fails. Changes from v2: - check poll->wqh
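
The fix is small: remember whether registration on the waitqueue ever succeeded, and only unlink if it did. A sketch of the stop side, following the description above; this is the approximate shape of vhost_poll_stop() in drivers/vhost/vhost.c, not the verbatim patch.

/* Sketch: detach from the waitqueue only if vhost_poll_start()
 * actually registered there; poll->wqh doubles as the flag. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        if (poll->wqh) {
                remove_wait_queue(poll->wqh, &poll->wait);
                poll->wqh = NULL;
        }
}
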
2010 Apr 19
2
[PATCH v4] Add mergeable RX bufs support to vhost
...ivers/vhost/net.c net-next-v4/drivers/vhost/net.c
--- net-next-p0/drivers/vhost/net.c	2010-03-22 12:04:38.000000000 -0700
+++ net-next-v4/drivers/vhost/net.c	2010-04-19 14:23:38.000000000 -0700
@@ -108,7 +108,7 @@ static void handle_tx(struct vhost_net *
 	};
 	size_t len, total_len = 0;
 	int err, wmem;
-	size_t hdr_size;
+	size_t vhost_hlen;
 	struct socket *sock = rcu_dereference(vq->private_data);
 	if (!sock)
 		return;
@@ -127,13 +127,13 @@ static void handle_tx(struct vhost_net *
 	if (wmem < sock->sk->sk_sndbuf / 2)
 		tx_poll_stop(net);
-	hdr_size = vq->hdr_size;
+	vhost_...
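
The hdr_size to vhost_hlen rename reflects that with mergeable buffers the per-packet header grows a num_buffers field, so the length vhost must strip or prepend changes. A tiny userspace check, assuming the Linux UAPI header <linux/virtio_net.h> is installed:

/* Print the two virtio-net header sizes; the difference is why a
 * single hdr_size no longer suffices once VIRTIO_NET_F_MRG_RXBUF
 * is negotiated. */
#include <stdio.h>
#include <linux/virtio_net.h>

int main(void)
{
        printf("virtio_net_hdr:           %zu bytes\n",
               sizeof(struct virtio_net_hdr));
        printf("virtio_net_hdr_mrg_rxbuf: %zu bytes\n",
               sizeof(struct virtio_net_hdr_mrg_rxbuf));
        return 0;
}
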
2003 Jul 05
13
HTB doesn't respect rate values
Hi, machine: AMD K6 200 MHz. Linux distribution: Mandrake 8.1. Kernel: compiled 2.4.21, applied this: #define PSCHED_CLOCK_SOURCE PSCHED_CPU in file linux/include/net/pkt_sched.h before compiling the kernel (described on http://www.docum.org/stef.coene/qos/faq/cache/40.html). Bandwidth on eth0: 128kbit. The simplest configuration - 122kbit guaranteed for WWW (sport 80) and
2010 Apr 26
1
[PATCH v6] Add mergeable rx buffer support to vhost_net
...ivers/vhost/net.c net-next-v6/drivers/vhost/net.c
--- net-next-v0/drivers/vhost/net.c	2010-04-24 21:36:54.000000000 -0700
+++ net-next-v6/drivers/vhost/net.c	2010-04-26 01:13:04.000000000 -0700
@@ -109,7 +109,7 @@ static void handle_tx(struct vhost_net *
 	};
 	size_t len, total_len = 0;
 	int err, wmem;
-	size_t hdr_size;
+	size_t vhost_hlen;
 	struct socket *sock = rcu_dereference(vq->private_data);
 	if (!sock)
 		return;
@@ -128,13 +128,13 @@ static void handle_tx(struct vhost_net *
 	if (wmem < sock->sk->sk_sndbuf / 2)
 		tx_poll_stop(net);
-	hdr_size = vq->hdr_size;
+	vhost_...
2010 Apr 23
1
[PATCHv5] add mergeable receiver buffers support to vhost
...ivers/vhost/net.c net-next-v5/drivers/vhost/net.c
--- net-next-v0/drivers/vhost/net.c	2010-04-22 11:31:57.000000000 -0700
+++ net-next-v5/drivers/vhost/net.c	2010-04-22 12:41:17.000000000 -0700
@@ -109,7 +109,7 @@ static void handle_tx(struct vhost_net *
 	};
 	size_t len, total_len = 0;
 	int err, wmem;
-	size_t hdr_size;
+	size_t vhost_hlen;
 	struct socket *sock = rcu_dereference(vq->private_data);
 	if (!sock)
 		return;
@@ -128,13 +128,13 @@ static void handle_tx(struct vhost_net *
 	if (wmem < sock->sk->sk_sndbuf / 2)
 		tx_poll_stop(net);
-	hdr_size = vq->hdr_size;
+	vhost_...
2011 Sep 09
1
Slow performance - 4 hosts, 10 gigabit ethernet, Gluster 3.2.3
Hi everyone, I am seeing slower-than-expected performance in Gluster 3.2.3 between 4 hosts with 10 gigabit eth between them all. Each host has 4x 300GB SAS 15K drives in RAID10, 6-core Xeon E5645 @ 2.40GHz and 24GB RAM running Ubuntu 10.04 64-bit (I have also tested with Scientific Linux 6.1 and Debian Squeeze - same results on those as well). All of the hosts mount the volume using the FUSE
2010 Mar 03
1
[RFC][PATCH 2/3] vhost-net: handle vnet_hdr processing for MRG_RX_BUF
...et-next-p1/drivers/vhost/net.c net-next-p2/drivers/vhost/net.c
--- net-next-p1/drivers/vhost/net.c	2010-03-01 11:44:22.000000000 -0800
+++ net-next-p2/drivers/vhost/net.c	2010-03-02 13:01:34.000000000 -0800
@@ -109,7 +109,6 @@
 	};
 	size_t len, total_len = 0;
 	int err, wmem;
-	size_t hdr_size;
 	struct socket *sock = rcu_dereference(vq->private_data);
 	if (!sock)
 		return;
@@ -124,7 +123,6 @@
 	if (wmem < sock->sk->sk_sndbuf * 2)
 		tx_poll_stop(net);
-	hdr_size = vq->hdr_size;
 	for (;...
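
Across the diffs above, tx polling starts when wmem reaches sk_sndbuf and stops only after the buffer drains below a lower threshold (sk_sndbuf / 2 in the v4-v6 mergeable-buffer patches). That asymmetry is deliberate hysteresis: it keeps the path from flapping between states on small fluctuations. A generic sketch of the idea, with illustrative names:

/* Generic hysteresis sketch for the tx-poll thresholds above:
 * turn polling on at the high-water mark, off only below the
 * low-water mark, so small fluctuations don't flap the state. */
enum tx_poll_state { TX_POLL_OFF, TX_POLL_ON };

static enum tx_poll_state update_tx_poll(enum tx_poll_state state,
                                         long wmem, long sndbuf)
{
        if (state == TX_POLL_OFF && wmem >= sndbuf)
                return TX_POLL_ON;      /* full: wait for POLLOUT */
        if (state == TX_POLL_ON && wmem < sndbuf / 2)
                return TX_POLL_OFF;     /* drained: resume sending */
        return state;
}
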