search for: tun_sock

Displaying 14 results from an estimated 14 matches for "tun_sock".

2009 Nov 04
0
[PATCHv8 1/3] tun: export underlying socket
...+
+        if (!tun)
+                return -EBADFD;
+        len = iov_length(iv, count);
+        if (len < 0) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+        ret = min_t(ssize_t, ret, len);
 out:
         tun_put(tun);
         return ret;
@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk)
                 return;
         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                wake_up_interruptible_sync(sk->sk_sleep);
+                wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+                                                POLLWRNORM | POLLWRBAND);
         tun = container_of(sk, struct tun_sock, sk...
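The point of the series is to let another kernel consumer (such as vhost-net) obtain the socket that backs a tun file descriptor and drive it directly; the hunk above also narrows write-space wakeups to writers (POLLOUT) so sleeping readers are not woken needlessly. Below is a minimal sketch of how a consumer might resolve a tun fd to its socket using the tun_get_socket() helper this series exports; the wrapper function and its error handling are illustrative, not taken from the patch.

#include <linux/err.h>
#include <linux/file.h>
#include <linux/if_tun.h>
#include <linux/net.h>

/* Illustrative consumer-side helper: map a userspace tun fd to the
 * struct socket exported by this series. */
static struct socket *example_get_tun_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);

        /* tun_get_socket() returns ERR_PTR(-EINVAL) if the file is
         * not a tun/tap file. */
        sock = tun_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}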
2009 Nov 03
1
[PATCHv7 1/3] tun: export underlying socket
...+
+        if (!tun)
+                return -EBADFD;
+        len = iov_length(iv, count);
+        if (len < 0) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+        ret = min_t(ssize_t, ret, len);
 out:
         tun_put(tun);
         return ret;
@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk)
                 return;
         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                wake_up_interruptible_sync(sk->sk_sleep);
+                wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+                                                POLLWRNORM | POLLWRBAND);
         tun = container_of(sk, struct tun_sock, sk...
2009 Nov 02
1
[PATCHv6 1/3] tun: export underlying socket
...+
+        if (!tun)
+                return -EBADFD;
+        len = iov_length(iv, count);
+        if (len < 0) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+        ret = min_t(ssize_t, ret, len);
 out:
         tun_put(tun);
         return ret;
@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk)
                 return;
         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                wake_up_interruptible_sync(sk->sk_sleep);
+                wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+                                                POLLWRNORM | POLLWRBAND);
         tun = container_of(sk, struct tun_sock, sk...
2009 Apr 16
1
[1/2] tun: Only free a netdev when all tun descriptors are closed
On Thu, Apr 16, 2009 at 01:08:18AM -0000, Herbert Xu wrote:
> On Wed, Apr 15, 2009 at 10:38:34PM +0800, Herbert Xu wrote:
> >
> > So how about this? We replace the dev destructor with our own that
> > doesn't immediately call free_netdev. We only call free_netdev once
> > all tun fd's attached to the device have been closed.
>
> Here's the patch.
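A minimal sketch of the deferred-free idea described above, assuming a hypothetical atomic_t refcnt field in struct tun_struct (the real patch differs in detail): the device destructor only calls free_netdev() once the last reference is gone.

/* Sketch only: assumes a hypothetical atomic_t refcnt in struct tun_struct. */
static void tun_free_netdev(struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);

        /* Free the netdev only when the last reference goes away. */
        if (atomic_dec_and_test(&tun->refcnt))
                free_netdev(dev);
}

static void tun_setup(struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);

        atomic_set(&tun->refcnt, 1);            /* held by the device itself */
        dev->destructor = tun_free_netdev;      /* instead of plain free_netdev */
}

Each attached tun fd would then take a reference with atomic_inc() when it attaches and drop it through the same atomic_dec_and_test()/free_netdev() pair when it closes.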
2009 Apr 02
7
[Lguest] [PATCH 4/5] lguest: use KVM hypercalls
On Fri, 27 03 2009 at 10:22 +1030, Rusty Russell wrote:
> From: Matias Zabaljauregui <zabaljauregui at gmail.com>
>
> Impact: cleanup
>
> This patch allows us to use KVM hypercalls

Something has broken in relation to this change. I'm not sure whether it is this change itself or a later one, but I get the following error when using lguest: lguest: unhandled trap 6 at 0x418726
2011 Aug 12
11
[net-next RFC PATCH 0/7] multiqueue support for tun/tap
As multi-queue NICs are commonly used in high-end servers, the current single-queue tap cannot satisfy the requirement of scaling guest network performance as the number of vcpus increases. So the following series implements multiple-queue support in tun/tap. To take advantage of this, a multi-queue-capable driver and qemu are also needed. I just rebased the latest version of
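For context, the multi-queue tap interface that eventually landed upstream exposes this through the IFF_MULTI_QUEUE flag: userspace opens /dev/net/tun once per queue and attaches each fd to the same device name. A userspace sketch assuming that final API (this RFC's exact interface may differ):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open one queue fd of a multi-queue tap device; calling this again
 * with the same name attaches an additional queue. */
static int tap_open_queue(const char *name)
{
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
        strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}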
2011 Dec 05
8
[net-next RFC PATCH 0/5] Series short description
multiple queue virtio-net: flow steering through host/guest cooperation. Hello all: This is a rough series that adds guest/host cooperation for flow steering, based on Krish Kumar's multiple queue virtio-net driver patch 3/3 (http://lwn.net/Articles/467283/). The idea is simple: the backend passes the rxhash to the guest, and the guest tells the backend the hash-to-queue mapping when
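A minimal sketch of the steering table this describes, with hypothetical names and a hypothetical fixed size rather than the RFC's actual data structures: the guest reports a hash-to-queue mapping, and the backend consults it when picking the receive queue for a flow.

/* Hypothetical rxhash -> queue steering table; not the RFC's structures. */
#define STEER_TABLE_SIZE 256

static unsigned char steer_table[STEER_TABLE_SIZE];

/* Guest -> backend update: "flows with this rxhash belong on this queue". */
static void steer_update(unsigned int rxhash, unsigned char queue)
{
        steer_table[rxhash % STEER_TABLE_SIZE] = queue;
}

/* Backend receive path: steer the packet to the queue the guest asked for. */
static unsigned char steer_lookup(unsigned int rxhash)
{
        return steer_table[rxhash % STEER_TABLE_SIZE];
}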