Displaying 20 results from an estimated 22 matches for "__tun_get".
2009 Apr 16
1
[1/2] tun: Only free a netdev when all tun descriptors are closed
... >
>
>
> Cheers,
>
>
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
>
> .....
>
> @@ -1275,20 +1278,18 @@ static int tun_chr_close(struct inode *inode, struct file *file)
> struct tun_file *tfile = file->private_data;
> struct tun_struct *tun = __tun_get(tfile);
>
> -
> if (tun) {
> - DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
> -
> - rtnl_lock();
> - __tun_detach(tun);
> -
> /* If desireable, unregister the netdevice. */
> - if (!(tun->flags & TUN_PERSIST)) {
> - sock_...
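The hunk above removes the unconditional unregister from tun_chr_close(); the point of the series is that the netdev has to outlive every still-open tun descriptor. A minimal sketch of that idea, assuming a plain reference count rather than the sock-based counting the patch itself relies on; all names below are illustrative:

#include <linux/atomic.h>
#include <linux/slab.h>

/* Illustration only: each open descriptor holds one reference on the shared
 * device state, and teardown happens when the last reference is dropped,
 * not unconditionally in close(). */
struct tun_dev_state {
	atomic_t refcnt;
	/* ... netdev pointer, flags, ... */
};

static void tun_state_hold(struct tun_dev_state *st)
{
	atomic_inc(&st->refcnt);		/* taken on open()/attach */
}

static void tun_state_put(struct tun_dev_state *st)
{
	if (atomic_dec_and_test(&st->refcnt)) {
		/* last descriptor closed: only now unregister the netdev
		 * (unless it is persistent) and free the state */
		kfree(st);
	}
}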
2011 Aug 12
11
[net-next RFC PATCH 0/7] multiqueue support for tun/tap
As multi-queue NICs are commonly used in high-end servers,
the current single-queue tap cannot satisfy the requirement
of scaling guest network performance as the number of vcpus
increases. So the following series implements multiple queue
support in tun/tap.
In order to take advantage of this, a multi-queue capable
driver and qemu are also needed. I just rebased the latest
version of
2009 Nov 04
0
[PATCHv8 1/3] tun: export underlying socket
...v,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;...
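The hunk above moves the __tun_get() lookup and the iov_length() check out of the read path; tun_do_read() now receives the already-resolved tun and the length from its caller. A minimal sketch of the caller-side shape this implies; the function body below is an assumption for illustration, and only __tun_get(), tun_put(), iov_length() and tun_do_read() come from the snippet itself:

static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);	/* take a reference */
	ssize_t len, ret;

	if (!tun)
		return -EBADFD;				/* device already gone */

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
out:
	tun_put(tun);					/* drop the reference */
	return ret;
}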
2009 Nov 03
1
[PATCHv7 1/3] tun: export underlying socket
...v,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;...
2017 Nov 30
2
[PATCH net,stable v2] vhost: fix skb leak in handle_rx()
...d.
>
> Jason, what do you think?
>
tun_recvmsg() has the following check:
static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret;
	if (!tun)
		return -EBADFD;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out;
	}
And tun_do_read() has:
	if (!iov_iter_count(to))
		return 0;
So I think we need free skb in those cases.
Th...
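In other words, when vhost has already pulled an skb out of the batch, these early returns in tun would leak it unless they free it. A minimal sketch of what "free skb in those cases" could look like, assuming the dequeued skb is handed over via msg_control; this illustrates the idea only and is not the posted fix:

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	struct sk_buff *skb = m->msg_control;	/* skb the caller already dequeued (assumed) */
	int ret = 0;

	if (!tun) {
		ret = -EBADFD;
		goto out_free_skb;	/* device gone: still free the skb */
	}
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;	/* bad flags: free the skb and drop the ref */
	}

	/* ... normal path: tun_do_read() consumes (or frees) the skb ... */
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free_skb:
	if (skb)
		kfree_skb(skb);
	return ret;
}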
2009 Nov 02
1
[PATCHv6 1/3] tun: export underlying socket
...v,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;...
2016 Jun 30
0
[PATCH net-next V3 6/6] tun: switch to use skb array for tx
...*/
+ skb = tun_ring_recv(tfile, noblock, &err);
if (!skb)
return err;
@@ -1574,8 +1629,25 @@ out:
return ret;
}
+static int tun_peek_len(struct socket *sock)
+{
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun;
+ int ret = 0;
+
+ tun = __tun_get(tfile);
+ if (!tun)
+ return 0;
+
+ ret = skb_array_peek_len(&tfile->tx_array);
+ tun_put(tun);
+
+ return ret;
+}
+
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
+ .peek_len = tun_peek_len,
.sendmsg = tun_sendmsg,
.recvmsg = tun_re...
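The new .peek_len op lets a consumer (vhost-net is the intended one) ask how long the head packet is without dequeuing it. A minimal caller-side sketch; the helper name is hypothetical and the zero fallback is an assumption:

#include <linux/net.h>

/* Hypothetical helper: report the size of the socket's next packet via the
 * proto_ops->peek_len hook that tun_socket_ops now provides. */
static int next_packet_len(struct socket *sock)
{
	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);
	return 0;	/* no peek support: caller must fall back to peeking the queue */
}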
2016 Jun 17
0
[PATCH net-next V2] tun: introduce tx skb ring
...9,8 +1708,39 @@ out:
> return ret;
> }
>
> +static int tun_peek_len(struct socket *sock)
> +{
> + struct tun_file *tfile = container_of(sock, struct tun_file, socket);
> + struct sock *sk = sock->sk;
> + struct tun_struct *tun;
> + int ret = 0;
> +
> + tun = __tun_get(tfile);
> + if (!tun)
> + return 0;
> +
> + if (tun->flags & IFF_TX_ARRAY) {
> + ret = skb_array_peek_len(&tfile->tx_array);
> + } else {
> + struct sk_buff *head;
> +
> + spin_lock_bh(&sk->sk_receive_queue.lock);
> + head = skb_peek(&sk->...
2016 Jun 15
7
[PATCH net-next V2] tun: introduce tx skb ring
...&off, &err);
if (!skb)
return err;
@@ -1629,8 +1708,39 @@ out:
return ret;
}
+static int tun_peek_len(struct socket *sock)
+{
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct sock *sk = sock->sk;
+ struct tun_struct *tun;
+ int ret = 0;
+
+ tun = __tun_get(tfile);
+ if (!tun)
+ return 0;
+
+ if (tun->flags & IFF_TX_ARRAY) {
+ ret = skb_array_peek_len(&tfile->tx_array);
+ } else {
+ struct sk_buff *head;
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ head = skb_peek(&sk->sk_receive_queue);
+ if (likely(head)) {
+ r...
2017 Nov 29
4
[PATCH net,stable v2] vhost: fix skb leak in handle_rx()
From: Wei Xu <wexu at redhat.com>
Matthew found a roughly 40% tcp throughput regression with commit
c67df11f(vhost_net: try batch dequing from skb array) as discussed
in the following thread:
https://www.mail-archive.com/netdev at vger.kernel.org/msg187936.html
Eventually we figured out that it was a skb leak in handle_rx()
when sending packets to the VM. This usually happens when a guest
2016 Jun 30
9
[PATCH net-next V3 0/6] switch to use tx skb array in tun
Hi all:
This series tries to switch tun to use an skb array. This is used to
eliminate the spinlock contention between producer and consumer. The
conversion was straightforward: just introduce a tx skb array and use
it instead of sk_receive_queue.
A minor issue is to keep the tx_queue_len behaviour, since tun used to
use it for the length of sk_receive_queue. This is done through:
- add the
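The cover letter is cut off above, but the core idea, replacing sk_receive_queue with a per-queue skb array so producer and consumer stop contending on a single queue spinlock, maps onto the skb_array helpers already visible in the 6/6 hunk (skb_array_peek_len). A minimal sketch of that producer/consumer split, assuming the standard skb_array API; the function names are illustrative:

#include <linux/errno.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

/* xmit side: produce into the per-queue ring instead of appending to
 * sk_receive_queue. */
static int queue_tx_skb(struct skb_array *ring, struct sk_buff *skb)
{
	if (skb_array_produce(ring, skb)) {	/* non-zero means the ring is full */
		kfree_skb(skb);
		return -ENOSPC;
	}
	return 0;
}

/* read side: the char-dev/vhost reader consumes from the ring and never
 * takes the lock the producer side uses. */
static struct sk_buff *dequeue_tx_skb(struct skb_array *ring)
{
	return skb_array_consume(ring);		/* NULL when the ring is empty */
}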