Displaying 20 results from an estimated 88 matches for "noblock".
2016 Jun 17
0
[PATCH net-next V2] tun: introduce tx skb ring
..._empty(&sk->sk_receive_queue))
> + if (tun_queue_not_empty(tun, tfile))
> mask |= POLLIN | POLLRDNORM;
>
> if (sock_writeable(sk) ||
> @@ -1481,6 +1516,46 @@ done:
> return total;
> }
>
> +static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
> + int *err)
> +{
> + DECLARE_WAITQUEUE(wait, current);
> + struct sk_buff *skb = NULL;
> +
> + skb = skb_array_consume(&tfile->tx_array);
> + if (skb)
> + goto out;
> + if (noblock) {
> + *err = -EAGAIN;
> + goto out;
> + }
> +
> + add_...
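The excerpt is cut off right after the non-blocking check. For orientation, here is a minimal sketch of how this kind of blocking consume from the tx ring continues in the versions under review: the reader parks itself on the tun file's wait queue and retries the consume until a packet or a signal arrives. This is a sketch of the pattern, not the patch text; shutdown handling is omitted and set_current_state() stands in for the open-coded state assignment.

static struct sk_buff *ring_recv_sketch(struct tun_file *tfile, int noblock,
                                        int *err)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;
        int error = 0;

        /* Fast path: lock-free consume from the tx ring. */
        skb = skb_array_consume(&tfile->tx_array);
        if (skb)
                goto out;
        if (noblock) {
                error = -EAGAIN;
                goto out;
        }

        /* Slow path: sleep on the tun wait queue until a producer wakes us. */
        add_wait_queue(&tfile->wq.wait, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                skb = skb_array_consume(&tfile->tx_array);
                if (skb)
                        break;
                if (signal_pending(current)) {
                        error = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tfile->wq.wait, &wait);

out:
        *err = error;
        return skb;
}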
2016 Jun 30
0
[PATCH net-next V3 6/6] tun: switch to use skb array for tx
...eep(sk), wait);
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
@@ -1426,22 +1442,61 @@ done:
return total;
}
+static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
+ int *err)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb = NULL;
+
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ goto out;
+ if (noblock) {
+ *err = -EAGAIN;
+ goto out;
+ }
+
+ add_wait_queue(&tfile->wq.wait, &wait);
+ current->state = T...
2016 Jun 15
7
[PATCH net-next V2] tun: introduce tx skb ring
...file, sk_sleep(sk), wait);
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (tun_queue_not_empty(tun, tfile))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
@@ -1481,6 +1516,46 @@ done:
return total;
}
+static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
+ int *err)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb = NULL;
+
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ goto out;
+ if (noblock) {
+ *err = -EAGAIN;
+ goto out;
+ }
+
+ add_wait_queue(&tfile->wq.wait, &wait);
+ current->state = T...
2017 Mar 21
12
[PATCH net-next 0/8] vhost-net rx batching
Hi all:
This series tries to implement rx batching for vhost-net. This is done
by batching the dequeuing from the skb_array exported by the underlying
socket and passing the skb back through msg_control to finish the
userspace copy.
Tests show at most a 19% improvement in rx pps.
Please review.
Thanks
Jason Wang (8):
ptr_ring: introduce batch dequeuing
skb_array: introduce batch dequeuing
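The batch-dequeue helpers introduced by the first two patches let the consumer pull several skbs per lock acquisition instead of one. A rough illustration of how a consumer could drive the new skb_array helper (the batch size and the per-skb handler consume_one() are illustrative, not taken from the series):

static void drain_batched(struct skb_array *array)
{
        struct sk_buff *batch[64];      /* illustrative batch size */
        int i, n;

        /* One call dequeues up to ARRAY_SIZE(batch) skbs under a single
         * consumer lock, instead of locking once per packet. */
        while ((n = skb_array_consume_batched(array, batch,
                                              ARRAY_SIZE(batch))) > 0) {
                for (i = 0; i < n; i++)
                        consume_one(batch[i]);  /* hypothetical per-skb handler */
        }
}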
2016 Jun 22
0
[PATCH 3/3] vhost: device IOTLB API
...tic long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
}
#endif
+static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &...
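The read path above simply forwards the file's O_NONBLOCK flag as the noblock argument, so whether an IOTLB read blocks is decided by userspace at open() time. A hedged userspace-side illustration (the device path and flags are the usual vhost-net ones, not taken from the patch):

#include <fcntl.h>

int open_vhost_nonblocking(void)
{
        /* O_NONBLOCK here is what makes the kernel side pass noblock = 1,
         * so an empty IOTLB message queue typically yields -EAGAIN
         * rather than a sleep. */
        return open("/dev/vhost-net", O_RDWR | O_NONBLOCK);
}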
2016 Dec 30
0
[PATCH net-next V3 3/3] tun: rx batching
...able();
+ while ((skb = __skb_dequeue(&process_queue)))
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, struct iov_iter *from,
- int noblock)
+ int noblock, bool more)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -1283,10 +1314,15 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
+
#ifndef CONFIG_4...
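The batching helper these rx-batching versions iterate on accumulates skbs on a per-file list while the sender hints that more are coming, then flushes the whole list in one BH-disabled section. A sketch of that flush step (the helper name and queue parameter are illustrative; the real patch folds this into its batching function):

static void flush_process_queue(struct sk_buff_head *process_queue)
{
        struct sk_buff *skb;

        /* Disable softirqs once for the whole batch rather than per packet. */
        local_bh_disable();
        while ((skb = __skb_dequeue(process_queue)))
                netif_receive_skb(skb);
        local_bh_enable();
}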
2016 Dec 28
0
[PATCH net-next V2 3/3] tun: rx batching
...ocal_bh_enable();
+ }
+
+ return 0;
+drop:
+ spin_unlock(&queue->lock);
+ kfree_skb(skb);
+ return -EFAULT;
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, struct iov_iter *from,
- int noblock)
+ int noblock, bool more)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -1283,18 +1322,27 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
+
#ifndef CONFIG_4...
2017 Jan 18
0
[PATCH net-next V5 3/3] tun: rx batching
...kb_dequeue(&process_queue)))
+ netif_receive_skb(nskb);
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, struct iov_iter *from,
- int noblock)
+ int noblock, bool more)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -1283,9 +1321,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rxhash = skb_get_hash(skb);
#ifndef CONFIG_4KSTACKS
- local_bh_disable();
- netif_rece...
2017 Jan 06
0
[PATCH V4 net-next 3/3] tun: rx batching
...kb_dequeue(&process_queue)))
+ netif_receive_skb(nskb);
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, struct iov_iter *from,
- int noblock)
+ int noblock, bool more)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -1283,10 +1320,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
+
#ifndef CONFIG_4K...
2017 Jan 06
5
[PATCH V4 net-next 0/3] vhost_net tx batching
Hi:
This series tries to implement tx batching support for vhost. This is
done by using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over
mlx4 (noqueue) on the host:
Mpps -+%
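The hint itself is just the standard MSG_MORE flag on the in-kernel sendmsg call. A minimal sketch of the sender side, assuming the caller already knows whether more packets are pending (the series derives that from the vring state; the helper below is illustrative):

static int send_one(struct socket *sock, struct msghdr *msg, size_t len,
                    bool more_pending)
{
        /* Mark all but the last packet of a burst with MSG_MORE so the
         * backend (e.g. tap) may hold it in its batch list for a while
         * instead of delivering it immediately. */
        if (more_pending)
                msg->msg_flags |= MSG_MORE;
        else
                msg->msg_flags &= ~MSG_MORE;

        return sock->ops->sendmsg(sock, msg, len);
}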
2016 Jun 30
9
[PATCH net-next V3 0/6] switch to use tx skb array in tun
Hi all:
This series tries to switch tun to an skb array. This is used to
eliminate the spinlock contention between producer and consumer. The
conversion was straightforward: just introduce a tx skb array and use
it instead of sk_receive_queue.
A minor issue is keeping the tx_queue_len behaviour, since tun used to
use it as the length of sk_receive_queue. This is done through:
- add the
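Keeping the tx_queue_len behaviour means the ring is sized from the device's tx_queue_len and resized when that value changes (the skb_array API provides skb_array_resize for the latter). A minimal sketch of the sizing side, with an illustrative helper name:

static int tun_alloc_tx_ring(struct tun_file *tfile, struct net_device *dev)
{
        /* Size the per-queue tx ring from txqueuelen so the existing
         * 'ip link set ... txqueuelen N' semantics are preserved. */
        return skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL);
}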
2016 Jun 30
10
[PATCH net-next V4 0/6] switch to use tx skb array in tun
Hi all:
This series tries to switch tun to an skb array. This is used to
eliminate the spinlock contention between producer and consumer. The
conversion was straightforward: just introduce a tx skb array and use
it instead of sk_receive_queue.
A minor issue is keeping the tx_queue_len behaviour, since tun used to
use it as the length of sk_receive_queue. This is done through:
- add the
2017 Jan 18
7
[PATCH net-next V5 0/3] vhost_net tx batching
Hi:
This series tries to implement tx batching support for vhost. This is
done by using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over
mlx4 (noqueue) on the host:
Mpps -+%
2017 Jan 06
2
[PATCH V4 net-next 3/3] tun: rx batching
...ceive_skb(nskb);
> + netif_receive_skb(skb);
> + local_bh_enable();
> + }
> +}
> +
> /* Get packet from user space buffer */
> static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> void *msg_control, struct iov_iter *from,
> - int noblock)
> + int noblock, bool more)
> {
> struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
> struct sk_buff *skb;
> @@ -1283,10 +1320,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> skb_probe_transport_header(skb, 0);
>
> rxhash =...