Displaying 20 results from an estimated 37 matches for "poll_out".
2013 Sep 06
2
[Gluster-devel] GlusterFS 3.3.1 client crash (signal received: 6)
...d out>, event=<value optimized out>, data=<value optimized out>) at rpc-transport.c:489
#13 0x00007fc4eeeb0764 in socket_event_poll_in (this=0x3b6c060) at socket.c:1677
#14 0x00007fc4eeeb0847 in socket_event_handler (fd=<value optimized out>, idx=265, data=0x3b6c060, poll_in=1, poll_out=0, poll_err=<value optimized out>) at socket.c:1792
#15 0x00007fc4f2846464 in event_dispatch_epoll_handler (event_pool=0x177cdf0) at event.c:785
#16 event_dispatch_epoll (event_pool=0x177cdf0) at event.c:847
#17 0x000000000040736a in main (argc=<value optimized out>, argv=0x7fffcb83efc8...
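Frames #13-#16 show GlusterFS's event loop at work: event_dispatch_epoll() waits on epoll and hands each ready descriptor to socket_event_handler() with the readiness decoded into separate poll_in/poll_out/poll_err flags. A minimal sketch of that dispatch shape, written against plain epoll rather than Gluster's event_pool internals (the handler names here are illustrative):

#include <sys/epoll.h>

/* Illustrative handler in the shape of socket_event_handler(): one
 * callback per ready fd, readiness already split into flags. */
static void handle_socket_event(int fd, void *data,
                                int poll_in, int poll_out, int poll_err)
{
    (void)fd; (void)data;
    if (poll_err) { /* tear the connection down */ return; }
    if (poll_in)  { /* read and process incoming records */ }
    if (poll_out) { /* flush the pending write queue */ }
}

/* Illustrative stand-in for event_dispatch_epoll(): wait, then fan out. */
static void dispatch_loop(int epfd)
{
    struct epoll_event events[64];
    int i, n;

    for (;;) {
        n = epoll_wait(epfd, events, 64, -1);
        if (n < 0)
            break;
        for (i = 0; i < n; i++)
            handle_socket_event(events[i].data.fd, NULL,
                                !!(events[i].events & EPOLLIN),
                                !!(events[i].events & EPOLLOUT),
                                !!(events[i].events & (EPOLLERR | EPOLLHUP)));
    }
}

In this shape, the object passed through data must stay alive until every event already queued for its fd has been handled, which is why transport lifetime bugs in this path tend to surface in frames like #14.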
2012 Feb 01
1
[PATCH 2/2] virtio-serial: setup_port_vq when adding port
...r/virtio_console.c b/drivers/char/virtio_console.c
> index 8e3c46d..2e5187e 100644
> --- a/drivers/char/virtio_console.c
> +++ b/drivers/char/virtio_console.c
> @@ -1132,6 +1132,55 @@ static void send_sigio_to_port(struct port *port)
> kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
> }
>
> +static void in_intr(struct virtqueue *vq);
> +static void out_intr(struct virtqueue *vq);
> +
> +static int setup_port_vq(struct ports_device *portdev, u32 id)
> +{
> + int err, vq_num;
> + vq_callback_t **io_callbacks;
> + char **io_names;
> + struct...
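The send_sigio_to_port() hunk above rides the kernel's generic fasync machinery. fasync_helper() and kill_fasync() are the real primitives; the surrounding names in this sketch are illustrative:

#include <linux/fs.h>
#include <linux/poll.h>

/* Illustrative device state carrying the list of SIGIO subscribers. */
struct demo_dev {
    struct fasync_struct *async_queue;
};

/* Wired into file_operations.fasync: called when userspace toggles
 * O_ASYNC, to subscribe or unsubscribe the caller. */
static int demo_fasync(int fd, struct file *filp, int on)
{
    struct demo_dev *dev = filp->private_data;

    return fasync_helper(fd, filp, on, &dev->async_queue);
}

/* On an interesting event, every subscriber gets SIGIO with the band
 * describing what happened -- POLL_OUT here, as in the patch. */
static void demo_notify_writable(struct demo_dev *dev)
{
    kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);
}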
2017 Dec 06
0
[Gluster-devel] Crash in glusterd!!!
...<optimized out>) at rpc-transport.c:546
#13 0x00003fff847fcd44 in socket_event_poll_in (this=this at entry=0x3fff74002210)
at socket.c:2236
#14 0x00003fff847ff89c in socket_event_handler (fd=<optimized out>,
idx=<optimized out>, data=0x3fff74002210, poll_in=<optimized out>,
poll_out=<optimized out>, poll_err=<optimized out>) at socket.c:2349
#15 0x00003fff88616874 in event_dispatch_epoll_handler
(event=0x3fff83d9d6a0, event_pool=0x10045bc0
<_GLOBAL__sub_I__ZN29DrhIfRhControlPdrProxyC_ActorC2EP12RTControllerP10RTActorRef()+116>)
at event-epoll.c:575
#16 even...
2017 Dec 06
1
[Gluster-devel] Crash in glusterd!!!
...sport.c:546
>
> #13 0x00003fff847fcd44 in socket_event_poll_in (this=this at entry=0x3fff74002210)
> at socket.c:2236
>
> #14 0x00003fff847ff89c in socket_event_handler (fd=<optimized out>,
> idx=<optimized out>, data=0x3fff74002210, poll_in=<optimized out>,
> poll_out=<optimized out>, poll_err=<optimized out>) at socket.c:2349
>
> #15 0x00003fff88616874 in event_dispatch_epoll_handler
> (event=0x3fff83d9d6a0, event_pool=0x10045bc0 <_GLOBAL__sub_I__
> ZN29DrhIfRhControlPdrProxyC_ActorC2EP12RTControllerP10RTActorRef()+116>)
> at ev...
2017 Dec 06
2
[Gluster-devel] Crash in glusterd!!!
Without the glusterd log file and the core file or the backtrace I can't
comment on anything.
On Wed, Dec 6, 2017 at 3:09 PM, ABHISHEK PALIWAL <abhishpaliwal at gmail.com>
wrote:
> Any suggestion....
>
> On Dec 6, 2017 11:51, "ABHISHEK PALIWAL" <abhishpaliwal at gmail.com> wrote:
>
>> Hi Team,
>>
>> We are getting the crash in glusterd after
2008 May 15
3
[PATCH 1/4] ocfs2: Fixes pipe_buf_operations->pin switch to confirm in 2.6.23.
Signed-off-by: Tiger Yang <tiger.yang at oracle.com>
---
Config.make.in | 1 +
configure.in | 6 ++++++
fs/ocfs2/Makefile | 4 ++++
kapi-compat/include/pipe_buf_operations.h | 10 ++++++++++
4 files changed, 21 insertions(+), 0 deletions(-)
create mode 100644 kapi-compat/include/pipe_buf_operations.h
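For background: 2.6.23 renamed the pipe_buf_operations ->pin() hook to ->confirm() (same job, making sure the buffer's page is valid before use), so ocfs2 needs to build against both spellings. The diffstat suggests a configure-time probe plus a ten-line compat header; a shim of that kind typically reduces to a guarded alias, sketched hypothetically here:

/* Hypothetical kapi-compat shim: kernels before 2.6.23 call the
 * pipe_buf_operations member "pin"; 2.6.23+ renamed it "confirm".
 * A configure probe is assumed to define HAVE_PIPE_CONFIRM. */
#ifndef HAVE_PIPE_CONFIRM
#define confirm pin   /* let .confirm initializers build on old kernels */
#endif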
2018 Nov 15
0
[PATCH net-next 2/2] tuntap: free XDP dropped packets in a batch
..._file {
struct xdp_rxq_info xdp_rxq;
};
+struct tun_page {
+ struct page *page;
+ int count;
+};
+
struct tun_flow_entry {
struct hlist_node hash_link;
struct rcu_head rcu;
@@ -2377,9 +2382,16 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static void tun_put_page(struct tun_page *tpage)
+{
+ if (tpage->page)
+ __page_frag_cache_drain(tpage->page, tpage->count);
+}
+
static int tun_xdp_one(struct tun_struct *tun,
struct tun_file *tfile,
- struct xdp_buff *xdp, int *flush)
+ struct xdp_buff...
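The tun_page bookkeeping above exists to turn one atomic reference drop per freed XDP packet into a single drain per batch; __page_frag_cache_drain(page, count) releases count references at once. The effect, reduced to plain C11 atomics with an illustrative stand-in for the page refcount:

#include <stdatomic.h>

/* Illustrative stand-in for a page's reference count. */
typedef struct { atomic_int refcount; } frag_page;

/* Per-packet release: one atomic operation for every dropped packet. */
static void put_each(frag_page *p, int npackets)
{
    int i;

    for (i = 0; i < npackets; i++)
        atomic_fetch_sub_explicit(&p->refcount, 1, memory_order_release);
}

/* Batched release in the style of tun_put_page(): count drops locally
 * while walking the batch, then retire them with one atomic operation. */
static void put_batched(frag_page *p, int npackets)
{
    atomic_fetch_sub_explicit(&p->refcount, npackets, memory_order_release);
}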
2018 Sep 06
0
[PATCH net-next 09/11] tuntap: accept an array of XDP buffs through sendmsg()
...anged, 100 insertions(+), 3 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c839a4bdcbd9..069db2e5dd08 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2424,22 +2424,119 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static int tun_xdp_one(struct tun_struct *tun,
+ struct tun_file *tfile,
+ struct xdp_buff *xdp, int *flush)
+{
+ struct virtio_net_hdr *gso = xdp->data_hard_start + sizeof(int);
+ struct tun_pcpu_stats *stats;
+ struct bpf_prog *xdp_prog;
+ struct sk_buff *skb = NULL;
+...
2018 Sep 06
1
[PATCH net-next 09/11] tuntap: accept an array of XDP buffs through sendmsg()
...ns(-)
>
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index c839a4bdcbd9..069db2e5dd08 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -2424,22 +2424,119 @@ static void tun_sock_write_space(struct sock *sk)
> kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
> }
>
> +static int tun_xdp_one(struct tun_struct *tun,
> + struct tun_file *tfile,
> + struct xdp_buff *xdp, int *flush)
> +{
> + struct virtio_net_hdr *gso = xdp->data_hard_start + sizeof(int);
> + struct tun_pcpu_stats *stats;
> + struct bpf_pro...
2010 Sep 02
4
[PATCH 0/3] virtio: console: async notifications for host connect / disconnect
Hey Rusty,
This patchset is on top of the previous one.
It sends a SIGIO signal to apps that request signals for host
activity. SIGIO is sent on host connect, disconnect as well as
hot-unplug (which can be seen as a special case of host disconnect).
Tested using several testcases in the test-virtserial repo:
http://fedorapeople.org/gitweb?p=amitshah/public_git/test-virtserial.git
Please apply.
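On the application side these notifications arrive through the standard O_ASYNC machinery; nothing virtio-specific is required. A minimal sketch of a receiver (handler and helper names are illustrative):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

/* With SA_SIGINFO, the band passed to kill_fasync() shows up in
 * si_code (POLL_IN, POLL_OUT, ...) and the descriptor in si_fd. */
static void on_sigio(int sig, siginfo_t *si, void *ctx)
{
    (void)sig; (void)ctx;
    if (si->si_code == POLL_IN)
        ; /* data or a host-connect event is pending on si->si_fd */
    else if (si->si_code == POLL_OUT)
        ; /* the port is writable again */
}

static int request_sigio(int fd)
{
    struct sigaction sa = { 0 };

    sa.sa_sigaction = on_sigio;
    sa.sa_flags = SA_SIGINFO;
    if (sigaction(SIGIO, &sa, NULL) < 0)
        return -1;

    /* Route SIGIO for this fd to this process, then enable O_ASYNC. */
    if (fcntl(fd, F_SETOWN, getpid()) < 0)
        return -1;
    return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}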
2010 Aug 26
5
[PATCH 0/4] virtio: console: fixes, SIGIO
Hi Rusty,
The main thing in these patches is the introduction of injecting SIGIO
on host-side connect/disconnect events and when new data is available
for ports.
The first two patches fix bugs that I haven't seen, but look like the
right thing to do.
These have been tested extensively using the test-virtserial test
suite.
Please apply,
Amit.
Amit Shah (4):
virtio: console: Un-block
2009 Nov 04
0
[PATCHv8 1/3] tun: export underlying socket
...sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
tun = container_of(sk, struct tun_sock, sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk)
free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(...
2009 Nov 03
1
[PATCHv7 1/3] tun: export underlying socket
...sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
tun = container_of(sk, struct tun_sock, sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk)
free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(...
2018 Nov 15
3
[PATCH net-next 1/2] vhost_net: mitigate page reference counting during page frag refill
We do a get_page(), which involves an atomic operation. This patch tries
to mitigate the per-packet atomic operation by maintaining a reference
bias which is initially USHRT_MAX. Each time a page is taken, instead of
calling get_page() we decrease the bias, and when it's time to switch to
a new page we release the remaining bias in one go through
__page_frag_cache_drain().
Testpmd(virtio_user +
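A sketch of the bias scheme described above; page_ref_add() and __page_frag_cache_drain() are the real kernel primitives, while the struct and helpers are illustrative:

#include <linux/kernel.h>
#include <linux/mm.h>

/* Illustrative refill state: current page plus the pre-charged bias. */
struct frag_state {
    struct page *page;
    unsigned int refcnt_bias;  /* pre-charged refs not yet handed out */
};

static void frag_charge_page(struct frag_state *s, struct page *page)
{
    /* One atomic operation pre-charges USHRT_MAX references ... */
    page_ref_add(page, USHRT_MAX - 1);
    s->page = page;
    s->refcnt_bias = USHRT_MAX;
}

/* ... so handing out a reference per packet is a plain decrement.
 * (Real code recharges before the bias can reach zero.) */
static struct page *frag_get_ref(struct frag_state *s)
{
    s->refcnt_bias--;
    return s->page;
}

/* When moving to a new page, return the leftover bias in one go. */
static void frag_retire_page(struct frag_state *s)
{
    if (s->page)
        __page_frag_cache_drain(s->page, s->refcnt_bias);
}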
2009 Nov 02
1
[PATCHv6 1/3] tun: export underlying socket
...sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
tun = container_of(sk, struct tun_sock, sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk)
free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(...
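The tun_sock_write_space() hunk that recurs in these postings is the canonical socket write-space pattern: report writability only when there is real room, wake only sleepers that asked for write readiness, then raise SIGIO with band POLL_OUT. In outline (a sketch, not tun's exact code; the mask is spelled with the POLL* names these 2009 hunks use, and sk_sleep(sk) stands in for the older sk->sk_sleep field):

#include <linux/poll.h>
#include <net/sock.h>

/* Illustrative wrapper mirroring struct tun_sock in the hunks above. */
struct demo_sock {
    struct sock sk;
    struct fasync_struct *fasync;
};

/* Sketch of a ->sk_write_space callback. */
static void demo_write_space(struct sock *sk)
{
    struct demo_sock *ds = container_of(sk, struct demo_sock, sk);

    /* Say nothing unless enough space is genuinely free. */
    if (!sock_writeable(sk))
        return;

    /* Keyed wakeup: sleepers waiting for something other than
     * writability are skipped entirely. */
    if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
        wake_up_interruptible_sync_poll(sk_sleep(sk),
                                        POLLOUT | POLLWRNORM | POLLWRBAND);

    /* And SIGIO for O_ASYNC subscribers, with the matching band. */
    kill_fasync(&ds->fasync, SIGIO, POLL_OUT);
}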