Peng Tao
2017-Mar-01 03:56 UTC
[PATCH-v4-RESEND 0/4] vsock: cancel connect packets when failing to connect
Hi David,

This patch set was sent before and reviewed by Stefan and Jorgen
[https://www.spinics.net/lists/kvm/msg142367.html]. If there is any
blocker, please do tell and I'll see to it. Thanks!

Currently, if a connect call fails on a signal or timeout (e.g., the
guest is still in the process of starting up), we just return to the
caller and leave the connect packets queued; they are sent even though
the connection is considered a failure, which can confuse applications
with unwanted, spurious connect attempts.

This patch set enables vsock (both host and guest) to cancel queued
packets when a connect attempt is considered to have failed.

v4 changelog:
 - drop two unnecessary void * casts
 - update the new callback comment
v3 changelog:
 - define the cancel_pkt callback in struct vsock_transport rather than
   in struct virtio_transport
 - rename virtio_vsock_pkt->vsk to virtio_vsock_pkt->cancel_token
v2 changelog:
 - fix queued_replies counting and resume tx/rx when necessary

Cheers,
Tao

Peng Tao (4):
  vsock: track pkt owner vsock
  vhost-vsock: add pkt cancel capability
  vsock: add pkt cancel capability
  vsock: cancel packets when failing to connect

 drivers/vhost/vsock.c                   | 41 ++++++++++++++++++++++++++++++++
 include/linux/virtio_vsock.h            |  2 ++
 include/net/af_vsock.h                  |  3 +++
 net/vmw_vsock/af_vsock.c                | 14 +++++++++++
 net/vmw_vsock/virtio_transport.c        | 42 +++++++++++++++++++++++++++++++++
 net/vmw_vsock/virtio_transport_common.c |  7 ++++++
 6 files changed, 109 insertions(+)

-- 
2.7.4
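As a rough illustration of the scenario above (not part of the series;
the guest CID 3, port 1234, and the error handling are made-up values),
a host-side client might look like this. If the guest is still booting,
connect() can fail with ETIMEDOUT or EINTR, yet without these patches
the already-queued connection request is still delivered to the guest
later:

    /* Hypothetical host-side client; CID and port are assumptions. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/vm_sockets.h>

    int main(void)
    {
            struct sockaddr_vm addr = {
                    .svm_family = AF_VSOCK,
                    .svm_cid = 3,       /* guest CID (assumed) */
                    .svm_port = 1234,   /* service port (assumed) */
            };
            int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

            if (fd < 0)
                    return 1;

            /* May fail with ETIMEDOUT/EINTR while the guest boots; the
             * queued VIRTIO_VSOCK_OP_REQUEST used to stay queued and
             * show up at the guest as a spurious connect attempt. */
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    fprintf(stderr, "connect: %s\n", strerror(errno));

            close(fd);
            return 0;
    }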
Peng Tao
2017-Mar-01 03:56 UTC
[PATCH-v4-RESEND 1/4] vsock: track pkt owner vsock

So that we can cancel a queued pkt later if necessary.

Reviewed-by: Stefan Hajnoczi <stefanha at redhat.com>
Signed-off-by: Peng Tao <bergwolf at gmail.com>
---
 include/linux/virtio_vsock.h            | 2 ++
 net/vmw_vsock/virtio_transport_common.c | 7 +++++++
 2 files changed, 9 insertions(+)

diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 9638bfe..193ad3a 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
         struct virtio_vsock_hdr hdr;
         struct work_struct work;
         struct list_head list;
+        void *cancel_token; /* only used for cancellation */
         void *buf;
         u32 len;
         u32 off;
@@ -56,6 +57,7 @@ struct virtio_vsock_pkt {
 
 struct virtio_vsock_pkt_info {
         u32 remote_cid, remote_port;
+        struct vsock_sock *vsk;
         struct msghdr *msg;
         u32 pkt_len;
         u16 type;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 849c4ad..ab505f1 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -57,6 +57,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
         pkt->len = len;
         pkt->hdr.len = cpu_to_le32(len);
         pkt->reply = info->reply;
+        pkt->cancel_token = info->vsk;
 
         if (info->msg && len > 0) {
                 pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -179,6 +180,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
         struct virtio_vsock_pkt_info info = {
                 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
                 .type = type,
+                .vsk = vsk,
         };
 
         return virtio_transport_send_pkt_info(vsk, &info);
@@ -518,6 +520,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
         struct virtio_vsock_pkt_info info = {
                 .op = VIRTIO_VSOCK_OP_REQUEST,
                 .type = VIRTIO_VSOCK_TYPE_STREAM,
+                .vsk = vsk,
         };
 
         return virtio_transport_send_pkt_info(vsk, &info);
@@ -533,6 +536,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
                           VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                          (mode & SEND_SHUTDOWN ?
                           VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+                .vsk = vsk,
         };
 
         return virtio_transport_send_pkt_info(vsk, &info);
@@ -559,6 +563,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                 .type = VIRTIO_VSOCK_TYPE_STREAM,
                 .msg = msg,
                 .pkt_len = len,
+                .vsk = vsk,
         };
 
         return virtio_transport_send_pkt_info(vsk, &info);
@@ -580,6 +585,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
                 .op = VIRTIO_VSOCK_OP_RST,
                 .type = VIRTIO_VSOCK_TYPE_STREAM,
                 .reply = !!pkt,
+                .vsk = vsk,
         };
 
         /* Send RST only if the original pkt is not a RST pkt */
@@ -825,6 +831,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
                 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
                 .remote_port = le32_to_cpu(pkt->hdr.src_port),
                 .reply = true,
+                .vsk = vsk,
         };
 
         return virtio_transport_send_pkt_info(vsk, &info);
-- 
2.7.4
Peng Tao
2017-Mar-01 03:56 UTC
[PATCH-v4-RESEND 2/4] vhost-vsock: add pkt cancel capability
To allow canceling all packets of a connection.

Reviewed-by: Stefan Hajnoczi <stefanha at redhat.com>
Reviewed-by: Jorgen Hansen <jhansen at vmware.com>
Signed-off-by: Peng Tao <bergwolf at gmail.com>
---
 drivers/vhost/vsock.c  | 41 +++++++++++++++++++++++++++++++++++++++++
 include/net/af_vsock.h |  3 +++
 2 files changed, 44 insertions(+)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index ce5e63d..57babce 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
         return len;
 }
 
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+        struct vhost_vsock *vsock;
+        struct virtio_vsock_pkt *pkt, *n;
+        int cnt = 0;
+        LIST_HEAD(freeme);
+
+        /* Find the vhost_vsock according to guest context id */
+        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+        if (!vsock)
+                return -ENODEV;
+
+        spin_lock_bh(&vsock->send_pkt_list_lock);
+        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+                if (pkt->cancel_token != vsk)
+                        continue;
+                list_move(&pkt->list, &freeme);
+        }
+        spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+        list_for_each_entry_safe(pkt, n, &freeme, list) {
+                if (pkt->reply)
+                        cnt++;
+                list_del(&pkt->list);
+                virtio_transport_free_pkt(pkt);
+        }
+
+        if (cnt) {
+                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+                int new_cnt;
+
+                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+                        vhost_poll_queue(&tx_vq->poll);
+        }
+
+        return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                       unsigned int out, unsigned int in)
@@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
                 .release = virtio_transport_release,
                 .connect = virtio_transport_connect,
                 .shutdown = virtio_transport_shutdown,
+                .cancel_pkt = vhost_transport_cancel_pkt,
 
                 .dgram_enqueue = virtio_transport_dgram_enqueue,
                 .dgram_dequeue = virtio_transport_dgram_dequeue,
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f275896..f32ed9a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,6 +100,9 @@ struct vsock_transport {
         void (*destruct)(struct vsock_sock *);
         void (*release)(struct vsock_sock *);
 
+        /* Cancel all pending packets sent on vsock. */
+        int (*cancel_pkt)(struct vsock_sock *vsk);
+
         /* Connections. */
         int (*connect)(struct vsock_sock *);
 
-- 
2.7.4
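A note on the queued_replies arithmetic in vhost_transport_cancel_pkt()
above: vhost-vsock throttles tx virtqueue processing once the number of
queued replies reaches the vring size, so the check only fires when
cancelling reply packets drops the count back below that limit and the
virtqueue needs a kick. A tiny standalone sketch with made-up numbers
(illustration only, not kernel code):

    /* Illustrative only; vq_num and the counts are example values. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool should_resume_tx(int old_cnt, int cancelled, int vq_num)
    {
            int new_cnt = old_cnt - cancelled;

            /* Same shape as the check in vhost_transport_cancel_pkt():
             * "new_cnt + cnt >= vq_num" is "old_cnt >= vq_num", i.e. tx
             * was throttled before and is below the limit now. */
            return old_cnt >= vq_num && new_cnt < vq_num;
    }

    int main(void)
    {
            /* vring of 256: 260 queued replies, 10 cancelled -> resume */
            printf("%d\n", should_resume_tx(260, 10, 256)); /* prints 1 */
            /* 100 queued replies, 10 cancelled -> tx was never throttled */
            printf("%d\n", should_resume_tx(100, 10, 256)); /* prints 0 */
            return 0;
    }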
Peng Tao
2017-Mar-01 03:56 UTC
[PATCH-v4-RESEND 3/4] vsock: add pkt cancel capability

Reviewed-by: Stefan Hajnoczi <stefanha at redhat.com>
Signed-off-by: Peng Tao <bergwolf at gmail.com>
---
 net/vmw_vsock/virtio_transport.c | 42 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 6788264..f5c44b5 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
         return len;
 }
 
+static int
+virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+        struct virtio_vsock *vsock;
+        struct virtio_vsock_pkt *pkt, *n;
+        int cnt = 0;
+        LIST_HEAD(freeme);
+
+        vsock = virtio_vsock_get();
+        if (!vsock) {
+                return -ENODEV;
+        }
+
+        spin_lock_bh(&vsock->send_pkt_list_lock);
+        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+                if (pkt->cancel_token != vsk)
+                        continue;
+                list_move(&pkt->list, &freeme);
+        }
+        spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+        list_for_each_entry_safe(pkt, n, &freeme, list) {
+                if (pkt->reply)
+                        cnt++;
+                list_del(&pkt->list);
+                virtio_transport_free_pkt(pkt);
+        }
+
+        if (cnt) {
+                struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+                int new_cnt;
+
+                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+                if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+                    new_cnt < virtqueue_get_vring_size(rx_vq))
+                        queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+        }
+
+        return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
 {
         int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
                 .release = virtio_transport_release,
                 .connect = virtio_transport_connect,
                 .shutdown = virtio_transport_shutdown,
+                .cancel_pkt = virtio_transport_cancel_pkt,
 
                 .dgram_bind = virtio_transport_dgram_bind,
                 .dgram_dequeue = virtio_transport_dgram_dequeue,
-- 
2.7.4
Peng Tao
2017-Mar-01 03:56 UTC
[PATCH-v4-RESEND 4/4] vsock: cancel packets when failing to connect
Otherwise we'll leave the packets queued until the vsock device is
released. E.g., if the guest is slow to start up, resulting in
ETIMEDOUT on connect, the guest will later receive connect requests
from host sockets whose connect attempts have already failed.

Reviewed-by: Stefan Hajnoczi <stefanha at redhat.com>
Reviewed-by: Jorgen Hansen <jhansen at vmware.com>
Signed-off-by: Peng Tao <bergwolf at gmail.com>
---
 net/vmw_vsock/af_vsock.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 8a398b3..c73b03a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1101,10 +1101,19 @@ static const struct proto_ops vsock_dgram_ops = {
         .sendpage = sock_no_sendpage,
 };
 
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+        if (!transport->cancel_pkt)
+                return -EOPNOTSUPP;
+
+        return transport->cancel_pkt(vsk);
+}
+
 static void vsock_connect_timeout(struct work_struct *work)
 {
         struct sock *sk;
         struct vsock_sock *vsk;
+        int cancel = 0;
 
         vsk = container_of(work, struct vsock_sock, dwork.work);
         sk = sk_vsock(vsk);
@@ -1115,8 +1124,11 @@ static void vsock_connect_timeout(struct work_struct *work)
                 sk->sk_state = SS_UNCONNECTED;
                 sk->sk_err = ETIMEDOUT;
                 sk->sk_error_report(sk);
+                cancel = 1;
         }
         release_sock(sk);
+        if (cancel)
+                vsock_transport_cancel_pkt(vsk);
 
         sock_put(sk);
 }
@@ -1223,11 +1235,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                         err = sock_intr_errno(timeout);
                         sk->sk_state = SS_UNCONNECTED;
                         sock->state = SS_UNCONNECTED;
+                        vsock_transport_cancel_pkt(vsk);
                         goto out_wait;
                 } else if (timeout == 0) {
                         err = -ETIMEDOUT;
                         sk->sk_state = SS_UNCONNECTED;
                         sock->state = SS_UNCONNECTED;
+                        vsock_transport_cancel_pkt(vsk);
                         goto out_wait;
                 }
 
-- 
2.7.4
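For completeness, the guest-side counterpart of the commit message's
example (a sketch only, not part of the patch; the port number and the
minimal error handling are assumptions). Before this change, such a
listener could observe a spurious incoming connection from a host
socket whose connect() had already timed out or been interrupted:

    /* Hypothetical guest-side listener; port 1234 is an assumption. */
    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/vm_sockets.h>

    int main(void)
    {
            struct sockaddr_vm addr = {
                    .svm_family = AF_VSOCK,
                    .svm_cid = VMADDR_CID_ANY,
                    .svm_port = 1234,   /* assumed service port */
            };
            int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

            if (fd < 0 ||
                bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
                listen(fd, 1) < 0)
                    return 1;

            /* Without cancel support, this accept() could hand back a
             * connection whose host peer gave up long before the guest
             * finished booting. */
            int client = accept(fd, NULL, NULL);

            if (client >= 0)
                    close(client);
            close(fd);
            return 0;
    }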
From: Peng Tao <bergwolf at gmail.com>
Date: Wed, 1 Mar 2017 11:56:24 +0800

> So that we can cancel a queued pkt later if necessary.
>
> Reviewed-by: Stefan Hajnoczi <stefanha at redhat.com>
> Signed-off-by: Peng Tao <bergwolf at gmail.com>
> ---
>  include/linux/virtio_vsock.h            | 2 ++
>  net/vmw_vsock/virtio_transport_common.c | 7 +++++++
>  2 files changed, 9 insertions(+)
>
> diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
> index 9638bfe..193ad3a 100644
> --- a/include/linux/virtio_vsock.h
> +++ b/include/linux/virtio_vsock.h
> @@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
>          struct virtio_vsock_hdr hdr;
>          struct work_struct work;
>          struct list_head list;
> +        void *cancel_token; /* only used for cancellation */

The type here is fixed, you only store vsock_sock object pointers here,
so don't use "void *" please.
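One possible way to address this comment (a sketch only, under the
assumption that a forward declaration of struct vsock_sock is
acceptable in this header; not necessarily the exact follow-up that was
posted) is to store a typed owner pointer instead of the opaque token:

    /* Sketch only: typed owner pointer instead of void *cancel_token. */
    struct vsock_sock;  /* forward declaration keeps the header light */

    struct virtio_vsock_pkt {
            struct virtio_vsock_hdr hdr;
            struct work_struct work;
            struct list_head list;
            /* socket refcnt is not held; only used to match packets
             * for cancellation */
            struct vsock_sock *vsk;
            void *buf;
            u32 len;
            u32 off;
            /* ... remaining fields unchanged ... */
    };

The transports would then compare pkt->vsk against the vsock_sock being
cancelled, exactly as they compare cancel_token today.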