Stefano Garzarella
2021-Jan-18 15:14 UTC
[RFC PATCH v2 08/13] virtio/vsock: dequeue callback for SOCK_SEQPACKET.
On Fri, Jan 15, 2021 at 08:43:24AM +0300, Arseny Krasnov wrote:
>This adds transport callback and it's logic for SEQPACKET dequeue.
>Callback fetches RW packets from rx queue of socket until whole record
>is copied(if user's buffer is full, user is not woken up). This is done
>to not stall sender, because if we wake up user and it leaves syscall,
>nobody will send credit update for rest of record, and sender will wait
>for next enter of read syscall at receiver's side. So if user buffer is
>full, we just send credit update and drop data. If during copy SEQ_BEGIN
>was found(and not all data was copied), copying is restarted by reset
>user's iov iterator(previous unfinished data is dropped).
>
>Signed-off-by: Arseny Krasnov <arseny.krasnov at kaspersky.com>
>---
> include/linux/virtio_vsock.h            |   4 +
> include/uapi/linux/virtio_vsock.h       |   9 ++
> net/vmw_vsock/virtio_transport_common.c | 128 ++++++++++++++++++++++++
> 3 files changed, 141 insertions(+)
>
>diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>index dc636b727179..7f0ef5204e33 100644
>--- a/include/linux/virtio_vsock.h
>+++ b/include/linux/virtio_vsock.h
>@@ -36,6 +36,10 @@ struct virtio_vsock_sock {
> 	u32 rx_bytes;
> 	u32 buf_alloc;
> 	struct list_head rx_queue;
>+
>+	/* For SOCK_SEQPACKET */
>+	u32 user_read_seq_len;
>+	u32 user_read_copied;
> };
>
> struct virtio_vsock_pkt {
>diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
>index 1d57ed3d84d2..058908bc19fc 100644
>--- a/include/uapi/linux/virtio_vsock.h
>+++ b/include/uapi/linux/virtio_vsock.h
>@@ -65,6 +65,7 @@ struct virtio_vsock_hdr {
>
> enum virtio_vsock_type {
> 	VIRTIO_VSOCK_TYPE_STREAM = 1,
>+	VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
> };
>
> enum virtio_vsock_op {
>@@ -83,6 +84,9 @@ enum virtio_vsock_op {
> 	VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6,
> 	/* Request the peer to send the credit info to us */
> 	VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7,
>+
>+	/* Record begin for SOCK_SEQPACKET */
>+	VIRTIO_VSOCK_OP_SEQ_BEGIN = 8,
> };
>
> /* VIRTIO_VSOCK_OP_SHUTDOWN flags values */
>@@ -91,4 +95,9 @@ enum virtio_vsock_shutdown {
> 	VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
> };
>
>+/* VIRTIO_VSOCK_OP_RW flags values for SOCK_SEQPACKET type */
>+enum virtio_vsock_rw_seqpacket {
>+	VIRTIO_VSOCK_RW_EOR = 1,
>+};
>+
> #endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>index 5956939eebb7..4328f653a477 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -397,6 +397,132 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
> 	return err;
> }
>
>+static inline void virtio_transport_del_n_free_pkt(struct virtio_vsock_pkt *pkt)
>+{
>+	list_del(&pkt->list);
>+	virtio_transport_free_pkt(pkt);
>+}
>+
>+static size_t virtio_transport_drop_until_seq_begin(struct virtio_vsock_sock *vvs)
>+{
>+	struct virtio_vsock_pkt *pkt, *n;
>+	size_t bytes_dropped = 0;
>+
>+	list_for_each_entry_safe(pkt, n, &vvs->rx_queue, list) {
>+		if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_SEQ_BEGIN)
>+			break;
>+
>+		bytes_dropped += le32_to_cpu(pkt->hdr.len);
>+		virtio_transport_dec_rx_pkt(vvs, pkt);
>+		virtio_transport_del_n_free_pkt(pkt);
>+	}
>+
>+	return bytes_dropped;
>+}
>+
>+static ssize_t virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
>+						      struct msghdr *msg,
>+						      size_t user_buf_len)
>+{
>+	struct virtio_vsock_sock *vvs = vsk->trans;
>+	struct virtio_vsock_pkt *pkt;
>+	size_t bytes_handled = 0;
>+	int err = 0;
>+
>+	spin_lock_bh(&vvs->rx_lock);
>+
>+	if (user_buf_len == 0) {
>+		/* User's buffer is full, we processing rest of
>+		 * record and drop it. If 'SEQ_BEGIN' is found
>+		 * while iterating, user will be woken up,
>+		 * because record is already copied, and we
>+		 * don't care about absent of some tail RW packets
>+		 * of it. Return number of bytes(rest of record),
>+		 * but ignore credit update for such absent bytes.
>+		 */
>+		bytes_handled = virtio_transport_drop_until_seq_begin(vvs);
>+		vvs->user_read_copied += bytes_handled;
>+
>+		if (!list_empty(&vvs->rx_queue) &&
>+		    vvs->user_read_copied < vvs->user_read_seq_len) {
>+			/* 'SEQ_BEGIN' found, but record isn't complete.
>+			 * Set number of copied bytes to fit record size
>+			 * and force counters to finish receiving.
>+			 */
>+			bytes_handled += (vvs->user_read_seq_len - vvs->user_read_copied);
>+			vvs->user_read_copied = vvs->user_read_seq_len;
>+		}
>+	}
>+
>+	/* Now start copying. */
>+	while (vvs->user_read_copied < vvs->user_read_seq_len &&
>+	       vvs->rx_bytes &&
>+	       user_buf_len &&
>+	       !err) {
>+		pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
>+
>+		switch (le16_to_cpu(pkt->hdr.op)) {
>+		case VIRTIO_VSOCK_OP_SEQ_BEGIN: {
>+			/* Unexpected 'SEQ_BEGIN' during record copy:
>+			 * Leave receive loop, 'EAGAIN' will restart it from
>+			 * outer receive loop, packet is still in queue and
>+			 * counters are cleared. So in next loop enter,
>+			 * 'SEQ_BEGIN' will be dequeued first. User's iov
>+			 * iterator will be reset in outer loop. Also
>+			 * send credit update, because some bytes could be
>+			 * copied. User will never see unfinished record.
>+			 */
>+			err = -EAGAIN;
>+			break;
>+		}
>+		case VIRTIO_VSOCK_OP_RW: {
>+			size_t bytes_to_copy;
>+			size_t pkt_len;
>+
>+			pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
>+			bytes_to_copy = min(user_buf_len, pkt_len);
>+
>+			/* sk_lock is held by caller so no one else can dequeue.
>+			 * Unlock rx_lock since memcpy_to_msg() may sleep.
>+			 */
>+			spin_unlock_bh(&vvs->rx_lock);
>+
>+			if (memcpy_to_msg(msg, pkt->buf, bytes_to_copy)) {
>+				spin_lock_bh(&vvs->rx_lock);
>+				err = -EINVAL;
>+				break;
>+			}
>+
>+			spin_lock_bh(&vvs->rx_lock);
>+			user_buf_len -= bytes_to_copy;
>+			bytes_handled += pkt->len;
>+			vvs->user_read_copied += bytes_to_copy;
>+
>+			if (le16_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_RW_EOR)
                                 ^
hdr.flags is __le32, so please use le32_to_cpu()

>+				msg->msg_flags |= MSG_EOR;
>+			break;
>+		}
>+		default:
>+			;
>+		}
>+
>+		/* For unexpected 'SEQ_BEGIN', keep such packet in queue,
>+		 * but drop any other type of packet.
>+		 */
>+		if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_SEQ_BEGIN) {
>+			virtio_transport_dec_rx_pkt(vvs, pkt);
>+			virtio_transport_del_n_free_pkt(pkt);
>+		}
>+	}
>+
>+	spin_unlock_bh(&vvs->rx_lock);
>+
>+	virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_SEQPACKET,
>+					    NULL);
>+
>+	return err ?: bytes_handled;
>+}
>+
> ssize_t
> virtio_transport_stream_dequeue(struct vsock_sock *vsk,
> 				struct msghdr *msg,
>@@ -481,6 +607,8 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
> 	spin_lock_init(&vvs->rx_lock);
> 	spin_lock_init(&vvs->tx_lock);
> 	INIT_LIST_HEAD(&vvs->rx_queue);
>+	vvs->user_read_copied = 0;
>+	vvs->user_read_seq_len = 0;
>
> 	return 0;
> }
>--
>2.25.1
>
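
Just to make the suggestion above concrete, this is an untested sketch of the
EOR check I have in mind (assuming 'flags' stays __le32 in struct
virtio_vsock_hdr, as in the current uapi header):

	/* hdr.flags is __le32, so use the matching 32-bit accessor */
	if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_RW_EOR)
		msg->msg_flags |= MSG_EOR;

With le32_to_cpu() the accessor matches the field type, so sparse stops
warning about the restricted-type mismatch and the flag bits are read
correctly regardless of host endianness.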