Stefano Garzarella
2019-Jun-28 12:36 UTC
[PATCH v2 0/3] vsock/virtio: several fixes in the .probe() and .remove()
During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
before registering the driver", Stefan pointed out some possible issues
in the .probe() and .remove() callbacks of the virtio-vsock driver.

This series tries to solve these issues:
- Patch 1 adds RCU critical sections to avoid use-after-free of the
  'the_virtio_vsock' pointer.
- Patch 2 stops the workers before calling vdev->config->reset(vdev),
  to be sure that no one is accessing the device.
- Patch 3 moves the flush of works to the end of the .remove() to avoid
  use-after-free of the 'vsock' object.

v2:
- Patch 1: use RCU to protect the 'the_virtio_vsock' pointer
- Patch 2: no changes
- Patch 3: flush works only at the end of .remove()
- Removed patch 4 because virtqueue_detach_unused_buf() returns all the
  allocated buffers

v1: https://patchwork.kernel.org/cover/10964733/

Stefano Garzarella (3):
  vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
  vsock/virtio: stop workers during the .remove()
  vsock/virtio: fix flush of works during the .remove()

 net/vmw_vsock/virtio_transport.c | 131 ++++++++++++++++++++++++-------
 1 file changed, 102 insertions(+), 29 deletions(-)

--
2.20.1
Stefano Garzarella
2019-Jun-28 12:36 UTC
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
Some callbacks used by the upper layers can run while we are in the
.remove(). A potential use-after-free can happen, because we free
the_virtio_vsock without knowing whether the callbacks have finished
or not.

To solve this issue, we move the assignment of the_virtio_vsock to the
end of .probe(), when we have finished all the initialization, and to
the beginning of .remove(), before releasing resources.
For the same reason, we do the same for vdev->priv.

We use RCU to be sure that all callbacks that use the_virtio_vsock
have finished before we free it. This is not required for callbacks
that use vdev->priv, because after vdev->config->del_vqs() we are sure
that they have finished and will no longer be invoked.

We also take the mutex during the .remove() so that .probe() cannot
run while we are resetting the device.

Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
---
 net/vmw_vsock/virtio_transport.c | 67 +++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 23 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9c287e3e393c..7ad510ec12e0 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -65,19 +65,22 @@ struct virtio_vsock {
 	u32 guest_cid;
 };
 
-static struct virtio_vsock *virtio_vsock_get(void)
-{
-	return the_virtio_vsock;
-}
-
 static u32 virtio_transport_get_local_cid(void)
 {
-	struct virtio_vsock *vsock = virtio_vsock_get();
+	struct virtio_vsock *vsock;
+	u32 ret;
 
-	if (!vsock)
-		return VMADDR_CID_ANY;
+	rcu_read_lock();
+	vsock = rcu_dereference(the_virtio_vsock);
+	if (!vsock) {
+		ret = VMADDR_CID_ANY;
+		goto out_rcu;
+	}
 
-	return vsock->guest_cid;
+	ret = vsock->guest_cid;
+out_rcu:
+	rcu_read_unlock();
+	return ret;
 }
 
 static void virtio_transport_loopback_work(struct work_struct *work)
@@ -197,14 +200,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	struct virtio_vsock *vsock;
 	int len = pkt->len;
 
-	vsock = virtio_vsock_get();
+	rcu_read_lock();
+	vsock = rcu_dereference(the_virtio_vsock);
 	if (!vsock) {
 		virtio_transport_free_pkt(pkt);
-		return -ENODEV;
+		len = -ENODEV;
+		goto out_rcu;
 	}
 
-	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
-		return virtio_transport_send_pkt_loopback(vsock, pkt);
+	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+		len = virtio_transport_send_pkt_loopback(vsock, pkt);
+		goto out_rcu;
+	}
 
 	if (pkt->reply)
 		atomic_inc(&vsock->queued_replies);
@@ -214,6 +221,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	spin_unlock_bh(&vsock->send_pkt_list_lock);
 
 	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+
+out_rcu:
+	rcu_read_unlock();
 	return len;
 }
 
@@ -222,12 +232,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
 {
 	struct virtio_vsock *vsock;
 	struct virtio_vsock_pkt *pkt, *n;
-	int cnt = 0;
+	int cnt = 0, ret;
 	LIST_HEAD(freeme);
 
-	vsock = virtio_vsock_get();
+	rcu_read_lock();
+	vsock = rcu_dereference(the_virtio_vsock);
 	if (!vsock) {
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_rcu;
 	}
 
 	spin_lock_bh(&vsock->send_pkt_list_lock);
@@ -255,7 +267,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
 		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
 	}
 
-	return 0;
+	ret = 0;
+
+out_rcu:
+	rcu_read_unlock();
+	return ret;
 }
 
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
@@ -590,8 +606,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	vsock->rx_buf_max_nr = 0;
 	atomic_set(&vsock->queued_replies, 0);
 
-	vdev->priv = vsock;
-	the_virtio_vsock = vsock;
 	mutex_init(&vsock->tx_lock);
 	mutex_init(&vsock->rx_lock);
 	mutex_init(&vsock->event_lock);
@@ -613,6 +627,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	virtio_vsock_event_fill(vsock);
 	mutex_unlock(&vsock->event_lock);
 
+	vdev->priv = vsock;
+	rcu_assign_pointer(the_virtio_vsock, vsock);
+
 	mutex_unlock(&the_virtio_vsock_mutex);
 	return 0;
 
@@ -627,6 +644,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	struct virtio_vsock *vsock = vdev->priv;
 	struct virtio_vsock_pkt *pkt;
 
+	mutex_lock(&the_virtio_vsock_mutex);
+
+	vdev->priv = NULL;
+	rcu_assign_pointer(the_virtio_vsock, NULL);
+	synchronize_rcu();
+
 	flush_work(&vsock->loopback_work);
 	flush_work(&vsock->rx_work);
 	flush_work(&vsock->tx_work);
@@ -666,12 +689,10 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	}
 	spin_unlock_bh(&vsock->loopback_list_lock);
 
-	mutex_lock(&the_virtio_vsock_mutex);
-	the_virtio_vsock = NULL;
-	mutex_unlock(&the_virtio_vsock_mutex);
-
 	vdev->config->del_vqs(vdev);
 
+	mutex_unlock(&the_virtio_vsock_mutex);
+
 	kfree(vsock);
 }
--
2.20.1
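As an aside, the RCU scheme the patch relies on reduces to the minimal sketch
below. It is not part of the patch; 'struct foo', 'global_foo' and the helper
names are illustrative stand-ins for the driver's the_virtio_vsock handling.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int val;
};

/* Analogous to the_virtio_vsock: readers only see it through RCU. */
static struct foo __rcu *global_foo;

/* Reader side: callbacks that can race with .remove(). */
static int foo_read(void)
{
	struct foo *f;
	int ret;

	rcu_read_lock();
	f = rcu_dereference(global_foo);
	ret = f ? f->val : -ENODEV;	/* "no device", as in the patch */
	rcu_read_unlock();

	return ret;
}

/* End of .probe(): publish only after initialization is complete. */
static void foo_publish(struct foo *f)
{
	rcu_assign_pointer(global_foo, f);
}

/* Start of .remove(): unpublish, wait for readers, then free. */
static void foo_unpublish(struct foo *f)
{
	rcu_assign_pointer(global_foo, NULL);
	synchronize_rcu();	/* no reader can still hold 'f' after this */
	kfree(f);
}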
Stefano Garzarella
2019-Jun-28 12:36 UTC
[PATCH v2 2/3] vsock/virtio: stop workers during the .remove()
Before calling vdev->config->reset(vdev) we need to be sure that
no one is accessing the device. For this reason, we add new variables
to the struct virtio_vsock to stop the workers during the .remove().

This patch also adds a few comments before vdev->config->reset(vdev)
and vdev->config->del_vqs(vdev).

Suggested-by: Stefan Hajnoczi <stefanha at redhat.com>
Suggested-by: Michael S. Tsirkin <mst at redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
---
 net/vmw_vsock/virtio_transport.c | 51 +++++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 7ad510ec12e0..1b44ec6f3f6c 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -38,6 +38,7 @@ struct virtio_vsock {
 	 * must be accessed with tx_lock held.
 	 */
 	struct mutex tx_lock;
+	bool tx_run;
 
 	struct work_struct send_pkt_work;
 	spinlock_t send_pkt_list_lock;
@@ -53,6 +54,7 @@ struct virtio_vsock {
 	 * must be accessed with rx_lock held.
 	 */
 	struct mutex rx_lock;
+	bool rx_run;
 	int rx_buf_nr;
 	int rx_buf_max_nr;
 
@@ -60,6 +62,7 @@ struct virtio_vsock {
 	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
 	 */
 	struct mutex event_lock;
+	bool event_run;
 	struct virtio_vsock_event event_list[8];
 
 	u32 guest_cid;
@@ -94,6 +97,10 @@ static void virtio_transport_loopback_work(struct work_struct *work)
 	spin_unlock_bh(&vsock->loopback_list_lock);
 
 	mutex_lock(&vsock->rx_lock);
+
+	if (!vsock->rx_run)
+		goto out;
+
 	while (!list_empty(&pkts)) {
 		struct virtio_vsock_pkt *pkt;
 
@@ -102,6 +109,7 @@ static void virtio_transport_loopback_work(struct work_struct *work)
 
 		virtio_transport_recv_pkt(pkt);
 	}
+out:
 	mutex_unlock(&vsock->rx_lock);
 }
 
@@ -130,6 +138,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 
 	mutex_lock(&vsock->tx_lock);
 
+	if (!vsock->tx_run)
+		goto out;
+
 	vq = vsock->vqs[VSOCK_VQ_TX];
 
 	for (;;) {
@@ -188,6 +199,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 	if (added)
 		virtqueue_kick(vq);
 
+out:
 	mutex_unlock(&vsock->tx_lock);
 
 	if (restart_rx)
@@ -323,6 +335,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
 
 	vq = vsock->vqs[VSOCK_VQ_TX];
 	mutex_lock(&vsock->tx_lock);
+
+	if (!vsock->tx_run)
+		goto out;
+
 	do {
 		struct virtio_vsock_pkt *pkt;
 		unsigned int len;
@@ -333,6 +349,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
 			added = true;
 		}
 	} while (!virtqueue_enable_cb(vq));
+
+out:
 	mutex_unlock(&vsock->tx_lock);
 
 	if (added)
@@ -361,6 +379,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
 
 	mutex_lock(&vsock->rx_lock);
 
+	if (!vsock->rx_run)
+		goto out;
+
 	do {
 		virtqueue_disable_cb(vq);
 		for (;;) {
@@ -470,6 +491,9 @@ static void virtio_transport_event_work(struct work_struct *work)
 
 	mutex_lock(&vsock->event_lock);
 
+	if (!vsock->event_run)
+		goto out;
+
 	do {
 		struct virtio_vsock_event *event;
 		unsigned int len;
@@ -484,7 +508,7 @@ static void virtio_transport_event_work(struct work_struct *work)
 	} while (!virtqueue_enable_cb(vq));
 
 	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
-
+out:
 	mutex_unlock(&vsock->event_lock);
 }
 
@@ -619,12 +643,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
 	INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
 
+	mutex_lock(&vsock->tx_lock);
+	vsock->tx_run = true;
+	mutex_unlock(&vsock->tx_lock);
+
 	mutex_lock(&vsock->rx_lock);
 	virtio_vsock_rx_fill(vsock);
+	vsock->rx_run = true;
 	mutex_unlock(&vsock->rx_lock);
 
 	mutex_lock(&vsock->event_lock);
 	virtio_vsock_event_fill(vsock);
+	vsock->event_run = true;
 	mutex_unlock(&vsock->event_lock);
 
 	vdev->priv = vsock;
@@ -659,6 +689,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	/* Reset all connected sockets when the device disappear */
 	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
 
+	/* Stop all work handlers to make sure no one is accessing the device,
+	 * so we can safely call vdev->config->reset().
+	 */
+	mutex_lock(&vsock->rx_lock);
+	vsock->rx_run = false;
+	mutex_unlock(&vsock->rx_lock);
+
+	mutex_lock(&vsock->tx_lock);
+	vsock->tx_run = false;
+	mutex_unlock(&vsock->tx_lock);
+
+	mutex_lock(&vsock->event_lock);
+	vsock->event_run = false;
+	mutex_unlock(&vsock->event_lock);
+
+	/* Flush all device writes and interrupts, device will not use any
+	 * more buffers.
+	 */
 	vdev->config->reset(vdev);
 
 	mutex_lock(&vsock->rx_lock);
@@ -689,6 +737,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	}
 	spin_unlock_bh(&vsock->loopback_list_lock);
 
+	/* Delete virtqueues and flush outstanding callbacks if any */
 	vdev->config->del_vqs(vdev);
 
 	mutex_unlock(&the_virtio_vsock_mutex);
--
2.20.1
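The mechanism above boils down to a "run" flag that each worker checks under
its own mutex. Here is a minimal sketch with a single hypothetical worker;
the struct and function names are illustrative, not the driver's.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct toy_worker {
	struct mutex lock;
	bool run;			/* checked by the work fn under 'lock' */
	struct work_struct work;
};

static void toy_work_fn(struct work_struct *work)
{
	struct toy_worker *w = container_of(work, struct toy_worker, work);

	mutex_lock(&w->lock);
	if (!w->run)
		goto out;		/* .remove() in progress: don't touch the device */

	/* ... safe to access the virtqueues here ... */
out:
	mutex_unlock(&w->lock);
}

/* Called from .remove() before vdev->config->reset(): once this returns,
 * any later run of the work function is a no-op.
 */
static void toy_worker_stop(struct toy_worker *w)
{
	mutex_lock(&w->lock);
	w->run = false;
	mutex_unlock(&w->lock);
}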
Stefano Garzarella
2019-Jun-28 12:36 UTC
[PATCH v2 3/3] vsock/virtio: fix flush of works during the .remove()
This patch moves the flush of works to after vdev->config->del_vqs(vdev),
because we need to be sure that no workers run before we free the
'vsock' object.

Since we stopped the workers using the [tx|rx|event]_run flags,
we are sure no one is accessing the device while we are calling
vdev->config->reset(vdev), so we can safely move the workers' flush.

Before vdev->config->del_vqs(vdev), workers can still be scheduled
by VQ callbacks, so we must flush them after del_vqs() to avoid a
use-after-free of the 'vsock' object.

Suggested-by: Michael S. Tsirkin <mst at redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
---
 net/vmw_vsock/virtio_transport.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 1b44ec6f3f6c..96dafa978268 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -680,12 +680,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	rcu_assign_pointer(the_virtio_vsock, NULL);
 	synchronize_rcu();
 
-	flush_work(&vsock->loopback_work);
-	flush_work(&vsock->rx_work);
-	flush_work(&vsock->tx_work);
-	flush_work(&vsock->event_work);
-	flush_work(&vsock->send_pkt_work);
-
 	/* Reset all connected sockets when the device disappear */
 	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
 
@@ -740,6 +734,15 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	/* Delete virtqueues and flush outstanding callbacks if any */
 	vdev->config->del_vqs(vdev);
 
+	/* Other works can be queued before 'config->del_vqs()', so we flush
+	 * all works before to free the vsock object to avoid use after free.
+	 */
+	flush_work(&vsock->loopback_work);
+	flush_work(&vsock->rx_work);
+	flush_work(&vsock->tx_work);
+	flush_work(&vsock->event_work);
+	flush_work(&vsock->send_pkt_work);
+
 	mutex_unlock(&the_virtio_vsock_mutex);
 
 	kfree(vsock);
--
2.20.1
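Putting the three patches together, the resulting teardown order can be
summarized in the sketch below. It is a condensed illustration, not the
literal driver code: the function name is invented, and the socket reset,
worker stop and queue draining steps are elided into comments.

static void virtio_vsock_remove_sketch(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Patch 1: unpublish, then wait for RCU readers (callbacks). */
	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	/* (reset of connected sockets elided) */

	/* Patch 2: clear tx_run/rx_run/event_run under their mutexes so
	 * the workers stop touching the device, then reset it.
	 */
	vdev->config->reset(vdev);

	/* (draining of rx/tx queues and packet lists elided) */

	/* No new work can be queued by VQ callbacks after this. */
	vdev->config->del_vqs(vdev);

	/* Patch 3: flush work queued before del_vqs(), then free. */
	flush_work(&vsock->loopback_work);
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}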
Stefan Hajnoczi
2019-Jul-01 14:54 UTC
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
On Fri, Jun 28, 2019 at 02:36:57PM +0200, Stefano Garzarella wrote:
> Some callbacks used by the upper layers can run while we are in the
> .remove(). A potential use-after-free can happen, because we free
> the_virtio_vsock without knowing whether the callbacks have finished
> or not.
> 
> To solve this issue, we move the assignment of the_virtio_vsock to the
> end of .probe(), when we have finished all the initialization, and to
> the beginning of .remove(), before releasing resources.
> For the same reason, we do the same for vdev->priv.
> 
> We use RCU to be sure that all callbacks that use the_virtio_vsock
> have finished before we free it. This is not required for callbacks
> that use vdev->priv, because after vdev->config->del_vqs() we are sure
> that they have finished and will no longer be invoked.

->del_vqs() is only called at the very end; did you forget to move it
earlier?

In particular, the virtqueue handler callbacks schedule work on a
workqueue. The work functions use container_of() to get vsock. We need
to be sure that the work item isn't freed along with vsock while the
work item is still pending.

How do we know that the virtqueue handler is never called in such a way
that it sees vsock != NULL (there is no explicit memory barrier on the
read side) and then schedules a work item after flush_work() has run?

Stefan
Stefan Hajnoczi
2019-Jul-01 15:08 UTC
[PATCH v2 3/3] vsock/virtio: fix flush of works during the .remove()
On Fri, Jun 28, 2019 at 02:36:59PM +0200, Stefano Garzarella wrote:
> This patch moves the flush of works to after vdev->config->del_vqs(vdev),
> because we need to be sure that no workers run before we free the
> 'vsock' object.
> 
> Since we stopped the workers using the [tx|rx|event]_run flags,
> we are sure no one is accessing the device while we are calling
> vdev->config->reset(vdev), so we can safely move the workers' flush.

What about the send_pkt and loopback work? How were they stopped safely?

For example, if the send_pkt work executes, then we're in trouble, since
it accesses the tx virtqueue, which is deleted by ->del_vqs().
Stefan Hajnoczi
2019-Jul-01 15:09 UTC
[PATCH v2 3/3] vsock/virtio: fix flush of works during the .remove()
On Fri, Jun 28, 2019 at 02:36:59PM +0200, Stefano Garzarella wrote:
> This patch moves the flush of works to after vdev->config->del_vqs(vdev),
> because we need to be sure that no workers run before we free the
> 'vsock' object.
> 
> Since we stopped the workers using the [tx|rx|event]_run flags,
> we are sure no one is accessing the device while we are calling
> vdev->config->reset(vdev), so we can safely move the workers' flush.
> 
> Before vdev->config->del_vqs(vdev), workers can still be scheduled
> by VQ callbacks, so we must flush them after del_vqs() to avoid a
> use-after-free of the 'vsock' object.

Never mind, I looked back at Patch 2 and saw that the send_pkt and
loopback work functions were also updated. Thanks!

Stefan
Stefan Hajnoczi
2019-Jul-01 15:10 UTC
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
On Fri, Jun 28, 2019 at 02:36:57PM +0200, Stefano Garzarella wrote:
> Some callbacks used by the upper layers can run while we are in the
> .remove(). A potential use-after-free can happen, because we free
> the_virtio_vsock without knowing whether the callbacks have finished
> or not.
> 
> To solve this issue, we move the assignment of the_virtio_vsock to the
> end of .probe(), when we have finished all the initialization, and to
> the beginning of .remove(), before releasing resources.
> For the same reason, we do the same for vdev->priv.
> 
> We use RCU to be sure that all callbacks that use the_virtio_vsock
> have finished before we free it. This is not required for callbacks
> that use vdev->priv, because after vdev->config->del_vqs() we are sure
> that they have finished and will no longer be invoked.

My question is answered in Patch 3.
Stefan Hajnoczi
2019-Jul-01 15:11 UTC
[PATCH v2 0/3] vsock/virtio: several fixes in the .probe() and .remove()
On Fri, Jun 28, 2019 at 02:36:56PM +0200, Stefano Garzarella wrote:
> During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
> before registering the driver", Stefan pointed out some possible issues
> in the .probe() and .remove() callbacks of the virtio-vsock driver.
> 
> This series tries to solve these issues:
> - Patch 1 adds RCU critical sections to avoid use-after-free of the
>   'the_virtio_vsock' pointer.
> - Patch 2 stops the workers before calling vdev->config->reset(vdev),
>   to be sure that no one is accessing the device.
> - Patch 3 moves the flush of works to the end of the .remove() to avoid
>   use-after-free of the 'vsock' object.
> 
> v2:
> - Patch 1: use RCU to protect the 'the_virtio_vsock' pointer
> - Patch 2: no changes
> - Patch 3: flush works only at the end of .remove()
> - Removed patch 4 because virtqueue_detach_unused_buf() returns all the
>   allocated buffers
> 
> v1: https://patchwork.kernel.org/cover/10964733/

This looks good to me.

Did you run any stress tests? For example, an SMP guest constantly
connecting and sending packets, together with a script that
hotplugs/unplugs vhost-vsock-pci from the host side.

Stefan
Stefano Garzarella
2019-Jul-01 17:03 UTC
[PATCH v2 0/3] vsock/virtio: several fixes in the .probe() and .remove()
On Mon, Jul 01, 2019 at 04:11:13PM +0100, Stefan Hajnoczi wrote:
> On Fri, Jun 28, 2019 at 02:36:56PM +0200, Stefano Garzarella wrote:
> > During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
> > before registering the driver", Stefan pointed out some possible issues
> > in the .probe() and .remove() callbacks of the virtio-vsock driver.
> > 
> > This series tries to solve these issues:
> > - Patch 1 adds RCU critical sections to avoid use-after-free of the
> >   'the_virtio_vsock' pointer.
> > - Patch 2 stops the workers before calling vdev->config->reset(vdev),
> >   to be sure that no one is accessing the device.
> > - Patch 3 moves the flush of works to the end of the .remove() to avoid
> >   use-after-free of the 'vsock' object.
> > 
> > v2:
> > - Patch 1: use RCU to protect the 'the_virtio_vsock' pointer
> > - Patch 2: no changes
> > - Patch 3: flush works only at the end of .remove()
> > - Removed patch 4 because virtqueue_detach_unused_buf() returns all the
> >   allocated buffers
> > 
> > v1: https://patchwork.kernel.org/cover/10964733/
> 
> This looks good to me.

Thanks for the review!

> 
> Did you run any stress tests? For example, an SMP guest constantly
> connecting and sending packets, together with a script that
> hotplugs/unplugs vhost-vsock-pci from the host side.

Yes, I started an SMP guest (-smp 4 -monitor tcp:127.0.0.1:1234,server,nowait)
and I ran these scripts to stress the .probe()/.remove() path:

- guest

  while true; do
      cat /dev/urandom | nc-vsock -l 4321 > /dev/null &
      cat /dev/urandom | nc-vsock -l 5321 > /dev/null &
      cat /dev/urandom | nc-vsock -l 6321 > /dev/null &
      cat /dev/urandom | nc-vsock -l 7321 > /dev/null &
      wait
  done

- host

  while true; do
      cat /dev/urandom | nc-vsock 3 4321 > /dev/null &
      cat /dev/urandom | nc-vsock 3 5321 > /dev/null &
      cat /dev/urandom | nc-vsock 3 6321 > /dev/null &
      cat /dev/urandom | nc-vsock 3 7321 > /dev/null &
      sleep 2
      echo "device_del v1" | nc 127.0.0.1 1234
      sleep 1
      echo "device_add vhost-vsock-pci,id=v1,guest-cid=3" | nc 127.0.0.1 1234
      sleep 1
  done

Do you think this is enough, or would a more thorough test be better?

Thanks,
Stefano
Jason Wang
2019-Jul-03 09:53 UTC
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
On 2019/6/28 8:36 PM, Stefano Garzarella wrote:
> Some callbacks used by the upper layers can run while we are in the
> .remove(). A potential use-after-free can happen, because we free
> the_virtio_vsock without knowing whether the callbacks have finished
> or not.
> 
> To solve this issue, we move the assignment of the_virtio_vsock to the
> end of .probe(), when we have finished all the initialization, and to
> the beginning of .remove(), before releasing resources.
> For the same reason, we do the same for vdev->priv.
> 
> We use RCU to be sure that all callbacks that use the_virtio_vsock
> have finished before we free it. This is not required for callbacks
> that use vdev->priv, because after vdev->config->del_vqs() we are sure
> that they have finished and will no longer be invoked.
> 
> We also take the mutex during the .remove() so that .probe() cannot
> run while we are resetting the device.
> 
> Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
> ---
>  net/vmw_vsock/virtio_transport.c | 67 +++++++++++++++++++++-----------
>  1 file changed, 44 insertions(+), 23 deletions(-)
> 
> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
> index 9c287e3e393c..7ad510ec12e0 100644
> --- a/net/vmw_vsock/virtio_transport.c
> +++ b/net/vmw_vsock/virtio_transport.c
> @@ -65,19 +65,22 @@ struct virtio_vsock {
>  	u32 guest_cid;
>  };
>  
> -static struct virtio_vsock *virtio_vsock_get(void)
> -{
> -	return the_virtio_vsock;
> -}
> -
>  static u32 virtio_transport_get_local_cid(void)
>  {
> -	struct virtio_vsock *vsock = virtio_vsock_get();
> +	struct virtio_vsock *vsock;
> +	u32 ret;
>  
> -	if (!vsock)
> -		return VMADDR_CID_ANY;
> +	rcu_read_lock();
> +	vsock = rcu_dereference(the_virtio_vsock);
> +	if (!vsock) {
> +		ret = VMADDR_CID_ANY;
> +		goto out_rcu;
> +	}
>  
> -	return vsock->guest_cid;
> +	ret = vsock->guest_cid;
> +out_rcu:
> +	rcu_read_unlock();
> +	return ret;
>  }
>  
>  static void virtio_transport_loopback_work(struct work_struct *work)
> @@ -197,14 +200,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
>  	struct virtio_vsock *vsock;
>  	int len = pkt->len;
>  
> -	vsock = virtio_vsock_get();
> +	rcu_read_lock();
> +	vsock = rcu_dereference(the_virtio_vsock);
>  	if (!vsock) {
>  		virtio_transport_free_pkt(pkt);
> -		return -ENODEV;
> +		len = -ENODEV;
> +		goto out_rcu;
>  	}
>  
> -	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
> -		return virtio_transport_send_pkt_loopback(vsock, pkt);
> +	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
> +		len = virtio_transport_send_pkt_loopback(vsock, pkt);
> +		goto out_rcu;
> +	}
>  
>  	if (pkt->reply)
>  		atomic_inc(&vsock->queued_replies);
> @@ -214,6 +221,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
>  	spin_unlock_bh(&vsock->send_pkt_list_lock);
>  
>  	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
> +
> +out_rcu:
> +	rcu_read_unlock();
>  	return len;
>  }
>  
> @@ -222,12 +232,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
>  {
>  	struct virtio_vsock *vsock;
>  	struct virtio_vsock_pkt *pkt, *n;
> -	int cnt = 0;
> +	int cnt = 0, ret;
>  	LIST_HEAD(freeme);
>  
> -	vsock = virtio_vsock_get();
> +	rcu_read_lock();
> +	vsock = rcu_dereference(the_virtio_vsock);
>  	if (!vsock) {
> -		return -ENODEV;
> +		ret = -ENODEV;
> +		goto out_rcu;
>  	}
>  
>  	spin_lock_bh(&vsock->send_pkt_list_lock);
> @@ -255,7 +267,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
>  		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
>  	}
>  
> -	return 0;
> +	ret = 0;
> +
> +out_rcu:
> +	rcu_read_unlock();
> +	return ret;
>  }
>  
>  static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
> @@ -590,8 +606,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
>  	vsock->rx_buf_max_nr = 0;
>  	atomic_set(&vsock->queued_replies, 0);
>  
> -	vdev->priv = vsock;
> -	the_virtio_vsock = vsock;
>  	mutex_init(&vsock->tx_lock);
>  	mutex_init(&vsock->rx_lock);
>  	mutex_init(&vsock->event_lock);
> @@ -613,6 +627,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
>  	virtio_vsock_event_fill(vsock);
>  	mutex_unlock(&vsock->event_lock);
>  
> +	vdev->priv = vsock;
> +	rcu_assign_pointer(the_virtio_vsock, vsock);

You probably need to use rcu_dereference_protected() to access
the_virtio_vsock in this function, in order to keep sparse happy.

> +
>  	mutex_unlock(&the_virtio_vsock_mutex);
>  	return 0;
>  
> @@ -627,6 +644,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
>  	struct virtio_vsock *vsock = vdev->priv;
>  	struct virtio_vsock_pkt *pkt;
>  
> +	mutex_lock(&the_virtio_vsock_mutex);
> +
> +	vdev->priv = NULL;
> +	rcu_assign_pointer(the_virtio_vsock, NULL);

This is still suspicious: can we access the_virtio_vsock through
vdev->priv? If yes, we may still get a use-after-free, since that access
is not protected by RCU.

Another, more interesting question: I believe the virtio_vsock structure
will be a singleton, so what is the point of using vdev->priv to access
the_virtio_vsock? It looks to me like it brings extra trouble for the
synchronization.

Thanks

> +	synchronize_rcu();
> +
>  	flush_work(&vsock->loopback_work);
>  	flush_work(&vsock->rx_work);
>  	flush_work(&vsock->tx_work);
> @@ -666,12 +689,10 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
>  	}
>  	spin_unlock_bh(&vsock->loopback_list_lock);
>  
> -	mutex_lock(&the_virtio_vsock_mutex);
> -	the_virtio_vsock = NULL;
> -	mutex_unlock(&the_virtio_vsock_mutex);
> -
>  	vdev->config->del_vqs(vdev);
>  
> +	mutex_unlock(&the_virtio_vsock_mutex);
> +
>  	kfree(vsock);
>  }
> 
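For readers following the sparse point: the change Jason suggests would look
roughly like the sketch below, assuming the_virtio_vsock is annotated with
__rcu and the access happens with the_virtio_vsock_mutex held. The helper
name is hypothetical and not part of the series.

#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Hypothetical helper: read the_virtio_vsock while the_virtio_vsock_mutex
 * is held, with the annotation that keeps sparse and lockdep informed.
 */
static struct virtio_vsock *virtio_vsock_get_locked(void)
{
	lockdep_assert_held(&the_virtio_vsock_mutex);

	return rcu_dereference_protected(the_virtio_vsock,
			lockdep_is_held(&the_virtio_vsock_mutex));
}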
Jason Wang
2019-Jul-04 04:00 UTC
[PATCH v2 2/3] vsock/virtio: stop workers during the .remove()
On 2019/6/28 8:36 PM, Stefano Garzarella wrote:
> Before calling vdev->config->reset(vdev) we need to be sure that
> no one is accessing the device. For this reason, we add new variables
> to the struct virtio_vsock to stop the workers during the .remove().
> 
> This patch also adds a few comments before vdev->config->reset(vdev)
> and vdev->config->del_vqs(vdev).
> 
> Suggested-by: Stefan Hajnoczi <stefanha at redhat.com>
> Suggested-by: Michael S. Tsirkin <mst at redhat.com>
> Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
> ---
>  net/vmw_vsock/virtio_transport.c | 51 +++++++++++++++++++++++++++++++-
>  1 file changed, 50 insertions(+), 1 deletion(-)

This should work, but in the future we may consider converting
the_virtio_vsock to a socket object and using the socket refcnt and
destructor, instead of inventing something new ourselves.

Thanks

> 
> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
> index 7ad510ec12e0..1b44ec6f3f6c 100644
> --- a/net/vmw_vsock/virtio_transport.c
> +++ b/net/vmw_vsock/virtio_transport.c
> @@ -38,6 +38,7 @@ struct virtio_vsock {
>  	 * must be accessed with tx_lock held.
>  	 */
>  	struct mutex tx_lock;
> +	bool tx_run;
>  
>  	struct work_struct send_pkt_work;
>  	spinlock_t send_pkt_list_lock;
> @@ -53,6 +54,7 @@ struct virtio_vsock {
>  	 * must be accessed with rx_lock held.
>  	 */
>  	struct mutex rx_lock;
> +	bool rx_run;
>  	int rx_buf_nr;
>  	int rx_buf_max_nr;
>  
> @@ -60,6 +62,7 @@ struct virtio_vsock {
>  	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
>  	 */
>  	struct mutex event_lock;
> +	bool event_run;
>  	struct virtio_vsock_event event_list[8];
>  
>  	u32 guest_cid;
> @@ -94,6 +97,10 @@ static void virtio_transport_loopback_work(struct work_struct *work)
>  	spin_unlock_bh(&vsock->loopback_list_lock);
>  
>  	mutex_lock(&vsock->rx_lock);
> +
> +	if (!vsock->rx_run)
> +		goto out;
> +
>  	while (!list_empty(&pkts)) {
>  		struct virtio_vsock_pkt *pkt;
>  
> @@ -102,6 +109,7 @@ static void virtio_transport_loopback_work(struct work_struct *work)
>  
>  		virtio_transport_recv_pkt(pkt);
>  	}
> +out:
>  	mutex_unlock(&vsock->rx_lock);
>  }
>  
> @@ -130,6 +138,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
>  
>  	mutex_lock(&vsock->tx_lock);
>  
> +	if (!vsock->tx_run)
> +		goto out;
> +
>  	vq = vsock->vqs[VSOCK_VQ_TX];
>  
>  	for (;;) {
> @@ -188,6 +199,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
>  	if (added)
>  		virtqueue_kick(vq);
>  
> +out:
>  	mutex_unlock(&vsock->tx_lock);
>  
>  	if (restart_rx)
> @@ -323,6 +335,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
>  
>  	vq = vsock->vqs[VSOCK_VQ_TX];
>  	mutex_lock(&vsock->tx_lock);
> +
> +	if (!vsock->tx_run)
> +		goto out;
> +
>  	do {
>  		struct virtio_vsock_pkt *pkt;
>  		unsigned int len;
> @@ -333,6 +349,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
>  			added = true;
>  		}
>  	} while (!virtqueue_enable_cb(vq));
> +
> +out:
>  	mutex_unlock(&vsock->tx_lock);
>  
>  	if (added)
> @@ -361,6 +379,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
>  
>  	mutex_lock(&vsock->rx_lock);
>  
> +	if (!vsock->rx_run)
> +		goto out;
> +
>  	do {
>  		virtqueue_disable_cb(vq);
>  		for (;;) {
> @@ -470,6 +491,9 @@ static void virtio_transport_event_work(struct work_struct *work)
>  
>  	mutex_lock(&vsock->event_lock);
>  
> +	if (!vsock->event_run)
> +		goto out;
> +
>  	do {
>  		struct virtio_vsock_event *event;
>  		unsigned int len;
> @@ -484,7 +508,7 @@ static void virtio_transport_event_work(struct work_struct *work)
>  	} while (!virtqueue_enable_cb(vq));
>  
>  	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
> -
> +out:
>  	mutex_unlock(&vsock->event_lock);
>  }
>  
> @@ -619,12 +643,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
>  	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
>  	INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
>  
> +	mutex_lock(&vsock->tx_lock);
> +	vsock->tx_run = true;
> +	mutex_unlock(&vsock->tx_lock);
> +
>  	mutex_lock(&vsock->rx_lock);
>  	virtio_vsock_rx_fill(vsock);
> +	vsock->rx_run = true;
>  	mutex_unlock(&vsock->rx_lock);
>  
>  	mutex_lock(&vsock->event_lock);
>  	virtio_vsock_event_fill(vsock);
> +	vsock->event_run = true;
>  	mutex_unlock(&vsock->event_lock);
>  
>  	vdev->priv = vsock;
> @@ -659,6 +689,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
>  	/* Reset all connected sockets when the device disappear */
>  	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
>  
> +	/* Stop all work handlers to make sure no one is accessing the device,
> +	 * so we can safely call vdev->config->reset().
> +	 */
> +	mutex_lock(&vsock->rx_lock);
> +	vsock->rx_run = false;
> +	mutex_unlock(&vsock->rx_lock);
> +
> +	mutex_lock(&vsock->tx_lock);
> +	vsock->tx_run = false;
> +	mutex_unlock(&vsock->tx_lock);
> +
> +	mutex_lock(&vsock->event_lock);
> +	vsock->event_run = false;
> +	mutex_unlock(&vsock->event_lock);
> +
> +	/* Flush all device writes and interrupts, device will not use any
> +	 * more buffers.
> +	 */
>  	vdev->config->reset(vdev);
>  
>  	mutex_lock(&vsock->rx_lock);
> @@ -689,6 +737,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
>  	}
>  	spin_unlock_bh(&vsock->loopback_list_lock);
>  
> +	/* Delete virtqueues and flush outstanding callbacks if any */
>  	vdev->config->del_vqs(vdev);
>  
>  	mutex_unlock(&the_virtio_vsock_mutex);