search for: atomic_sub_return

Displaying 20 results from an estimated 61 matches for "atomic_sub_return".

2016 Dec 12
3
[PATCH v4 2/4] vhost-vsock: add pkt cancel capability
...>send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+			vhost_poll_queue(&tx_vq->poll);
+	}
+
+	return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		       unsigned int out, unsi...
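The interesting part of this hunk is the wakeup test: atomic_sub_return() yields the post-subtraction value, so "new_cnt + cnt" is the old count, and the condition fires exactly when this subtraction moved queued_replies from at or above the vring size (TX throttled) to below it, queueing the poll exactly once. A minimal userspace C analogue of that threshold-crossing pattern; vq_num, kick() and cancel_replies() are illustrative stand-ins, not names from the patch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int queued_replies;
static const int vq_num = 4;		/* stand-in for tx_vq->num */

static void kick(void)			/* stand-in for vhost_poll_queue() */
{
	puts("kick: resume TX processing");
}

static void cancel_replies(int cnt)
{
	/* C11 atomic_fetch_sub() returns the PRE-subtraction value; the
	 * kernel's atomic_sub_return() returns the POST value, so derive
	 * both explicitly. */
	int old_cnt = atomic_fetch_sub(&queued_replies, cnt);
	int new_cnt = old_cnt - cnt;

	/* Kick only if this subtraction crossed the threshold: at or
	 * above vq_num before, below it now. Concurrent cancellers see
	 * disjoint [new_cnt, old_cnt] windows, so exactly one kicks. */
	if (old_cnt >= vq_num && new_cnt < vq_num)
		kick();
}

int main(void)
{
	atomic_store(&queued_replies, 5);   /* above threshold: throttled */
	cancel_replies(2);                  /* 5 -> 3 crosses 4: kicks */
	cancel_replies(1);                  /* 3 -> 2: already below, no kick */
	return 0;
}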
2014 Feb 12
0
[PATCH net 1/3] kref: add kref_sub_return
...is sometimes useful to get the value of the reference count after decrement. For example, vhost wants to execute some periodic cleanup operations once the number of references drops below a specific value, before it reaches zero (for efficiency). Add an API to do this atomically and efficiently using atomic_sub_return.

Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
---
Greg, could you ack this API extension please? I think it is cleanest to merge this through -net together with the first user.

 include/linux/kref.h | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 delet...
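The patch body is truncated in this excerpt, so what follows is only a sketch of what the described helper could look like, reconstructed from the commit message (decrement by count, fire release() at zero, return the post-decrement value so callers can also test intermediate thresholds) and written against C11 atomics rather than the kernel's kref internals:

#include <stdatomic.h>

struct kref {
	atomic_int refcount;
};

/* Sketch only, not the patch text: decrement by 'count', call
 * release() when the count reaches zero, and hand the post-decrement
 * value back so a caller can also react when the count merely drops
 * below some threshold, not only when it hits zero. */
static inline int kref_sub_return(struct kref *kref, unsigned int count,
				  void (*release)(struct kref *kref))
{
	int r = atomic_fetch_sub(&kref->refcount, (int)count) - (int)count;

	if (r == 0)
		release(kref);	/* last reference gone */
	return r;
}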
2020 Aug 18
0
qemu -display sdl,gl=on also eats CPU
...mation)   kallsyms   unmap_page_range
  1123 100.000   (no location information)   kallsyms   unmap_page_range [self]
-------------------------------------------------------------------------------
  1088  0.5242   (no location information)   kallsyms   atomic_sub_return.constprop.3
  1088 100.000   (no location information)   kallsyms   atomic_sub_return.constprop.3 [self]
-------------------------------------------------------------------------------
pixman/memcpy dominating the initial profile is probably OK, because the guest uses the vesa driver and KDE4 w...
2014 Feb 12
4
[PATCH net 0/3] vhost fixes for 3.14, -stable
This fixes a deadlock with vhost reported in the field, as well as a theoretical race issue found by code review. Patches 1+2 are needed for stable. Thanks to Qin Chuanyu for reporting the issue!

Michael S. Tsirkin (3):
  kref: add kref_sub_return
  vhost: fix ref cnt checking deadlock
  vhost: fix a theoretical race in device cleanup

 include/linux/kref.h | 33
2016 Dec 08
6
[PATCH v3 2/4] vhost-vsock: add pkt cancel capability
...>send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+			vhost_poll_queue(&tx_vq->poll);
+	}
+
+	return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		       unsigned int out, unsi...
2014 Feb 13
2
[PATCH net v2] vhost: fix ref cnt checking deadlock
...1);
 	init_waitqueue_head(&ubufs->wait);
 	ubufs->vq = vq;
 	return ubufs;
 }

-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+	int r = atomic_sub_return(1, &ubufs->refcount);
+	if (unlikely(!r))
+		wake_up(&ubufs->wait);
+	return r;
 }

 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
-	wait_event(ubufs->wait, !atomic_read(&ubufs->...
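The fix drops the kref (whose release-callback shape made the put/wait ordering deadlock-prone) in favor of a bare atomic counter: put() returns the post-decrement count and wakes waiters at zero, and put_and_wait() drops its own reference before sleeping. A runnable userspace analogue of that pairing, with a pthread condvar standing in for the kernel waitqueue; all names are illustrative:

#include <pthread.h>
#include <stdatomic.h>

struct ubuf_ref {
	atomic_int refcount;
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

static struct ubuf_ref ubufs = {
	.refcount = 1,			/* owner's reference */
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
};

static int ubuf_put(struct ubuf_ref *u)
{
	int r = atomic_fetch_sub(&u->refcount, 1) - 1;

	if (r == 0) {			/* last reference: wake waiters */
		pthread_mutex_lock(&u->lock);
		pthread_cond_broadcast(&u->wait);
		pthread_mutex_unlock(&u->lock);
	}
	return r;			/* post-decrement value */
}

static void ubuf_put_and_wait(struct ubuf_ref *u)
{
	ubuf_put(u);			/* drop our own reference first */
	pthread_mutex_lock(&u->lock);
	while (atomic_load(&u->refcount) != 0)
		pthread_cond_wait(&u->wait, &u->lock);
	pthread_mutex_unlock(&u->lock);
}

static void *worker(void *arg)
{
	ubuf_put(arg);			/* completion path drops its ref */
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_fetch_add(&ubufs.refcount, 1);	/* reference for the worker */
	pthread_create(&t, NULL, worker, &ubufs);
	ubuf_put_and_wait(&ubufs);		/* drop ours, wait for worker */
	pthread_join(t, NULL);
	return 0;
}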
2017 Mar 01
5
[PATCH-v4-RESEND 0/4] vsock: cancel connect packets when failing to connect
Hi David, these patchsets were sent before and were reviewed by Stefan and Jorgen [https://www.spinics.net/lists/kvm/msg142367.html]. If there is any blocker, please do tell and I'll see to it. Thanks! Currently, if a connect call fails on a signal or timeout (e.g., the guest is still in the process of starting up), we'll just return to the caller and leave the connect packets queued, and they are sent
2017 Mar 15
6
[PATCH-v5 0/4] vsock: cancel connect packets when failing to connect
Currently, if a connect call fails on a signal or timeout (e.g., the guest is still in the process of starting up), we'll just return to the caller and leave the connect packets queued, and they are sent even though the connection is considered a failure, which can confuse applications with unwanted false connect attempts. The patchset enables vsock (both host and guest) to cancel queued packets when a
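Stripped of the virtio plumbing, the mechanism this cover letter describes is simple bookkeeping: every queued control packet records which socket created it, and a failed connect walks the send queue and frees its own packets before the transmit worker can send them. A runnable userspace sketch of just that idea; all names are illustrative and none of this is the vsock API:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int owner;		/* socket that queued this packet */
	struct pkt *next;
};

static struct pkt *send_queue;

static void queue_pkt(int owner)
{
	struct pkt *p = malloc(sizeof(*p));

	p->owner = owner;
	p->next = send_queue;
	send_queue = p;
}

/* Drop every still-queued packet owned by 'owner'; returns how many
 * were cancelled (cf. the list_for_each_entry_safe() walk in the
 * patch excerpts above). */
static int cancel_pkts(int owner)
{
	struct pkt **pp = &send_queue, *p;
	int cnt = 0;

	while ((p = *pp) != NULL) {
		if (p->owner == owner) {
			*pp = p->next;	/* unlink before the TX worker sees it */
			free(p);
			cnt++;
		} else {
			pp = &p->next;
		}
	}
	return cnt;
}

int main(void)
{
	queue_pkt(1);
	queue_pkt(2);
	queue_pkt(1);
	printf("cancelled %d packets for socket 1\n", cancel_pkts(1));
	return 0;
}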
2016 Dec 07
0
[PATCH v2 2/4] vhost-vsock: add pkt cancel capability
...>send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+			vhost_poll_queue(&tx_vq->poll);
+	}
+
+	return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		       unsigned int out, unsi...
2016 Dec 07
0
[PATCH v2 3/4] vsock: add pkt cancel capability
...&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+		    new_cnt < virtqueue_get_vring_size(rx_vq))
+			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+	}
+
+	return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsoc...
2016 Dec 08
0
[PATCH v3 3/4] vsock: add pkt cancel capability
...&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+		    new_cnt < virtqueue_get_vring_size(rx_vq))
+			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+	}
+
+	return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsoc...
2016 Dec 12
0
[PATCH v4 3/4] vsock: add pkt cancel capability
...&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+		    new_cnt < virtqueue_get_vring_size(rx_vq))
+			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+	}
+
+	return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsoc...
2016 Dec 07
8
[PATCH v2 0/4] vsock: cancel connect packets when failing to connect
Currently, if a connect call fails on a signal or timeout (e.g., the guest is still in the process of starting up), we'll just return to the caller and leave the connect packets queued, and they are sent even though the connection is considered a failure, which can confuse applications with unwanted false connect attempts. The patchset enables vsock (both host and guest) to cancel queued packets when a