search for: spin_unlock_bh

Displaying 20 results from an estimated 350 matches for "spin_unlock_bh".

2018 Sep 27
3
[PATCH net V2] vhost-vsock: fix use after free
...len = pkt->len; + spin_lock_bh(&vhost_vsock_lock); + /* Find the vhost_vsock according to guest context id */ - vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); + vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); if (!vsock) { virtio_transport_free_pkt(pkt); + spin_unlock_bh(&vhost_vsock_lock); return -ENODEV; } if (pkt->reply) atomic_inc(&vsock->queued_replies); - spin_lock_bh(&vsock->send_pkt_list_lock); + spin_lock(&vsock->send_pkt_list_lock); list_add_tail(&pkt->list, &vsock->send_pkt_list); - spin_unlock_bh...
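The locking pattern in this diff is worth noting: the outer vhost_vsock_lock is taken with the _bh variant, so the nested per-socket lock can be downgraded from spin_lock_bh() to plain spin_lock(), since bottom halves are already disabled on this CPU. A minimal sketch of that nesting, using illustrative type and lock names rather than the real vhost structures:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/errno.h>

struct pkt {
        struct list_head list;
};

struct sock_ctx {
        spinlock_t pkt_list_lock;               /* nested lock */
        struct list_head pkt_list;
};

static DEFINE_SPINLOCK(lookup_lock);            /* outer lock, plays the role of vhost_vsock_lock */

static int queue_pkt(struct sock_ctx *ctx, struct pkt *pkt)
{
        spin_lock_bh(&lookup_lock);             /* disables bottom halves on this CPU */
        if (!ctx) {
                spin_unlock_bh(&lookup_lock);   /* every early return must drop the lock */
                return -ENODEV;
        }
        spin_lock(&ctx->pkt_list_lock);         /* BH already off: plain variant is enough */
        list_add_tail(&pkt->list, &ctx->pkt_list);
        spin_unlock(&ctx->pkt_list_lock);
        spin_unlock_bh(&lookup_lock);
        return 0;
}

Holding the outer lock across the whole lookup-and-queue operation is what prevents the looked-up object from being freed mid-use, which is the use after free the patch fixes.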
2023 Jul 21
2
[Bridge] [PATCH] can: j1939: prevent deadlock by changing j1939_socks_lock to rwlock
...static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk) jsk->state |= J1939_SOCK_BOUND; j1939_priv_get(priv); - spin_lock_bh(&priv->j1939_socks_lock); + write_lock_bh(&priv->j1939_socks_lock); list_add_tail(&jsk->list, &priv->j1939_socks); - spin_unlock_bh(&priv->j1939_socks_lock); + write_unlock_bh(&priv->j1939_socks_lock); } static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk) { - spin_lock_bh(&priv->j1939_socks_lock); + write_lock_bh(&priv->j1939_socks_lock); list_del_init(&jsk->list)...
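The deadlock fix here swaps a spinlock for an rwlock_t: paths that only walk the j1939_socks list become readers and may run concurrently, while add/remove paths stay exclusive writers. The _bh variants keep the lock usable against softirq context. A generic sketch of the pattern, with illustrative names:

#include <linux/spinlock.h>
#include <linux/list.h>

struct entry {
        struct list_head list;
        int id;
};

static DEFINE_RWLOCK(socks_lock);
static LIST_HEAD(socks);

static void add_entry(struct entry *e)
{
        write_lock_bh(&socks_lock);             /* exclusive: the list is modified */
        list_add_tail(&e->list, &socks);
        write_unlock_bh(&socks_lock);
}

static struct entry *find_entry(int id)
{
        struct entry *e, *found = NULL;

        read_lock_bh(&socks_lock);              /* shared: readers may overlap */
        list_for_each_entry(e, &socks, list) {
                if (e->id == id) {
                        found = e;
                        break;
                }
        }
        read_unlock_bh(&socks_lock);
        return found;
}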
2018 Sep 27
0
[PATCH net V2] vhost-vsock: fix use after free
...(&vhost_vsock_lock); > + > /* Find the vhost_vsock according to guest context id */ > - vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); > + vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); > if (!vsock) { > virtio_transport_free_pkt(pkt); > + spin_unlock_bh(&vhost_vsock_lock); > return -ENODEV; > } > > if (pkt->reply) > atomic_inc(&vsock->queued_replies); > > - spin_lock_bh(&vsock->send_pkt_list_lock); > + spin_lock(&vsock->send_pkt_list_lock); > list_add_tail(&pkt->list, &...
2019 Sep 26
5
[PATCH] vsock/virtio: add support for MSG_PEEK
...&vvs->rx_queue, list) { + if (total == len) + break; + + bytes = len - total; + if (bytes > pkt->len - pkt->off) + bytes = pkt->len - pkt->off; + + /* sk_lock is held by caller so no one else can dequeue. + * Unlock rx_lock since memcpy_to_msg() may sleep. + */ + spin_unlock_bh(&vvs->rx_lock); + + err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes); + if (err) + goto out; + + spin_lock_bh(&vvs->rx_lock); + + total += bytes; + } + + spin_unlock_bh(&vvs->rx_lock); + + return total; + +out: + if (total) + err = total; + return err; +} + +st...
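The comment in the excerpt captures the key constraint: rx_lock is a BH-disabling spinlock, but memcpy_to_msg() copies into user memory and may sleep on a page fault, which is forbidden while holding a spinlock. So the lock is dropped around the copy and retaken afterwards, and the caller's sk_lock (a sleeping lock) guarantees nothing is dequeued in the window. A stripped-down sketch of just that drop-and-retake step, with hypothetical names and none of the surrounding peek logic:

#include <linux/spinlock.h>
#include <linux/skbuff.h>

/* Called with rx_lock held; returns with it held again. */
static int copy_chunk_to_user(struct msghdr *msg, spinlock_t *rx_lock,
                              void *buf, int bytes)
{
        int err;

        spin_unlock_bh(rx_lock);                /* about to touch user memory */
        err = memcpy_to_msg(msg, buf, bytes);   /* may sleep on a page fault */
        spin_lock_bh(rx_lock);
        return err;
}

Any state derived from the queue before the unlock must be revalidated afterwards or, as here, be protected by a higher-level lock.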
2018 Sep 27
2
[PATCH net] vhost-vsock: fix use after free
...len = pkt->len; + spin_lock_bh(&vhost_vsock_lock); + /* Find the vhost_vsock according to guest context id */ - vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); + vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); if (!vsock) { virtio_transport_free_pkt(pkt); + spin_unlock_bh(&vhost_vsock_lock); return -ENODEV; } if (pkt->reply) atomic_inc(&vsock->queued_replies); - spin_lock_bh(&vsock->send_pkt_list_lock); + spin_lock(&vsock->send_pkt_list_lock); list_add_tail(&pkt->list, &vsock->send_pkt_list); - spin_unlock_bh...
2020 Jun 08
2
[PATCH RFC v5 12/13] vhost/vsock: switch to the buf API
...> + struct vhost_buf buf; > + int ret; > > spin_lock_bh(&vsock->send_pkt_list_lock); > if (list_empty(&vsock->send_pkt_list)) { > @@ -117,16 +118,17 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, > list_del_init(&pkt->list); > spin_unlock_bh(&vsock->send_pkt_list_lock); > > - head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), > - &out, &in, NULL, NULL); > - if (head < 0) { > + ret = vhost_get_avail_buf(vq, &buf, > + vq->iov, ARRAY_SIZE(vq->iov), > + &...
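Setting the RFC's new buf API aside, the unchanged lines show the stock way to pop work off a softirq-safe queue: take the lock with _bh, bail out if empty, detach the head with list_del_init(), and drop the lock before the expensive part. A sketch with illustrative types:

#include <linux/spinlock.h>
#include <linux/list.h>

struct pkt {
        struct list_head list;
};

static struct pkt *pop_pkt(spinlock_t *lock, struct list_head *queue)
{
        struct pkt *pkt = NULL;

        spin_lock_bh(lock);
        if (!list_empty(queue)) {
                pkt = list_first_entry(queue, struct pkt, list);
                list_del_init(&pkt->list);      /* detach while still locked */
        }
        spin_unlock_bh(lock);
        return pkt;                             /* process outside the lock */
}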
2013 Jan 06
3
[PATCH] tcm_vhost: Use llist for cmd completion list
...onfigfs fabric module */ @@ -301,9 +301,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) { struct vhost_scsi *vs = tv_cmd->tvc_vhost; - spin_lock_bh(&vs->vs_completion_lock); - list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); - spin_unlock_bh(&vs->vs_completion_lock); + llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); vhost_work_queue(&vs->dev, &vs->vs_completion_work); } @@ -347,27 +345,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) kfree(tv_cmd); } -/* D...
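This patch removes the spin_lock_bh()/spin_unlock_bh() pair altogether by moving the completion list to the kernel's lock-free llist: producers push with one atomic llist_add(), and the consumer detaches the entire batch with llist_del_all() and walks it privately. A minimal sketch of that pairing (names are illustrative):

#include <linux/llist.h>

struct cmd {
        struct llist_node node;
};

static LLIST_HEAD(completion_list);

static void complete_cmd(struct cmd *cmd)
{
        /* lock-free push; returns true if the list was empty before */
        if (llist_add(&cmd->node, &completion_list)) {
                /* first entry: a real driver would queue the worker here */
        }
}

static void completion_work(void)
{
        struct llist_node *batch = llist_del_all(&completion_list);
        struct cmd *cmd, *tmp;

        /* The detached batch is private to this thread, so no locking is
         * needed. It comes back newest-first; use llist_reverse_order()
         * if FIFO order matters.
         */
        llist_for_each_entry_safe(cmd, tmp, batch, node) {
                /* ... complete and free cmd ... */
        }
}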
2012 Aug 30
0
[PATCH 07/11] vmci_hash_table.patch: VMCI hash table implementation.
...s point in time. Hence we should be able to just + * remove all entries from the hash table. + */ +void vmci_hash_destroy(struct vmci_hash_table *table) +{ + ASSERT(table); + + spin_lock_bh(&table->lock); + kfree(table->entries); + table->entries = NULL; + spin_unlock_bh(&table->lock); + kfree(table); +} + +void vmci_hash_init_entry(struct vmci_hash_entry *entry, + struct vmci_handle handle) +{ + ASSERT(entry); + entry->handle = handle; + entry->refCount = 0; +} + +/* + * Unlocked version of vmci_hash_exi...
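kfree() itself is safe in atomic context, so freeing under the lock as shown works; a common refinement, though, is to detach the pointer inside the critical section and free outside it, keeping the locked region minimal. A sketch of that variant (assuming, as the comment above says, that no other thread still holds a reference):

#include <linux/spinlock.h>
#include <linux/slab.h>

struct table {
        spinlock_t lock;
        void *entries;
};

static void table_destroy(struct table *t)
{
        void *entries;

        spin_lock_bh(&t->lock);
        entries = t->entries;           /* detach under the lock */
        t->entries = NULL;
        spin_unlock_bh(&t->lock);

        kfree(entries);                 /* free outside the critical section */
        kfree(t);                       /* nobody can reach the lock anymore */
}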
2019 Sep 23
1
[RFC] VSOCK: add support for MSG_PEEK
...ans; > + struct virtio_vsock_pkt *pkt; > + size_t bytes, off = 0, total = 0; > + int err = -EFAULT; > + > + spin_lock_bh(&vvs->rx_lock); > + What about using list_for_each_entry() to cycle through the queued packets? > + if (list_empty(&vvs->rx_queue)) { > + spin_unlock_bh(&vvs->rx_lock); > + return 0; > + } > + > + pkt = list_first_entry(&vvs->rx_queue, > + struct virtio_vsock_pkt, list); > + do { pkt->off contains the offset inside the packet where the unread data starts. So here we should initialize 'off': o...
2008 Mar 20
0
[RFC/PATCH 09/15] kvm-s390: interprocessor communication via sigp
..._NOADDR); + if (rc >= 0) + rc = -ENOTSUPP; + } + + if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { + vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; + VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); + rc = -ENOTSUPP; + } else + rc = 0; + spin_unlock_bh(&vcpu->arch.local_int.lock); + return rc; } static int handle_validity(struct kvm_vcpu *vcpu) Index: kvm/arch/s390/kvm/kvm-s390.c =================================================================== --- kvm.orig/arch/s390/kvm/kvm-s390.c +++ kvm/arch/s390/kvm/kvm-s390.c @@ -57,6 +57,12 @@...
2008 Mar 20
0
[RFC/PATCH 07/15] kvm-s390: interrupt subsystem, cpu timer, waitpsw
...i = vcpu->arch.local_int.float_int; + struct interrupt_info *inti; + int rc = 0; + + if (atomic_read(&li->active)) { + spin_lock_bh(&li->lock); + list_for_each_entry(inti, &li->list, list) + if (__interrupt_is_deliverable(vcpu, inti)) { + rc = 1; + break; + } + spin_unlock_bh(&li->lock); + } + + if ((!rc) && atomic_read(&fi->active)) { + spin_lock_bh(&fi->lock); + list_for_each_entry(inti, &fi->list, list) + if (__interrupt_is_deliverable(vcpu, inti)) { + rc = 1; + break; + } + spin_unlock_bh(&fi->lock); + } + + if...
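The excerpt shows a handy two-level idiom: a lockless atomic_read() of an "active" counter decides whether the locked list walk is worth doing at all, and the walk stops at the first deliverable entry. A generic sketch, with illustrative names and a placeholder predicate:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/types.h>

struct irq_queue {
        atomic_t active;                /* nonzero iff list may be non-empty */
        spinlock_t lock;
        struct list_head list;
};

struct intr {
        struct list_head list;
};

static bool deliverable(struct intr *i)
{
        return true;                    /* placeholder predicate */
}

static bool any_deliverable(struct irq_queue *q)
{
        struct intr *i;
        bool rc = false;

        if (!atomic_read(&q->active))   /* fast path: skip the lock */
                return false;

        spin_lock_bh(&q->lock);
        list_for_each_entry(i, &q->list, list) {
                if (deliverable(i)) {
                        rc = true;
                        break;
                }
        }
        spin_unlock_bh(&q->lock);
        return rc;
}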
2019 Sep 03
2
[PATCH v4 2/5] vsock/virtio: reduce credit update messages
..._inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt) > { > spin_lock_bh(&vvs->tx_lock); > + vvs->last_fwd_cnt = vvs->fwd_cnt; > pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt); > pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc); > spin_unlock_bh(&vvs->tx_lock); > @@ -261,6 +262,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, > struct virtio_vsock_sock *vvs = vsk->trans; > struct virtio_vsock_pkt *pkt; > size_t bytes, total = 0; > + u32 free_space; > int err = -EFAULT; > > spin_l...
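The idea of the patch is visible even in this truncated hunk: last_fwd_cnt snapshots fwd_cnt whenever credit is advertised in an outgoing header, so the dequeue path can compute how much consumed space the peer does not yet know about and send a dedicated credit-update packet only when the peer's view of free space runs low. A hedged sketch of that threshold check (field names follow the excerpt; the threshold constant is illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

#define CREDIT_LOW_WATER (64 * 1024)            /* illustrative threshold */

struct credit_state {
        spinlock_t rx_lock;
        u32 buf_alloc;          /* advertised receive buffer size */
        u32 fwd_cnt;            /* bytes consumed locally */
        u32 last_fwd_cnt;       /* fwd_cnt as last advertised to the peer */
};

static bool need_credit_update(struct credit_state *c)
{
        u32 free_space;
        bool update;

        spin_lock_bh(&c->rx_lock);
        /* space the peer still believes it is allowed to send */
        free_space = c->buf_alloc - (c->fwd_cnt - c->last_fwd_cnt);
        update = free_space < CREDIT_LOW_WATER;
        spin_unlock_bh(&c->rx_lock);
        return update;
}

Batching updates this way trades a little sender stalling for far fewer zero-payload control packets.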
2007 Apr 18
0
[Bridge] [PATCH] (4/11) bridge - ioctl cleanup and consolidation
...return num; @@ -119,7 +148,7 @@ return -EPERM; spin_lock_bh(&br->lock); - br->bridge_forward_delay = clock_t_to_jiffies(arg0); + br->bridge_forward_delay = clock_t_to_jiffies(args[1]); if (br_is_root_bridge(br)) br->forward_delay = br->bridge_forward_delay; spin_unlock_bh(&br->lock); @@ -130,7 +159,7 @@ return -EPERM; spin_lock_bh(&br->lock); - br->bridge_hello_time = clock_t_to_jiffies(arg0); + br->bridge_hello_time = clock_t_to_jiffies(args[1]); if (br_is_root_bridge(br)) br->hello_time = br->bridge_hello_time; spin_u...
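The one-line fix replaces the stale arg0 with args[1] after the ioctl arguments were consolidated into an array; the surrounding pattern is the classic one for bridge parameters: convert the user-supplied clock_t value to jiffies and store it under spin_lock_bh(&br->lock), so STP code running from timers and softirqs never sees a torn update. A condensed sketch with an illustrative struct:

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct bridge {
        spinlock_t lock;
        unsigned long bridge_forward_delay;     /* configured value, jiffies */
        unsigned long forward_delay;            /* value in effect, jiffies */
        bool is_root;
};

static void set_forward_delay(struct bridge *br, unsigned long user_value)
{
        spin_lock_bh(&br->lock);                /* serializes with softirq STP */
        br->bridge_forward_delay = clock_t_to_jiffies(user_value);
        if (br->is_root)                        /* root bridge applies it directly */
                br->forward_delay = br->bridge_forward_delay;
        spin_unlock_bh(&br->lock);
}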