search for: spin_lock_bh

Displaying 20 results from an estimated 323 matches for "spin_lock_bh".

2023 Jul 21
2
[Bridge] [PATCH] can: j1939: prevent deadlock by changing j1939_socks_lock to rwlock
...n/j1939/socket.c b/net/can/j1939/socket.c index feaec4ad6d16..a8b981dc2065 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -80,16 +80,16 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk) jsk->state |= J1939_SOCK_BOUND; j1939_priv_get(priv); - spin_lock_bh(&priv->j1939_socks_lock); + write_lock_bh(&priv->j1939_socks_lock); list_add_tail(&jsk->list, &priv->j1939_socks); - spin_unlock_bh(&priv->j1939_socks_lock); + write_unlock_bh(&priv->j1939_socks_lock); } static void j1939_jsk_del(struct j1939_priv *pr...
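For readers unfamiliar with the pattern this patch applies, here is a minimal, hypothetical sketch (invented names, not the actual j1939 code) of converting a BH-safe spinlock that guards a read-mostly socket list into an rwlock: traversal paths take the shared read side, add/remove paths take the exclusive write side, so readers in softirq context no longer serialize against each other.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_sock {
        struct list_head list;
        int id;
};

/* was: static DEFINE_SPINLOCK(demo_socks_lock); */
static DEFINE_RWLOCK(demo_socks_lock);
static LIST_HEAD(demo_socks);

static void demo_sock_add(struct demo_sock *dsk)
{
        write_lock_bh(&demo_socks_lock);        /* exclusive: list is modified */
        list_add_tail(&dsk->list, &demo_socks);
        write_unlock_bh(&demo_socks_lock);
}

static bool demo_sock_bound(int id)
{
        struct demo_sock *dsk;
        bool found = false;

        read_lock_bh(&demo_socks_lock);         /* shared: read-only traversal */
        list_for_each_entry(dsk, &demo_socks, list) {
                if (dsk->id == id) {
                        found = true;
                        break;
                }
        }
        read_unlock_bh(&demo_socks_lock);

        return found;
}

The _bh variants are kept in the sketch because, as in the patch, the list is also touched from softirq context.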
2019 Sep 26
5
[PATCH] vsock/virtio: add support for MSG_PEEK
..._send_credit_update(struct vsock_sock *vsk, } static ssize_t +virtio_transport_stream_do_peek(struct vsock_sock *vsk, + struct msghdr *msg, + size_t len) +{ + struct virtio_vsock_sock *vvs = vsk->trans; + struct virtio_vsock_pkt *pkt; + size_t bytes, total = 0; + int err = -EFAULT; + + spin_lock_bh(&vvs->rx_lock); + + list_for_each_entry(pkt, &vvs->rx_queue, list) { + if (total == len) + break; + + bytes = len - total; + if (bytes > pkt->len - pkt->off) + bytes = pkt->len - pkt->off; + + /* sk_lock is held by caller so no one else can dequeue. + * Unlock...
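The excerpt above walks the receive queue without dequeuing anything, which is the essence of MSG_PEEK. A stripped-down, hypothetical sketch of that idea (invented types and names; it copies into a kernel buffer rather than to user space so the copy stays legal under the spinlock):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_pkt {
        struct list_head list;
        u32 len;        /* payload length */
        u32 off;        /* bytes already consumed */
        u8 *buf;
};

struct demo_rx {
        spinlock_t rx_lock;
        struct list_head rx_queue;
};

/* Copy up to @len bytes out of the queue without removing packets. */
static size_t demo_peek(struct demo_rx *rx, u8 *dst, size_t len)
{
        struct demo_pkt *pkt;
        size_t bytes, total = 0;

        spin_lock_bh(&rx->rx_lock);
        list_for_each_entry(pkt, &rx->rx_queue, list) {
                if (total == len)
                        break;

                bytes = len - total;
                if (bytes > pkt->len - pkt->off)
                        bytes = pkt->len - pkt->off;

                memcpy(dst + total, pkt->buf + pkt->off, bytes);
                total += bytes;
                /* no list_del(): a later non-PEEK read sees the same data */
        }
        spin_unlock_bh(&rx->rx_lock);

        return total;
}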
2018 Sep 27
3
[PATCH net V2] vhost-vsock: fix use after free
...tions(-) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 34bc3ab40c6d..7d0b292867fd 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -210,21 +210,27 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) struct vhost_vsock *vsock; int len = pkt->len; + spin_lock_bh(&vhost_vsock_lock); + /* Find the vhost_vsock according to guest context id */ - vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); + vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); if (!vsock) { virtio_transport_free_pkt(pkt); + spin_unlock_bh(&vhost_vsock_loc...
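The fix boils down to holding the lock across both the lookup and the use of the looked-up object, instead of dropping it in between. A minimal, hypothetical sketch of that pattern (invented names, not the actual vhost code):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_dev {
        struct list_head list;
        u64 cid;
        int queued;
};

static DEFINE_SPINLOCK(demo_dev_lock);
static LIST_HEAD(demo_dev_list);

/* __-prefixed by convention: caller must hold demo_dev_lock */
static struct demo_dev *__demo_dev_get(u64 cid)
{
        struct demo_dev *dev;

        list_for_each_entry(dev, &demo_dev_list, list)
                if (dev->cid == cid)
                        return dev;

        return NULL;
}

static int demo_send(u64 cid)
{
        struct demo_dev *dev;

        spin_lock_bh(&demo_dev_lock);
        dev = __demo_dev_get(cid);      /* lookup ...                  */
        if (!dev) {
                spin_unlock_bh(&demo_dev_lock);
                return -ENODEV;
        }
        dev->queued++;                  /* ... and use, under one lock */
        spin_unlock_bh(&demo_dev_lock);

        return 0;
}

A release path that takes demo_dev_lock before unlinking and freeing the device can then no longer race with demo_send().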
2020 Jun 08
2
[PATCH RFC v5 12/13] vhost/vsock: switch to the buf API
...rivers/vhost/vsock.c > +++ b/drivers/vhost/vsock.c > @@ -103,7 +103,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, > unsigned out, in; > size_t nbytes; > size_t iov_len, payload_len; > - int head; > + struct vhost_buf buf; > + int ret; > > spin_lock_bh(&vsock->send_pkt_list_lock); > if (list_empty(&vsock->send_pkt_list)) { > @@ -117,16 +118,17 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, > list_del_init(&pkt->list); > spin_unlock_bh(&vsock->send_pkt_list_lock); > > - head = vh...
2017 Jan 03
2
[PATCH net-next V2 3/3] tun: rx batching
...x_batched(struct tun_file *tfile, struct sk_buff *skb, > + int more) > +{ > + struct sk_buff_head *queue = &tfile->sk.sk_write_queue; > + struct sk_buff_head process_queue; > + int qlen; > + bool rcv = false; > + > + spin_lock(&queue->lock); Should this be spin_lock_bh()? Below and in tun_get_user() there are explicit local_bh_disable() calls so I guess BHs can interrupt us here and this would deadlock.
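The reviewer's concern is the classic BH deadlock: if a lock can also be taken from softirq context on the same CPU, process-context users must disable bottom halves while holding it. A hedged illustration (the function is invented; queue->lock and __skb_queue_tail() are the real sk_buff_head APIs):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Process-context enqueue on a queue that is also drained from BH
 * context.  spin_lock_bh() disables local bottom halves, so a softirq
 * cannot fire on this CPU and spin on queue->lock while we hold it.
 */
static void demo_queue_skb(struct sk_buff_head *queue, struct sk_buff *skb)
{
        spin_lock_bh(&queue->lock);
        __skb_queue_tail(queue, skb);   /* unlocked variant: lock already held */
        spin_unlock_bh(&queue->lock);
}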
2008 Mar 20
0
[RFC/PATCH 09/15] kvm-s390: interprocessor communication via sigp
...t handle_noop(struct kvm_vcpu * static int handle_stop(struct kvm_vcpu *vcpu) { + int rc; + vcpu->stat.exit_stop_request++; - VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); - return -ENOTSUPP; + spin_lock_bh(&vcpu->arch.local_int.lock); + if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { + vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; + rc = __kvm_s390_vcpu_store_status(vcpu, + KVM_S390_STORE_STATUS_NOADDR); + if (rc >= 0) + rc = -ENOTSUPP;...
2012 Aug 30
0
[PATCH 07/11] vmci_hash_table.patch: VMCI hash table implementation.
...time. + * We rely on the module ref count to insure that no one is accessing any + * hash table entries at this point in time. Hence we should be able to just + * remove all entries from the hash table. + */ +void vmci_hash_destroy(struct vmci_hash_table *table) +{ + ASSERT(table); + + spin_lock_bh(&table->lock); + kfree(table->entries); + table->entries = NULL; + spin_unlock_bh(&table->lock); + kfree(table); +} + +void vmci_hash_init_entry(struct vmci_hash_entry *entry, + struct vmci_handle handle) +{ + ASSERT(entry);...
2018 Sep 27
0
[PATCH net V2] vhost-vsock: fix use after free
...ck.c b/drivers/vhost/vsock.c > index 34bc3ab40c6d..7d0b292867fd 100644 > --- a/drivers/vhost/vsock.c > +++ b/drivers/vhost/vsock.c > @@ -210,21 +210,27 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) > struct vhost_vsock *vsock; > int len = pkt->len; > > + spin_lock_bh(&vhost_vsock_lock); > + > /* Find the vhost_vsock according to guest context id */ > - vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); > + vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); > if (!vsock) { > virtio_transport_free_pkt(pkt); > +...
2008 Mar 20
0
[RFC/PATCH 07/15] kvm-s390: interrupt subsystem, cpu timer, waitpsw
...n 0; + } + + return 1; +} + +int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) +{ + struct local_interrupt *li = &vcpu->arch.local_int; + struct float_interrupt *fi = vcpu->arch.local_int.float_int; + struct interrupt_info *inti; + int rc = 0; + + if (atomic_read(&li->active)) { + spin_lock_bh(&li->lock); + list_for_each_entry(inti, &li->list, list) + if (__interrupt_is_deliverable(vcpu, inti)) { + rc = 1; + break; + } + spin_unlock_bh(&li->lock); + } + + if ((!rc) && atomic_read(&fi->active)) { + spin_lock_bh(&fi->lock); + list_fo...
2019 Sep 03
2
[PATCH v4 2/5] vsock/virtio: reduce credit update messages
...t/vmw_vsock/virtio_transport_common.c > +++ b/net/vmw_vsock/virtio_transport_common.c > @@ -211,6 +211,7 @@ static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs, > void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt) > { > spin_lock_bh(&vvs->tx_lock); > + vvs->last_fwd_cnt = vvs->fwd_cnt; > pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt); > pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc); > spin_unlock_bh(&vvs->tx_lock); > @@ -261,6 +262,7 @@ virtio_transport_stream_do_dequeue(st...
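The patch records the forward counter that was last advertised to the peer so redundant credit-update packets can be skipped. A hypothetical sketch of that throttling idea (names and threshold invented for illustration):

#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_CREDIT_THRESHOLD  (64 * 1024)      /* bytes, arbitrary for the sketch */

struct demo_credit {
        spinlock_t lock;
        u32 fwd_cnt;            /* bytes consumed locally so far */
        u32 last_fwd_cnt;       /* fwd_cnt advertised in the last update */
};

/* Returns true when enough data has been consumed since the last
 * advertised value that a new credit update is worth sending.
 */
static bool demo_need_credit_update(struct demo_credit *c)
{
        bool need;

        spin_lock_bh(&c->lock);
        need = c->fwd_cnt - c->last_fwd_cnt >= DEMO_CREDIT_THRESHOLD;
        if (need)
                c->last_fwd_cnt = c->fwd_cnt;   /* we are about to advertise it */
        spin_unlock_bh(&c->lock);

        return need;
}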
2018 Sep 27
2
[PATCH net] vhost-vsock: fix use after free
...tions(-) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 34bc3ab40c6d..7d0b292867fd 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -210,21 +210,27 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) struct vhost_vsock *vsock; int len = pkt->len; + spin_lock_bh(&vhost_vsock_lock); + /* Find the vhost_vsock according to guest context id */ - vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); + vsock = __vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); if (!vsock) { virtio_transport_free_pkt(pkt); + spin_unlock_bh(&vhost_vsock_loc...
2007 Apr 18
0
[Bridge] [PATCH] (4/11) bridge - ioctl cleanup and consolidation
...t_port_ifindices(br, indices, num); - if (copy_to_user((void *)arg0, indices, num*sizeof(int))) + get_port_ifindices(br, indices, num); + if (copy_to_user((void *)args[1], indices, num*sizeof(int))) num = -EFAULT; kfree(indices); return num; @@ -119,7 +148,7 @@ return -EPERM; spin_lock_bh(&br->lock); - br->bridge_forward_delay = clock_t_to_jiffies(arg0); + br->bridge_forward_delay = clock_t_to_jiffies(args[1]); if (br_is_root_bridge(br)) br->forward_delay = br->bridge_forward_delay; spin_unlock_bh(&br->lock); @@ -130,7 +159,7 @@ return -EPER...
2019 May 31
7
[PATCH v3 0/5] vsock/virtio: optimizations to increase the throughput
This series tries to increase the throughput of virtio-vsock with slight changes. While I was testing the v2 of this series I discovered a huge use of memory, so I added patch 1 to mitigate this issue. I put it in this series in order to better track the performance trends. v3: - Patch 1: added a threshold to copy only small packets [Jason] - Patch 1: replaced the allocation of a new buffer