search for: rcu_dereference_protect

Displaying 20 results from an estimated 128 matches for "rcu_dereference_protect".

2019 Jul 23
2
[PATCH 5/6] vhost: mark dirty pages during map uninit
...> + } > +} > + > static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) > { > struct vhost_map *map[VHOST_NUM_ADDRS]; > @@ -315,8 +327,10 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) > for (i = 0; i < VHOST_NUM_ADDRS; i++) { > map[i] = rcu_dereference_protected(vq->maps[i], > lockdep_is_held(&vq->mmu_lock)); > - if (map[i]) > + if (map[i]) { > + vhost_set_map_dirty(vq, map[i], i); > rcu_assign_pointer(vq->maps[i], NULL); > + } > } > spin_unlock(&vq->mmu_lock); > > @@ -354,7 +368,6 @...
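The hunk above shows the usual update-side idiom: because vhost_uninit_vq_maps() already holds vq->mmu_lock, the map pointer is fetched with rcu_dereference_protected(..., lockdep_is_held(&vq->mmu_lock)) instead of rcu_dereference(), and then published as NULL with rcu_assign_pointer(). A minimal sketch of that idiom follows; the demo_* types and fields are hypothetical stand-ins, not the vhost structures.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_map {                        /* hypothetical stand-in for struct vhost_map */
	struct rcu_head rcu;
	/* pages, sizes, ... */
};

struct demo_vq {                         /* hypothetical stand-in for struct vhost_virtqueue */
	spinlock_t lock;                 /* serializes updates to @map */
	struct demo_map __rcu *map;
};

static void demo_uninit_map(struct demo_vq *vq)
{
	struct demo_map *map;

	spin_lock(&vq->lock);
	/* Writer side: the lock is held, so rcu_dereference_protected() is the
	 * right accessor and documents the locking for lockdep and sparse. */
	map = rcu_dereference_protected(vq->map, lockdep_is_held(&vq->lock));
	if (map) {
		/* per-object cleanup (e.g. marking pages dirty) would go here */
		rcu_assign_pointer(vq->map, NULL);
	}
	spin_unlock(&vq->lock);

	if (map)
		kfree_rcu(map, rcu);     /* free only after readers have drained */
}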
2013 May 07
5
[PATCH 0/4] vhost private_data rcu removal
Asias He (4): vhost-net: Always access vq->private_data under vq mutex vhost-test: Always access vq->private_data under vq mutex vhost-scsi: Always access vq->private_data under vq mutex vhost: Remove custom vhost rcu usage drivers/vhost/net.c | 37 ++++++++++++++++--------------------- drivers/vhost/scsi.c | 17 ++++++----------- drivers/vhost/test.c | 20
2014 Jun 03
3
[PULL 2/2] vhost: replace rcu with mutex
...vq mutex held. > > The rcu_read_lock/unlock in translate_desc is unnecessary. Yep, this is what I pointed out. This is not only unnecessary, but confusing and might be incorrectly copy/pasted in the future. This patch is a partial one and leaves confusion. Some places use the proper mp = rcu_dereference_protected(dev->memory, lockdep_is_held(&dev->mutex)); others use the now-incorrect: rcu_read_lock(); mp = rcu_dereference(dev->memory); ...
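The distinction being argued here is easy to show side by side. The sketch below is illustrative only (the demo_* names are hypothetical): the first accessor is for paths that already hold the mutex serializing updates, the second for lock-free readers inside an RCU read-side critical section; mixing them up is what the reviewer calls confusing.

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct demo_mem;

struct demo_dev {
	struct mutex mutex;              /* protects updates to @memory */
	struct demo_mem __rcu *memory;
};

/* Owner/updater path: the mutex is held, so no RCU read-side section is
 * needed and lockdep can verify the stated condition. */
static struct demo_mem *demo_mem_locked(struct demo_dev *d)
{
	return rcu_dereference_protected(d->memory, lockdep_is_held(&d->mutex));
}

/* Plain reader path: no lock held, so a real RCU read-side section is
 * required around every use of the pointer. */
static void demo_mem_reader(struct demo_dev *d)
{
	struct demo_mem *mp;

	rcu_read_lock();
	mp = rcu_dereference(d->memory);
	/* ... use mp; must not sleep while the read-side section is open ... */
	(void)mp;
	rcu_read_unlock();
}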
2014 Jun 02
4
[PULL 2/2] vhost: replace rcu with mutex
...*newmem, *oldmem; > unsigned long size = offsetof(struct vhost_memory, regions); > + int i; > > if (copy_from_user(&mem, m, size)) > return -EFAULT; > @@ -619,7 +620,14 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) > oldmem = rcu_dereference_protected(d->memory, > lockdep_is_held(&d->mutex)); > rcu_assign_pointer(d->memory, newmem); > - synchronize_rcu(); > + > + /* All memory accesses are done under some VQ mutex. > + * So below is a faster equivalent of synchronize_rcu() > + */ > + for (i = 0...
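The comment in the hunk describes the trick this pull request relies on: every reader of d->memory runs under some per-VQ mutex, so after swapping the pointer the writer only has to acquire and release each of those mutexes to know that no reader can still be using the old table, which is what synchronize_rcu() would otherwise wait for. A rough sketch of the idea, with hypothetical demo_* types rather than the real vhost ones:

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#define DEMO_NVQS 2

struct demo_mem;

struct demo_vq {
	struct mutex mutex;              /* held by any handler using dev->memory */
};

struct demo_dev {
	struct mutex mutex;              /* serializes memory-table updates */
	struct demo_mem __rcu *memory;
	struct demo_vq vqs[DEMO_NVQS];
};

static void demo_set_memory(struct demo_dev *d, struct demo_mem *newmem)
{
	struct demo_mem *oldmem;
	int i;

	mutex_lock(&d->mutex);
	oldmem = rcu_dereference_protected(d->memory, lockdep_is_held(&d->mutex));
	rcu_assign_pointer(d->memory, newmem);

	/* Cheaper stand-in for synchronize_rcu(): once each VQ mutex has been
	 * taken and dropped, no VQ handler can still hold the old pointer. */
	for (i = 0; i < DEMO_NVQS; i++) {
		mutex_lock(&d->vqs[i].mutex);
		mutex_unlock(&d->vqs[i].mutex);
	}
	mutex_unlock(&d->mutex);

	kfree(oldmem);
}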
2019 Jul 04
2
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
...int virtio_vsock_probe(struct virtio_device *vdev) >>> virtio_vsock_event_fill(vsock); >>> mutex_unlock(&vsock->event_lock); >>> + vdev->priv = vsock; >>> + rcu_assign_pointer(the_virtio_vsock, vsock); >> >> You probably need to use rcu_dereference_protected() to access >> the_virtio_vsock in the function in order to survive from sparse. >> > Ooo, thanks! > > Do you mean when we check if the_virtio_vsock is not null at the beginning of > virtio_vsock_probe()? I mean instead of: /* Only one virtio-vsock device per guest...
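What the reviewer appears to be suggesting is to read the annotated global through the protected accessor while probe holds the serializing mutex, so sparse stops warning about a plain load of an __rcu pointer. A hedged sketch of that shape is below; the_virtio_vsock comes from the thread, while the mutex name and the surrounding code are simplified assumptions, not the actual driver.

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/errno.h>

struct virtio_vsock;

static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex);     /* assumed serializing lock */

static int demo_probe_check(void)
{
	struct virtio_vsock *vsock;
	int ret = 0;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported. */
	vsock = rcu_dereference_protected(the_virtio_vsock,
			lockdep_is_held(&the_virtio_vsock_mutex));
	if (vsock)
		ret = -EBUSY;

	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}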
2018 Nov 15
1
[PATCH -next] drm/nouveau: fix copy-paste error in nouveau_fence_wait_uevent_handler
...ers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index d4964f3..91286d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -157,7 +157,7 @@ fence = list_entry(fctx->pending.next, typeof(*fence), head); chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); - if (nouveau_fence_update(fence->channel, fctx)) + if (nouveau_fence_update(chan, fctx)) ret = NVIF_NOTIFY_DROP; } spin_unlock_irqrestore(&fctx->lock, flags);
2020 Jan 10
1
[PATCH] drm/nouveau: Fix copy-paste error in nouveau_fence_wait_uevent_handler
...nouveau_fence.c index 9118df0..70bb6bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -156,7 +156,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) fence = list_entry(fctx->pending.next, typeof(*fence), head); chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); - if (nouveau_fence_update(fence->channel, fctx)) + if (nouveau_fence_update(chan, fctx)) ret = NVIF_NOTIFY_DROP; } spin_unlock_irqrestore(&fctx->lock, flags); -- 2.7.4
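Both of these nouveau patches fix the same slip: the channel pointer is read once through rcu_dereference_protected() under fctx->lock, but the next line then re-reads fence->channel directly. A minimal illustration of the bug class, using hypothetical demo_* types rather than the nouveau structures:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_chan;

struct demo_fence {
	struct demo_chan __rcu *channel;
};

struct demo_fence_ctx {
	spinlock_t lock;                 /* protects fence->channel */
};

static void demo_fence_update(struct demo_chan *chan) { /* ... */ }

static void demo_uevent_handler(struct demo_fence_ctx *fctx, struct demo_fence *fence)
{
	struct demo_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&fctx->lock, flags);
	chan = rcu_dereference_protected(fence->channel,
					 lockdep_is_held(&fctx->lock));
	/* Use the value read under the lock; re-reading fence->channel here,
	 * as the pre-fix code did, bypasses the protected accessor. */
	demo_fence_update(chan);
	spin_unlock_irqrestore(&fctx->lock, flags);
}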
2019 Jul 23
0
[PATCH 5/6] vhost: mark dirty pages during map uninit
...; i++) + set_page_dirty(map->pages[i]); + } +} + static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) { struct vhost_map *map[VHOST_NUM_ADDRS]; @@ -315,8 +327,10 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) for (i = 0; i < VHOST_NUM_ADDRS; i++) { map[i] = rcu_dereference_protected(vq->maps[i], lockdep_is_held(&vq->mmu_lock)); - if (map[i]) + if (map[i]) { + vhost_set_map_dirty(vq, map[i], i); rcu_assign_pointer(vq->maps[i], NULL); + } } spin_unlock(&vq->mmu_lock); @@ -354,7 +368,6 @@ static void vhost_invalidate_vq_start(struct vho...
2019 Jul 23
0
[PATCH 5/6] vhost: mark dirty pages during map uninit
...> static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) >> { >> struct vhost_map *map[VHOST_NUM_ADDRS]; >> @@ -315,8 +327,10 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) >> for (i = 0; i < VHOST_NUM_ADDRS; i++) { >> map[i] = rcu_dereference_protected(vq->maps[i], >> lockdep_is_held(&vq->mmu_lock)); >> - if (map[i]) >> + if (map[i]) { >> + vhost_set_map_dirty(vq, map[i], i); >> rcu_assign_pointer(vq->maps[i], NULL); >> + } >> } >> spin_unlock(&vq->mmu_l...
2024 Jan 23
1
[PATCH] nouveau: rip out fence irq allow/block sequences.
...eak; - drop |= nouveau_fence_signal(fence); + nouveau_fence_signal(fence); } - - return drop; } static int @@ -160,26 +146,13 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc fence = list_entry(fctx->pending.next, typeof(*fence), head); chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); - if (nouveau_fence_update(chan, fctx)) - ret = NVIF_EVENT_DROP; + nouveau_fence_update(chan, fctx); } spin_unlock_irqrestore(&fctx->lock, flags); return ret; } -static void -nouveau_fence_work_allow_block(struct work_...
2011 Nov 18
3
[PATCH] vhost-net: Acquire device lock when releasing device
...releasing a device, and specifically when calling vhost_dev_cleanup(). Otherwise, RCU complains about it: [ 2025.642835] =============================== [ 2025.643838] [ INFO: suspicious RCU usage. ] [ 2025.645182] ------------------------------- [ 2025.645927] drivers/vhost/vhost.c:475 suspicious rcu_dereference_protected() usage! [ 2025.647329] [ 2025.647330] other info that might help us debug this: [ 2025.647331] [ 2025.649042] [ 2025.649043] rcu_scheduler_active = 1, debug_locks = 1 [ 2025.650235] no locks held by trinity/21042. [ 2025.650971] [ 2025.650972] stack backtrace: [ 2025.651789] Pid: 21042, comm: tr...
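The splat means the condition passed to rcu_dereference_protected(), here lockdep_is_held() on the device mutex, evaluated false: the release path reached vhost_dev_cleanup() without holding the lock the accessor promises. A minimal sketch of the problem and the fix, with hypothetical demo_* types standing in for the vhost ones:

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct demo_priv;

struct demo_dev {
	struct mutex mutex;
	struct demo_priv __rcu *private_data;
};

static void demo_dev_cleanup(struct demo_dev *d)
{
	struct demo_priv *p;

	/* Warns "suspicious rcu_dereference_protected() usage" whenever the
	 * caller does not actually hold d->mutex. */
	p = rcu_dereference_protected(d->private_data,
				      lockdep_is_held(&d->mutex));
	/* ... tear down p ... */
	(void)p;
}

static void demo_release(struct demo_dev *d)
{
	mutex_lock(&d->mutex);           /* the fix: hold the lock across cleanup */
	demo_dev_cleanup(d);
	mutex_unlock(&d->mutex);
}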
2024 Jan 25
1
[PATCH] nouveau: rip out fence irq allow/block sequences.
...nouveau_fence_signal(fence); > } > - > - return drop; > } > > static int > @@ -160,26 +146,13 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc > > fence = list_entry(fctx->pending.next, typeof(*fence), head); > chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); > - if (nouveau_fence_update(chan, fctx)) > - ret = NVIF_EVENT_DROP; > + nouveau_fence_update(chan, fctx); > } > spin_unlock_irqrestore(&fctx->lock, flags); > > return ret; > } > > -static...
2014 Jun 05
1
[PATCH v2 1/2] vhost: move acked_features to VQs
...dev *d, struct vhost_virtqueue *vq, +static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) { struct vhost_memory *mp; - size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; + size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; mp = rcu_dereference_protected(vq->dev->memory, lockdep_is_held(&vq->mutex)); return vq_memory_access_ok(log_base, mp, - vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && + vhost_has_feature(vq, VHOST_F_LOG_ALL)) && (!vq->log_used || log_access_ok(log_base, vq->l...