search for: atomic_inc_return

Displaying 20 results from an estimated 75 matches for "atomic_inc_return".

2015 Feb 26
4
[PATCH v2 1/4] Add atomic_inc_return to atomics.
...f86atomic.h index 8c4b696..17fb088 100644 --- a/xf86atomic.h +++ b/xf86atomic.h @@ -49,6 +49,7 @@ typedef struct { # define atomic_read(x) ((x)->atomic) # define atomic_set(x, val) ((x)->atomic = (val)) # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1)) +# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1)) # define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1) # define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v))) # define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (...
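The subtlety the new macro depends on is which value the GCC builtin hands back: __sync_add_and_fetch returns the value after the increment (hence the _return suffix), while __sync_fetch_and_add returns the value before it, which is why the existing atomic_dec_and_test compares its result against 1. A minimal standalone sketch (not part of the patch) showing both behaviours:

    #include <assert.h>

    int main(void)
    {
        int counter = 0;

        /* add_and_fetch returns the NEW value: the first increment of a
         * zeroed counter yields 1, matching atomic_inc_return's contract. */
        assert(__sync_add_and_fetch(&counter, 1) == 1);

        /* fetch_and_add returns the OLD value: seeing 1 before adding -1
         * means the counter has just reached 0, which is exactly the
         * == 1 test in atomic_dec_and_test. */
        assert(__sync_fetch_and_add(&counter, -1) == 1);
        assert(counter == 0);
        return 0;
    }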
2015 Feb 25
1
[PATCH 1/2] nouveau: make nouveau importing global buffers completely thread-safe, with tests
...> >> DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) { >> if (nvbo->base.handle == handle) { >> - *pbo = NULL; >> - nouveau_bo_ref(&nvbo->base, pbo); >> + if (atomic_inc_return(&nvbo->refcnt) == 1) { >> + /* >> + * Uh oh, this bo is dead and someone else >> + * will free it, but because refcnt is >> + * now non-zero fort...
2015 Feb 24
4
[PATCH 1/2] nouveau: make nouveau importing global buffers completely thread-safe, with tests
..._gem_info req = { .handle = handle }; @@ -427,8 +416,24 @@ nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle, DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) { if (nvbo->base.handle == handle) { - *pbo = NULL; - nouveau_bo_ref(&nvbo->base, pbo); + if (atomic_inc_return(&nvbo->refcnt) == 1) { + /* + * Uh oh, this bo is dead and someone else + * will free it, but because refcnt is + * now non-zero fortunately they won't + * call the ioctl to close the bo. + * + * Remove this bo from the list so other + * calls to nouveau_bo...
2015 Feb 25
0
[PATCH 1/2] nouveau: make nouveau importing global buffers completely thread-safe, with tests
...v, uint32_t handle, > > DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) { > if (nvbo->base.handle == handle) { > - *pbo = NULL; > - nouveau_bo_ref(&nvbo->base, pbo); > + if (atomic_inc_return(&nvbo->refcnt) == 1) { > + /* > + * Uh oh, this bo is dead and someone else > + * will free it, but because refcnt is > + * now non-zero fortunately they won...
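The idiom repeated in the three excerpts above deserves spelling out: because a lookup can race with the final unref, atomic_inc_return(&nvbo->refcnt) == 1 tells the importer it has just resurrected a bo whose refcount had already reached zero and whose previous owner is on its way to freeing it. A hedged sketch of the check in plain C11 atomics (bo_lookup_take_ref is an invented name, not a libdrm function):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct bo {
        atomic_int refcnt;
    };

    /* Caller holds the lock protecting the lookup list.
     * atomic_fetch_add(...) + 1 mirrors kernel-style atomic_inc_return(). */
    static bool bo_lookup_take_ref(struct bo *bo)
    {
        /* A result of 1 means the count was 0: the last reference was
         * already dropped and another thread is about to free the bo.
         * Since the count is now non-zero, that thread skips the close
         * ioctl and this caller takes over ownership instead. */
        return atomic_fetch_add(&bo->refcnt, 1) + 1 == 1;
    }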
2019 Jun 13
0
[PATCH 09/22] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...dev_pagemap_put_ops(void *data) +{ + if (atomic_dec_and_test(&devmap_enable)) + static_branch_disable(&devmap_managed_key); +} + +/* + * Toggle the static key for ->page_free() callbacks when dev_pagemap + * pages go idle. + */ +static int dev_pagemap_enable(struct device *dev) +{ + if (atomic_inc_return(&devmap_enable) == 1) + static_branch_enable(&devmap_managed_key); + + if (devm_add_action_or_reset(dev, dev_pagemap_put_ops, NULL)) + return -ENOMEM; + return 0; +} +#else +static inline int dev_pagemap_enable(struct device *dev) +{ + return 0; +} +#endif /* CONFIG_DEV_PAGEMAP_OPS */ +...
2019 Jun 17
0
[PATCH 10/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...atomic_dec_and_test(&devmap_enable)) + static_branch_disable(&devmap_managed_key); +} + +static int dev_pagemap_get_ops(struct device *dev, struct dev_pagemap *pgmap) +{ + if (!pgmap->ops->page_free) { + WARN(1, "Missing page_free method\n"); + return -EINVAL; + } + + if (atomic_inc_return(&devmap_enable) == 1) + static_branch_enable(&devmap_managed_key); + return devm_add_action_or_reset(dev, dev_pagemap_put_ops, NULL); +} +#else +static int dev_pagemap_get_ops(struct device *dev, struct dev_pagemap *pgmap) +{ + return -EINVAL; +} +#endif /* CONFIG_DEV_PAGEMAP_OPS */ + #if...
2019 Jun 26
0
[PATCH 11/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
..._test(&devmap_managed_enable)) + static_branch_disable(&devmap_managed_key); +} + +static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap) +{ + if (!pgmap->ops->page_free) { + WARN(1, "Missing page_free method\n"); + return -EINVAL; + } + + if (atomic_inc_return(&devmap_managed_enable) == 1) + static_branch_enable(&devmap_managed_key); + return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL); +} +#else +static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap) +{ + return -EINVAL; +} +#endif /* CONFIG_DEV_P...
2019 Jun 26
1
[PATCH 11/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...NIT: later on you add the check for pgmap->ops... it should probably be here. But not sure that bisection will be an issue here. Reviewed-by: Ira Weiny <ira.weiny at intel.com> > + WARN(1, "Missing page_free method\n"); > + return -EINVAL; > + } > + > + if (atomic_inc_return(&devmap_managed_enable) == 1) > + static_branch_enable(&devmap_managed_key); > + return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL); > +} > +#else > +static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap) > +{ > + return...
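All four revisions of this patch rely on the same idiom: a global refcount guards a static key, and the key is toggled only on the 0 -> 1 and 1 -> 0 transitions, so intermediate users pay nothing. A minimal sketch of the shape (devmap_get/devmap_put are illustrative names, and a plain atomic bool stands in for the kernel's jump-label static key):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int devmap_enable;
    static _Atomic bool devmap_managed_key;  /* stand-in for the static key */

    static void devmap_get(void)
    {
        /* inc_return == 1 identifies the 0 -> 1 transition: only the
         * first user flips the key on. */
        if (atomic_fetch_add(&devmap_enable, 1) + 1 == 1)
            devmap_managed_key = true;
    }

    static void devmap_put(void)
    {
        /* dec_and_test identifies the 1 -> 0 transition: only the last
         * user flips the key back off. */
        if (atomic_fetch_sub(&devmap_enable, 1) - 1 == 0)
            devmap_managed_key = false;
    }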
2010 May 26
1
[PATCH 1/1] staging: hv: Fix race condition on IC channel initialization (modified)
...+ mb(); DPRINT_INFO(VMBUS, "%s", hv_cb_utils[cnt].log_msg); - - if (VmbusChannelOpen(newChannel, 2 * PAGE_SIZE, - 2 * PAGE_SIZE, NULL, 0, - hv_cb_utils[cnt].callback, - newChannel) == 0) - hv_cb_utils[cnt].channel = newChannel; + if (atomic_inc_return(&ic_channel_initcnt) == + MAX_MSG_TYPES) + osd_WaitEventSet(ic_channel_ready); } - cnt++; } } DPRINT_EXIT(VMBUS); diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c index c21731a..3ae8981 100644 --- a/drivers/staging/hv/vmbus_drv.c +++ b/drivers/...
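The fix is a count-up-and-signal barrier: each IC channel bumps the counter once its init completes, and the single caller whose atomic_inc_return reaches MAX_MSG_TYPES fires the event the waiter blocks on, so the wakeup happens exactly once and never early. A userspace sketch of that idiom (pthread primitives standing in for osd_WaitEventSet; MAX_MSG_TYPES value assumed; signaling side only):

    #include <pthread.h>
    #include <stdatomic.h>

    #define MAX_MSG_TYPES 4  /* assumed value, for illustration */

    static atomic_int ic_channel_initcnt;
    static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t ic_channel_ready = PTHREAD_COND_INITIALIZER;

    /* Each channel calls this once after it finishes initialising. */
    static void ic_channel_init_done(void)
    {
        /* Exactly one increment reaches the total, so the event fires
         * exactly once, no matter how the channels interleave. */
        if (atomic_fetch_add(&ic_channel_initcnt, 1) + 1 == MAX_MSG_TYPES) {
            pthread_mutex_lock(&ready_lock);
            pthread_cond_broadcast(&ic_channel_ready);
            pthread_mutex_unlock(&ready_lock);
        }
    }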
2013 Mar 23
10
[PATCH V7 0/5] virtio-scsi multiqueue
This series implements virtio-scsi queue steering, which gives performance improvements of up to 50% (measured both with QEMU and tcm_vhost backends). This version is rebased on Rusty's virtio ring rework patches, which have already gone into virtio-next today. We hope this can go into virtio-next together with the virtio ring rework patches. V7: respin to fix the patch apply error V6: rework
2024 Jan 23
1
[PATCH] nouveau: rip out fence irq allow/block sequences.
...struct dma_fence_ops nouveau_fence_ops_legacy = { .release = nouveau_fence_release }; -static bool nouveau_fence_enable_signaling(struct dma_fence *f) -{ - struct nouveau_fence *fence = from_fence(f); - struct nouveau_fence_chan *fctx = nouveau_fctx(fence); - bool ret; - bool do_work; - - if (atomic_inc_return(&fctx->notify_ref) == 0) - do_work = true; - - ret = nouveau_fence_no_signaling(f); - if (ret) - set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); - else if (atomic_dec_and_test(&fctx->notify_ref)) - do_work = true; - - if (do_work) - schedule_work(&fctx->allow_...
2024 Jan 25
1
[PATCH] nouveau: rip out fence irq allow/block sequences.
....release = nouveau_fence_release > }; > > -static bool nouveau_fence_enable_signaling(struct dma_fence *f) > -{ > - struct nouveau_fence *fence = from_fence(f); > - struct nouveau_fence_chan *fctx = nouveau_fctx(fence); > - bool ret; > - bool do_work; > - > - if (atomic_inc_return(&fctx->notify_ref) == 0) > - do_work = true; > - > - ret = nouveau_fence_no_signaling(f); > - if (ret) > - set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); > - else if (atomic_dec_and_test(&fctx->notify_ref)) > - do_work = true; > - > - if (d...
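What the deleted function did is a take-then-roll-back on the notify refcount: grab a reference up front, and if the fence turns out to be already signaled, drop it again; a crossing of zero queues the work item that switches fence IRQs between the allow and block states. A loose sketch of that shape, written with the first-reference (0 -> 1) form of the check (the removed code tests == 0, and every name below is invented):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int notify_ref;

    static bool install_fence_callback(void) { return true; }  /* stub */
    static void schedule_allow_block_work(void) { }            /* stub */

    static void enable_signaling(void)
    {
        bool do_work = false;

        /* Take the notify reference first; the 0 -> 1 transition means
         * fence IRQs must now be allowed, handled by the work item. */
        if (atomic_fetch_add(&notify_ref, 1) + 1 == 1)
            do_work = true;

        /* Fence already signaled: roll the reference back; the 1 -> 0
         * transition means IRQs may be blocked again. */
        if (!install_fence_callback() &&
            atomic_fetch_sub(&notify_ref, 1) - 1 == 0)
            do_work = true;

        if (do_work)
            schedule_allow_block_work();
    }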
2016 Apr 15
0
[PATCH 1/2] nouveau/bl: Assign different names to interfaces
...me(void) > +{ > + // 12 chars for "nv_backlight" + 2 for two digits number + 1 for '\0' > + char* backlight_name = (char*)kmalloc(sizeof(char[15]), GFP_KERNEL); Making this stack-allocated in the caller would be so much simpler... > + const int nb = atomic_inc_return(&bl_interfaces_nb) - 1; This kinda sucks if you reload nouveau a bunch. How about using an "ida". Have a look in drivers/gpu/drm/drm_crtc.c for how I use that one. > + if (nb > 0 && nb < 100) > + sprintf(backlight_name, "nv_backlight%d"...
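The reviewer's objection is easier to see with the two allocation strategies side by side: a bare atomic counter never hands an ID back, so reloading the module keeps pushing the interface number up, while an IDA always returns the lowest free ID, so the name drops back to nv_backlight0 after an unload. A hedged miniature in plain C (a 64-slot bitmap standing in for the kernel ida API; no locking):

    #include <stdatomic.h>

    static atomic_int bl_interfaces_nb;  /* bare counter: grows forever */
    static unsigned long used_ids;       /* bitmap: lowest free ID wins */

    static int id_from_counter(void)     /* what the patch does */
    {
        return atomic_fetch_add(&bl_interfaces_nb, 1);
    }

    static int id_from_bitmap(void)      /* what an IDA buys, in miniature */
    {
        for (int i = 0; i < 64; i++)
            if (!(used_ids & (1UL << i))) {
                used_ids |= 1UL << i;
                return i;
            }
        return -1;
    }

    static void id_release(int i)
    {
        used_ids &= ~(1UL << i);
    }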
2013 Feb 12
6
[PATCH v3 0/5] virtio-scsi multiqueue
This series implements virtio-scsi queue steering, which gives performance improvements of up to 50% (measured both with QEMU and tcm_vhost backends). The patches build on top of the new virtio APIs at http://permalink.gmane.org/gmane.linux.kernel.virtualization/18431; the new APIs simplify the locking of the virtio-scsi driver nicely, so it makes sense to require them as a prerequisite.
2013 Mar 19
6
[PATCH V5 0/5] virtio-scsi multiqueue
This series implements virtio-scsi queue steering, which gives performance improvements of up to 50% (measured both with QEMU and tcm_vhost backends). This version is rebased on Rusty's virtio ring rework patches. We hope this can go into virtio-next together with the virtio ring rework patches. V5: improving the grammar of 1/5 (Paolo) move the dropping of sg_elems to 'virtio-scsi: use
2013 Mar 11
7
[PATCH V4 0/5] virtio-scsi multiqueue
This series implements virtio-scsi queue steering, which gives performance improvements of up to 50% (measured both with QEMU and tcm_vhost backends). This version is rebased on Rusty's virtio ring rework patches. We hope this can go into virtio-next together with the virtio ring rework patches. V4: rebase on virtio ring rework patches (rusty's pending-rebases branch) V3 can be found