search for: atomic_dec_and_test

Displaying 20 results from an estimated 91 matches for "atomic_dec_and_test".

2015 Feb 25
1
[PATCH 1/2] nouveau: make nouveau importing global buffers completely thread-safe, with tests
...omic.h
>> +++ b/xf86atomic.h
>> @@ -49,7 +49,8 @@ typedef struct {
>>  # define atomic_read(x) ((x)->atomic)
>>  # define atomic_set(x, val) ((x)->atomic = (val))
>>  # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
>> -# define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
>> +# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
>> +# define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)
> The atomic_dec_and_test change seems like unrelated b...
2015 Feb 24
4
[PATCH 1/2] nouveau: make nouveau importing global buffers completely thread-safe, with tests
...f86atomic.h index 8c4b696..66a8d9a 100644
--- a/xf86atomic.h
+++ b/xf86atomic.h
@@ -49,7 +49,8 @@ typedef struct {
 # define atomic_read(x) ((x)->atomic)
 # define atomic_set(x, val) ((x)->atomic = (val))
 # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
-# define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
+# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
+# define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)
 # define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic,...
2015 Feb 25
0
[PATCH 1/2] nouveau: make nouveau importing global buffers completely thread-safe, with tests
...100644
> --- a/xf86atomic.h
> +++ b/xf86atomic.h
> @@ -49,7 +49,8 @@ typedef struct {
>  # define atomic_read(x) ((x)->atomic)
>  # define atomic_set(x, val) ((x)->atomic = (val))
>  # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
> -# define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
> +# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
> +# define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)

The atomic_dec_and_test change seems like unrelated bugfix. Split...
2015 Feb 26
4
[PATCH v2 1/4] Add atomic_inc_return to atomics.
...@@ -49,6 +49,7 @@ typedef struct {
 # define atomic_read(x) ((x)->atomic)
 # define atomic_set(x, val) ((x)->atomic = (val))
 # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
+# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
 # define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
 # define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v)))
 # define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (v)))
@@ -68,6 +69,7 @@ typedef struct {
 # define atomic_read(x) AO_load_full(&...
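Side note for readers comparing the two macro spellings above: __sync_fetch_and_add() returns the value before the addition and __sync_add_and_fetch() the value after it, so "old == 1" and "new == 0" test the same drop-to-zero condition. A minimal standalone sketch (my_atomic_t and the my_* macros are hypothetical stand-ins, not xf86atomic.h code) that builds with GCC or Clang:

/* Both dec-and-test spellings agree, and atomic_inc_return() yields
 * the post-increment value.
 */
#include <assert.h>

typedef struct { int atomic; } my_atomic_t;

#define my_set(x, v)           ((x)->atomic = (v))
#define my_dec_and_test_old(x) (__sync_fetch_and_add(&(x)->atomic, -1) == 1)
#define my_dec_and_test_new(x) (__sync_add_and_fetch(&(x)->atomic, -1) == 0)
#define my_inc_return(x)       (__sync_add_and_fetch(&(x)->atomic, 1))

int main(void)
{
    my_atomic_t a, b;

    my_set(&a, 2);
    my_set(&b, 2);

    /* 2 -> 1: neither form reports zero. */
    assert(!my_dec_and_test_old(&a) && !my_dec_and_test_new(&b));
    /* 1 -> 0: both forms report zero. */
    assert(my_dec_and_test_old(&a) && my_dec_and_test_new(&b));

    /* Post-increment value comes back from inc_return. */
    assert(my_inc_return(&a) == 1);
    return 0;
}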
2024 Jan 23
1
[PATCH] nouveau: rip out fence irq allow/block sequences.
...ce *fence)
 {
-	int drop = 0;
-
 	dma_fence_signal_locked(&fence->base);
 	list_del(&fence->head);
 	rcu_assign_pointer(fence->channel, NULL);
 
-	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
-		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-
-		if (atomic_dec_and_test(&fctx->notify_ref))
-			drop = 1;
-	}
-
 	dma_fence_put(&fence->base);
-	return drop;
 }
 
 static struct nouveau_fence *
@@ -93,8 +83,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
 	if (error)
 		dma_fence_set_error(&fence->base, error);
-	if...
2024 Jan 25
1
[PATCH] nouveau: rip out fence irq allow/block sequences.
...ma_fence_signal_locked(&fence->base);
> 	list_del(&fence->head);
> 	rcu_assign_pointer(fence->channel, NULL);
> 
> -	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
> -		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
> -
> -		if (atomic_dec_and_test(&fctx->notify_ref))
> -			drop = 1;
> -	}
> -
> 	dma_fence_put(&fence->base);
> -	return drop;
> }
> 
> static struct nouveau_fence *
> @@ -93,8 +83,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
> 	if (error)
> 		d...
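The notify_ref counting being removed above is the classic "enable on first user, disable on last user" idiom. A userspace sketch under that assumption (all names hypothetical, not nouveau code), with C11 atomics standing in for the kernel helpers:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  notify_ref;
static atomic_bool irq_allowed;

static void fence_irq_get(void)
{
    /* Equivalent of atomic_inc_return(&notify_ref) == 1. */
    if (atomic_fetch_add(&notify_ref, 1) == 0)
        atomic_store(&irq_allowed, true);    /* enable hardware notifications */
}

static void fence_irq_put(void)
{
    /* Equivalent of atomic_dec_and_test(&notify_ref). */
    if (atomic_fetch_sub(&notify_ref, 1) == 1)
        atomic_store(&irq_allowed, false);   /* last user: block them again */
}

int main(void)
{
    fence_irq_get();                 /* first user: notifications on */
    fence_irq_get();                 /* nested user: no change */
    assert(atomic_load(&irq_allowed));
    fence_irq_put();                 /* one user still left */
    assert(atomic_load(&irq_allowed));
    fence_irq_put();                 /* last user: notifications off */
    assert(!atomic_load(&irq_allowed));
    return 0;
}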
2015 Mar 19
0
[ANNOUNCE] libdrm 2.4.60
...ice and EU counts

Jerome Glisse (1):
      nouveau: fix unlock nouveau_bo_name_ref()

Kristian Høgsberg (1):
      intel: Fix documentation for drm_intel_gem_bo_wait()

Maarten Lankhorst (4):
      Add atomic_inc_return to atomics.
      Use __sync_add_and_fetch instead of __sync_fetch_and_add for atomic_dec_and_test
      nouveau: make nouveau importing global buffers completely thread-safe, with tests
      nouveau: Do not add most bo's to the global bo list.

Philipp Zabel (1):
      tests: add support for imx-drm

Thomas Klausner (4):
      Fix libdrm's atomic_dec_and_test on Solaris.
      Add NetB...
2019 Jun 13
0
[PATCH 09/22] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...ASK ~((1UL << PA_SECTION_SHIFT) - 1)
 #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_enable;
+
+static void dev_pagemap_put_ops(void *data)
+{
+	if (atomic_dec_and_test(&devmap_enable))
+		static_branch_disable(&devmap_managed_key);
+}
+
+/*
+ * Toggle the static key for ->page_free() callbacks when dev_pagemap
+ * pages go idle.
+ */
+static int dev_pagemap_enable(struct device *dev)
+{
+	if (atomic_inc_return(&devmap_enable) == 1)
+		static_branch...
2019 Jun 17
0
[PATCH 10/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...ASK ~((1UL << PA_SECTION_SHIFT) - 1)
 #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_enable;
+
+static void dev_pagemap_put_ops(void *data)
+{
+	if (atomic_dec_and_test(&devmap_enable))
+		static_branch_disable(&devmap_managed_key);
+}
+
+static int dev_pagemap_get_ops(struct device *dev, struct dev_pagemap *pgmap)
+{
+	if (!pgmap->ops->page_free) {
+		WARN(1, "Missing page_free method\n");
+		return -EINVAL;
+	}
+
+	if (atomic_inc_return(&...
2019 Jun 26
0
[PATCH 11/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...<< PA_SECTION_SHIFT) - 1)
 #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_managed_enable;
+
+static void devmap_managed_enable_put(void *data)
+{
+	if (atomic_dec_and_test(&devmap_managed_enable))
+		static_branch_disable(&devmap_managed_key);
+}
+
+static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+{
+	if (!pgmap->ops->page_free) {
+		WARN(1, "Missing page_free method\n");
+		return -EINVAL;
+	}
+
+	if (atomi...
2019 Jun 26
1
[PATCH 11/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
..._SIZE (1UL << PA_SECTION_SHIFT)
> 
> +#ifdef CONFIG_DEV_PAGEMAP_OPS
> +DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
> +EXPORT_SYMBOL(devmap_managed_key);
> +static atomic_t devmap_managed_enable;
> +
> +static void devmap_managed_enable_put(void *data)
> +{
> +	if (atomic_dec_and_test(&devmap_managed_enable))
> +		static_branch_disable(&devmap_managed_key);
> +}
> +
> +static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
> +{
> +	if (!pgmap->ops->page_free) {

NIT: later on you add the check for pgmap->ops... it...
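The devmap_managed_enable get/put pair above uses the same first-user/last-user counting as the nouveau sketch earlier, here gating a global fast-path flag and rejecting pagemaps that lack a page_free callback. A rough userspace model (hypothetical names; an atomic bool stands in for the static key):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pagemap_ops { void (*page_free)(void); };
struct pagemap     { const struct pagemap_ops *ops; };

static atomic_int  managed_count;   /* plays the role of devmap_managed_enable */
static atomic_bool managed_key;     /* plays the role of devmap_managed_key    */

static int managed_enable_get(const struct pagemap *pgmap)
{
    if (!pgmap->ops || !pgmap->ops->page_free) {
        fprintf(stderr, "Missing page_free method\n");
        return -1;                                /* -EINVAL in the kernel */
    }
    if (atomic_fetch_add(&managed_count, 1) == 0) /* first registration */
        atomic_store(&managed_key, true);
    return 0;
}

static void managed_enable_put(void)
{
    if (atomic_fetch_sub(&managed_count, 1) == 1) /* last registration gone */
        atomic_store(&managed_key, false);
}

static void page_free(void) { }

int main(void)
{
    const struct pagemap_ops ops = { .page_free = page_free };
    struct pagemap pgmap = { .ops = &ops };

    if (managed_enable_get(&pgmap) == 0)
        managed_enable_put();
    return 0;
}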
2010 Jul 13
0
[PATCH 1/2] btrfs: restructure try_release_extent_buffer()
...page(eb, 0);
+	__free_extent_buffer(eb);
+}
+
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len,
 					  struct page *page0,
@@ -3174,10 +3207,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 free_eb:
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
-	for (index = 1; index < i; index++)
-		page_cache_release(extent_buffer_page(eb, index));
-	page_cache_release(extent_buffer_page(eb, 0));
-	__free_extent_buffer(eb);
+	btrfs_release_extent_buffer(eb);
 	return exists;
 }
 
@@ -3831,8 +3861,6 @@ int try...
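The free_eb path above is the usual refcount-release idiom: only the caller whose decrement takes the count to zero tears the object down. A generic userspace sketch of that idiom (hypothetical names, not btrfs code):

#include <stdatomic.h>
#include <stdlib.h>

struct buffer {
    atomic_int refs;
    void *pages;
};

static struct buffer *buffer_alloc(void)
{
    struct buffer *b = calloc(1, sizeof(*b));
    if (b)
        atomic_init(&b->refs, 1);   /* caller holds the first reference */
    return b;
}

static void buffer_get(struct buffer *b)
{
    atomic_fetch_add(&b->refs, 1);
}

static void buffer_put(struct buffer *b)
{
    /* Equivalent of atomic_dec_and_test(): true only for the last put. */
    if (atomic_fetch_sub(&b->refs, 1) == 1) {
        free(b->pages);
        free(b);
    }
}

int main(void)
{
    struct buffer *b = buffer_alloc();

    if (!b)
        return 1;
    buffer_get(b);   /* second user */
    buffer_put(b);   /* refs 2 -> 1, nothing freed */
    buffer_put(b);   /* refs 1 -> 0, buffer freed here */
    return 0;
}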
2010 Aug 04
6
[PATCH -v2 0/3] jbd2 scalability patches
This version fixes three bugs in the 2nd patch of this series that caused a kernel BUG when the system was under stress. We weren't accounting for t_outstanding_credits correctly, and there were race conditions because I had overlooked that __jbd2_log_wait_for_space() and jbd2_get_transaction() require j_state_lock to be write-locked. Theodore Ts'o (3): jbd2: Use
2012 Dec 18
0
[PATCH] [RFC] Btrfs: Subpagesize blocksize (WIP).
..._len;
+		start = ((u64)page->index << PAGE_CACHE_SHIFT) + offset;
 		bvec--;
 		eb = (struct extent_buffer *)page->private;
 		BUG_ON(!eb);
+		if (eb->len < PAGE_SIZE) {
+			while (eb->start != start) {
+				eb = eb->next;
+				BUG_ON(!eb);
+			}
+		}
+
+next_eb:
 		done = atomic_dec_and_test(&eb->io_pages);
 		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
@@ -3184,12 +3198,50 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 			SetPageError(page);
 		}
-		end_page_writeback(page);
+		if (eb->len >= PAGE_SIZE) {
+			end_pa...
2007 Apr 18
0
[patch 7/9] Guest page hinting: minor fault optimization.
...if the page can be made volatile if the page
  * still has users and guest page hinting is enabled.
  */
-static inline int put_page_testzero(struct page *page)
+static inline int put_page_testzero_nocheck(struct page *page)
 {
-	int ret;
 	VM_BUG_ON(atomic_read(&page->_count) == 0);
-	ret = atomic_dec_and_test(&page->_count);
+	return atomic_dec_and_test(&page->_count);
+}
+
+static inline int put_page_testzero(struct page *page)
+{
+	int ret = put_page_testzero_nocheck(page);
 	if (!ret)
 		page_make_volatile(page, 1);
 	return ret;
@@ -356,6 +360,7 @@ static inline void init_page_count(st...
2007 Apr 18
0
[patch 7/9] Guest page hinting: minor fault optimization.
...if the page can be made volatile if the page
  * still has users and guest page hinting is enabled.
  */
-static inline int put_page_testzero(struct page *page)
+static inline int put_page_testzero_nocheck(struct page *page)
 {
-	int ret;
 	VM_BUG_ON(atomic_read(&page->_count) == 0);
-	ret = atomic_dec_and_test(&page->_count);
+	return atomic_dec_and_test(&page->_count);
+}
+
+static inline int put_page_testzero(struct page *page)
+{
+	int ret = put_page_testzero_nocheck(page);
 	if (!ret)
 		page_make_volatile(page, 1);
 	return ret;
@@ -356,6 +360,7 @@ static inline void init_page_count(st...
2014 Jul 23
0
[PATCH 09/17] drm/radeon: use common fence implementation for fences
...ed)
 		return;
 
 	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
@@ -355,7 +355,7 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
 {
 	unsigned long irqflags;
 
-	if (!rdev->ddev->irq_enabled)
+//	if (!rdev->ddev->irq_enabled)
 		return;
 
 	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
2018 Dec 10
1
[PATCH net 4/4] vhost: log dirty page correctly
...mplexity 1 arch/x86/include/asm/current.h:get_current
Cyclomatic Complexity 3 include/linux/string.h:memset
Cyclomatic Complexity 5 include/linux/string.h:memcpy
Cyclomatic Complexity 1 include/asm-generic/getorder.h:__get_order
Cyclomatic Complexity 1 arch/x86/include/asm/atomic.h:arch_atomic_dec_and_test
Cyclomatic Complexity 1 include/asm-generic/atomic-instrumented.h:atomic_dec_and_test
Cyclomatic Complexity 1 include/linux/err.h:PTR_ERR
Cyclomatic Complexity 1 include/linux/thread_info.h:set_ti_thread_flag
Cyclomatic Complexity 1 include/linux/thread_info.h:check_object_size
Cyclo...
2007 Apr 18
0
[Bridge] [PATCH] (4/4) bridge forwarding table RCU
...get(struct net_bridge *br,
+					 unsigned char *addr)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	rcu_read_lock();
+	fdb = __br_fdb_get(br, addr);
+	if (fdb)
+		atomic_inc(&fdb->use_count);
+	rcu_read_unlock();
+	return fdb;
+}
+
+
 void br_fdb_put(struct net_bridge_fdb_entry *ent)
 {
 	if (atomic_dec_and_test(&ent->use_count))
@@ -229,9 +238,9 @@
 	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
 
-	read_lock_bh(&br->hash_lock);
+	rcu_read_lock();
 	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry(f, h, &br->hash[i], hlist) {
+		hlist_for_each_entry_rcu(f, h, &...
2012 Dec 02
3
[PATCH] vhost-blk: Add vhost-blk support v6
...vq_err(&blk->vq, "Failed to write status\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void vhost_blk_req_done(struct bio *bio, int err)
+{
+	struct vhost_blk_req *req = bio->bi_private;
+	struct vhost_blk *blk = req->blk;
+
+	if (err)
+		req->len = err;
+
+	if (atomic_dec_and_test(&req->bio_nr)) {
+		llist_add(&req->llnode, &blk->llhead);
+		vhost_work_queue(&blk->dev, &blk->work);
+	}
+
+	bio_put(bio);
+}
+
+static void vhost_blk_req_umap(struct vhost_blk_req *req)
+{
+	struct req_page_list *pl;
+	int i, j;
+
+	if (req->pl) {
+		for (i...
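vhost_blk_req_done() above shows the fan-out completion idiom: each sub-bio drops one reference on the request, and whichever completion performs the final decrement queues the request for post-processing. A userspace sketch of the same idea (hypothetical names, not vhost-blk code), using C11 atomics and pthreads:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_PARTS 4

struct request {
    atomic_int parts_left;   /* plays the role of req->bio_nr */
};

static void request_complete(struct request *req)
{
    /* Stand-in for llist_add() + vhost_work_queue(). */
    printf("request %p complete\n", (void *)req);
}

static void *worker(void *arg)
{
    struct request *req = arg;

    /* ... perform one part of the I/O here ... */

    /* Last finisher (decrement hits zero) completes the request. */
    if (atomic_fetch_sub(&req->parts_left, 1) == 1)
        request_complete(req);
    return NULL;
}

int main(void)
{
    struct request req;
    pthread_t tid[NR_PARTS];

    atomic_init(&req.parts_left, NR_PARTS);
    for (int i = 0; i < NR_PARTS; i++)
        pthread_create(&tid[i], NULL, worker, &req);
    for (int i = 0; i < NR_PARTS; i++)
        pthread_join(tid[i], NULL);
    return 0;
}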