Hi all - The following 23 patches add more error handling to the btrfs code: - Add btrfs_panic - Catch locking failures in {set,clear}_extent_bit - Push up set_extent_bit errors to callers - Push up lock_extent errors to callers - Push up clear_extent_bit errors to callers - Push up unlock_extent errors to callers - Make pin_down_extent return void - Push up btrfs_pin_extent errors to callers - Push up non-looped btrfs_transaction_start errors to callers New in this version: - Make functions with no return value return void (11 patches) - Simplify btrfs_submit_bio_hook - Simplify tree_merge_hook by adding a merge_bio helper - Push up submit_bio_hook failures Changes since last submission: - Refreshed against 3.1-rc3. -Jeff -- Jeff Mahoney SuSE Labs -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
As part of the effort to eliminate BUG_ON as an error handling technique, we need to determine which errors are actual logic errors, which are on-disk corruption, and which are normal runtime errors e.g. -ENOMEM. Annotating these error cases is helpful to understand and report them. This patch adds a btrfs_panic() routine that will either panic or BUG depending on the new -ofatal_errors={panic,bug} mount option. Since there are still so many BUG_ONs, it defaults to BUG for now but I expect that to change once the error handling effort has made significant progress. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/ctree.h | 11 +++++++++++ fs/btrfs/super.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1363,6 +1363,7 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) +#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 18) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2650,6 +2651,16 @@ do { \ __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ } while (0) +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno, const char *fmt, ...); + +#define btrfs_panic(fs_info, errno, fmt, args...) 
\ +do { \ + struct btrfs_fs_info *_i = (fs_info); \ + __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \ + BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \ +} while (0) + /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL struct posix_acl *btrfs_get_acl(struct inode *inode, int type); --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -74,6 +74,9 @@ static const char *btrfs_decode_error(st case -EROFS: errstr = "Readonly filesystem"; break; + case -EEXIST: + errstr = "Object already exists"; + break; default: if (nbuf) { if (snprintf(nbuf, 16, "error %d", -errno) >= 0) @@ -143,6 +146,33 @@ void __btrfs_std_error(struct btrfs_fs_i btrfs_handle_error(fs_info); } +/* + * __btrfs_panic decodes unexpected, fatal errors from the caller, + * issues an alert, and either panics or BUGs, depending on mount options. + */ +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno, const char *fmt, ...) +{ + struct super_block *sb = fs_info->sb; + char nbuf[16]; + const char *errstr; + struct va_format vaf = { .fmt = fmt }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + errstr = btrfs_decode_error(fs_info, errno, nbuf); + if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR) + panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n", + sb->s_id, function, line, &vaf, errstr); + + printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n", + sb->s_id, function, line, &vaf, errstr); + va_end(args); + /* Caller calls BUG() */ +} + static void btrfs_put_super(struct super_block *sb) { struct btrfs_root *root = btrfs_sb(sb); @@ -162,7 +192,7 @@ enum { Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, - Opt_inode_cache, Opt_err, + Opt_inode_cache, Opt_fatal_errors, Opt_err, }; static match_table_t tokens = { @@ -195,6 +225,7 @@ static match_table_t tokens = { 
{Opt_subvolrootid, "subvolrootid=%d"}, {Opt_defrag, "autodefrag"}, {Opt_inode_cache, "inode_cache"}, + {Opt_fatal_errors, "fatal_errors=%s"}, {Opt_err, NULL}, }; @@ -381,6 +412,18 @@ int btrfs_parse_options(struct btrfs_roo printk(KERN_INFO "btrfs: enabling auto defrag"); btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); break; + case Opt_fatal_errors: + if (strcmp(args[0].from, "panic") == 0) + btrfs_set_opt(info->mount_opt, + PANIC_ON_FATAL_ERROR); + else if (strcmp(args[0].from, "bug") == 0) + btrfs_clear_opt(info->mount_opt, + PANIC_ON_FATAL_ERROR); + else { + ret = -EINVAL; + goto out; + } + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "''%s''\n", p); @@ -729,6 +772,8 @@ static int btrfs_show_options(struct seq seq_puts(seq, ",autodefrag"); if (btrfs_test_opt(root, INODE_MAP_CACHE)) seq_puts(seq, ",inode_cache"); + if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR)) + seq_puts(seq, ",fatal_errors=panic"); return 0; } -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 02/23] btrfs: Catch locking failures in {set,clear}_extent_bit
The *_state functions can only return 0 or -EEXIST. This patch addresses the cases where those functions return -EEXIST, representing a locking failure. It handles them by panicking with an appropriate error message. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent_io.c | 42 +++++++++++++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 11 deletions(-) --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -51,6 +51,12 @@ struct extent_page_data { unsigned int sync_io:1; }; +static inline struct btrfs_fs_info * +tree_fs_info(struct extent_io_tree *tree) +{ + return btrfs_sb(tree->mapping->host->i_sb)->fs_info; +} + int __init extent_io_init(void) { extent_state_cache = kmem_cache_create("extent_state", @@ -531,7 +537,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); prealloc = NULL; if (err) goto out; @@ -553,7 +562,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); if (wake) wake_up(&state->wq); @@ -736,8 +748,11 @@ again: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = insert_state(tree, prealloc, start, end, &bits); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); prealloc = NULL; - BUG_ON(err == -EEXIST); goto out; } state = rb_entry(node, struct extent_state, rb_node); @@ -803,7 +818,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); - BUG_ON(err == -EEXIST); 
+ if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); prealloc = NULL; if (err) goto out; @@ -840,12 +858,11 @@ hit_next: */ err = insert_state(tree, prealloc, start, this_end, &bits); - BUG_ON(err == -EEXIST); - if (err) { - free_extent_state(prealloc); - prealloc = NULL; - goto out; - } + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); + cache_state(prealloc, cached_state); prealloc = NULL; start = this_end + 1; @@ -867,7 +884,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); set_state_bits(tree, prealloc, &bits); cache_state(prealloc, cached_state); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 03/23] btrfs: Push up set_extent_bit errors to callers
In the locking case, set_extent_bit can return -EEXIST but callers already handle that. In the non-locking case, it can''t fail. Memory allocation failures are handled by BUG_ON. This patch pushes up the BUG_ONs from set_extent_bit to callers, except where -ENOMEM can''t occur (e.g. __GFP_WAIT && __GFP_NOFAIL). Update v2: Changed cases of BUG_ON(ret) to BUG_ON(ret < 0) Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 37 +++++++++++++++++++++++----------- fs/btrfs/extent_io.c | 52 ++++++++++++++++++++++++++++++++++++------------- fs/btrfs/file-item.c | 3 +- fs/btrfs/inode.c | 25 ++++++++++++++++------- fs/btrfs/ioctl.c | 5 ++-- fs/btrfs/relocation.c | 21 ++++++++++++------- 6 files changed, 99 insertions(+), 44 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -192,11 +192,14 @@ block_group_cache_tree_search(struct btr static int add_excluded_extent(struct btrfs_root *root, u64 start, u64 num_bytes) { + int ret; u64 end = start + num_bytes - 1; - set_extent_bits(&root->fs_info->freed_extents[0], - start, end, EXTENT_UPTODATE, GFP_NOFS); - set_extent_bits(&root->fs_info->freed_extents[1], - start, end, EXTENT_UPTODATE, GFP_NOFS); + ret = set_extent_bits(&root->fs_info->freed_extents[0], + start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); + ret = set_extent_bits(&root->fs_info->freed_extents[1], + start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -4142,8 +4145,9 @@ static int update_block_group(struct btr spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - set_extent_dirty(info->pinned_extents, - bytenr, bytenr + num_bytes - 1, + /* Can''t return -ENOMEM */ + set_extent_dirty(info->pinned_extents, bytenr, + bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); } btrfs_put_block_group(cache); @@ -4184,6 +4188,7 @@ static int pin_down_extent(struct btrfs_ spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); + /* __GFP_NOFAIL means it can''t return -ENOMEM */ 
set_extent_dirty(root->fs_info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); return 0; @@ -5636,6 +5641,7 @@ struct extent_buffer *btrfs_init_new_buf int level) { struct extent_buffer *buf; + int ret; buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) @@ -5654,14 +5660,21 @@ struct extent_buffer *btrfs_init_new_buf * EXENT bit to differentiate dirty pages. */ if (root->log_transid % 2 == 0) - set_extent_dirty(&root->dirty_log_pages, buf->start, - buf->start + buf->len - 1, GFP_NOFS); + ret = set_extent_dirty(&root->dirty_log_pages, + buf->start, + buf->start + buf->len - 1, + GFP_NOFS); else - set_extent_new(&root->dirty_log_pages, buf->start, - buf->start + buf->len - 1, GFP_NOFS); + ret = set_extent_new(&root->dirty_log_pages, + buf->start, + buf->start + buf->len - 1, + GFP_NOFS); + BUG_ON(ret < 0); } else { - set_extent_dirty(&trans->transaction->dirty_pages, buf->start, - buf->start + buf->len - 1, GFP_NOFS); + ret = set_extent_dirty(&trans->transaction->dirty_pages, + buf->start, buf->start + buf->len - 1, + GFP_NOFS); + BUG_ON(ret < 0); } trans->blocks_used++; /* this returns a buffer locked for blocking */ --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -703,6 +703,9 @@ static void uncache_state(struct extent_ * part of the range already has the desired bits set. The start of the * existing range is returned in failed_start in this case. * + * It may also fail with -ENOMEM if memory cannot be obtained for extent_state + * structures. + * * [start, end] is inclusive This takes the tree lock. 
*/ @@ -721,7 +724,8 @@ int set_extent_bit(struct extent_io_tree again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - BUG_ON(!prealloc); + if (!prealloc) + return -ENOMEM; } spin_lock(&tree->lock); @@ -740,7 +744,11 @@ again: node = tree_search(tree, start); if (!node) { prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = insert_state(tree, prealloc, start, end, &bits); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -810,7 +818,11 @@ hit_next: } prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, start); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -844,7 +856,10 @@ hit_next: this_end = last_start - 1; prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } /* * Avoid to free ''prealloc'' if it can be merged with @@ -876,7 +891,11 @@ hit_next: } prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, end + 1); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -988,6 +1007,7 @@ int lock_extent_bits(struct extent_io_tr } WARN_ON(start > end); } + BUG_ON(err < 0); return err; } @@ -1010,6 +1030,7 @@ int try_lock_extent(struct extent_io_tre EXTENT_LOCKED, 1, 0, NULL, mask); return 0; } + BUG_ON(err < 0); return 1; } @@ -1757,8 +1778,9 @@ static void end_bio_extent_readpage(stru } if (uptodate) { - set_extent_uptodate(tree, start, end, &cached, - GFP_ATOMIC); + ret = set_extent_uptodate(tree, start, end, &cached, + GFP_ATOMIC); + BUG_ON(ret < 0); } unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); @@ -1980,8 +2002,9 @@ static int __extent_read_full_page(struc memset(userpage + pg_offset, 0, iosize); 
flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); - set_extent_uptodate(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + ret = set_extent_uptodate(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); break; @@ -2030,8 +2053,9 @@ static int __extent_read_full_page(struc flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); - set_extent_uptodate(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + ret = set_extent_uptodate(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); cur = cur + iosize; @@ -3286,8 +3310,10 @@ int set_extent_buffer_uptodate(struct ex num_pages = num_extent_pages(eb->start, eb->len); if (eb_straddles_pages(eb)) { - set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - NULL, GFP_NOFS); + int ret = set_extent_uptodate(tree, eb->start, + eb->start + eb->len - 1, + NULL, GFP_NOFS); + BUG_ON(ret < 0); } for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -212,9 +212,10 @@ static int __btrfs_lookup_bio_sums(struc sum = 0; if (BTRFS_I(inode)->root->root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID) { - set_extent_bits(io_tree, offset, + ret = set_extent_bits(io_tree, offset, offset + bvec->bv_len - 1, EXTENT_NODATASUM, GFP_NOFS); + BUG_ON(ret < 0); } else { printk(KERN_INFO "btrfs no csum found " "for inode %llu start %llu\n", --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1548,6 +1548,7 @@ static void btrfs_writepage_fixup_worker struct inode *inode; u64 page_start; u64 page_end; + int ret; fixup = container_of(work, struct btrfs_writepage_fixup, work); page = fixup->page; @@ -1579,7 +1580,9 @@ again: } BUG(); - btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end, + &cached_state); + BUG_ON(ret 
< 0); ClearPageChecked(page); out: unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, @@ -1883,8 +1886,11 @@ static int btrfs_io_failed_hook(struct b } failrec->logical = logical; free_extent_map(em); - set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | - EXTENT_DIRTY, GFP_NOFS); + + /* Doesn''t this ignore locking failures? */ + ret = set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | + EXTENT_DIRTY, GFP_NOFS); + BUG_ON(ret < 0); set_state_private(failure_tree, start, (u64)(unsigned long)failrec); } else { @@ -5167,8 +5173,10 @@ again: kunmap(page); btrfs_mark_buffer_dirty(leaf); } - set_extent_uptodate(io_tree, em->start, - extent_map_end(em) - 1, NULL, GFP_NOFS); + ret = set_extent_uptodate(io_tree, em->start, + extent_map_end(em) - 1, + NULL, GFP_NOFS); + BUG_ON(ret < 0); goto insert; } else { printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); @@ -6230,9 +6238,10 @@ static ssize_t btrfs_direct_IO(int rw, s */ if (writing) { write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; - ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - EXTENT_DELALLOC, 0, NULL, &cached_state, - GFP_NOFS); + ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, EXTENT_DELALLOC, 0, NULL, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (ret) { clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, EXTENT_LOCKED | write_bits, --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -938,8 +938,9 @@ again: } - btrfs_set_extent_delalloc(inode, page_start, page_end - 1, - &cached_state); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end - 1, + &cached_state); + BUG_ON(ret < 0); unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state, --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2623,11 +2623,11 @@ static int finish_pending_nodes(struct b return err; } -static void mark_block_processed(struct reloc_control *rc, +static int mark_block_processed(struct 
reloc_control *rc, u64 bytenr, u32 blocksize) { - set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, - EXTENT_DIRTY, GFP_NOFS); + return set_extent_bits(&rc->processed_blocks, bytenr, + bytenr + blocksize - 1, EXTENT_DIRTY, GFP_NOFS); } static void __mark_block_processed(struct reloc_control *rc, @@ -2636,8 +2636,10 @@ static void __mark_block_processed(struc u32 blocksize; if (node->level == 0 || in_block_group(node->bytenr, rc->block_group)) { + int ret; blocksize = btrfs_level_size(rc->extent_root, node->level); - mark_block_processed(rc, node->bytenr, blocksize); + ret = mark_block_processed(rc, node->bytenr, blocksize); + BUG_ON(ret < 0); } node->processed = 1; } @@ -2994,13 +2996,16 @@ static int relocate_file_extent_cluster( if (nr < cluster->nr && page_start + offset == cluster->boundary[nr]) { - set_extent_bits(&BTRFS_I(inode)->io_tree, - page_start, page_end, - EXTENT_BOUNDARY, GFP_NOFS); + ret = set_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end, + EXTENT_BOUNDARY, GFP_NOFS); + BUG_ON(ret < 0); nr++; } - btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); + ret = btrfs_set_extent_delalloc(inode, page_start, + page_end, NULL); + BUG_ON(ret < 0); set_page_dirty(page); unlock_extent(&BTRFS_I(inode)->io_tree, -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 04/23] btrfs: Push up lock_extent errors to callers
lock_extent, try_lock_extent, and lock_extent_bits can''t currently fail because errors are caught via BUG_ON. This patch pushes the error handling up to callers, which currently only handle them via BUG_ON themselves. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/compression.c | 3 +- fs/btrfs/disk-io.c | 5 ++- fs/btrfs/extent_io.c | 20 ++++++++----- fs/btrfs/file.c | 17 ++++++----- fs/btrfs/free-space-cache.c | 6 ++-- fs/btrfs/inode.c | 66 ++++++++++++++++++++++++++------------------ fs/btrfs/ioctl.c | 16 ++++++---- fs/btrfs/relocation.c | 18 ++++++++---- 8 files changed, 94 insertions(+), 57 deletions(-) --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -496,7 +496,8 @@ static noinline int add_ra_bio_pages(str * sure they map to this compressed extent on disk. */ set_page_extent_mapped(page); - lock_extent(tree, last_offset, end, GFP_NOFS); + ret = lock_extent(tree, last_offset, end, GFP_NOFS); + BUG_ON(ret < 0); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, last_offset, PAGE_CACHE_SIZE); --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -331,8 +331,9 @@ static int verify_parent_transid(struct if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; - lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (extent_buffer_uptodate(io_tree, eb, cached_state) && btrfs_header_generation(eb) == parent_transid) { ret = 0; --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1013,7 +1013,6 @@ int lock_extent_bits(struct extent_io_tr } WARN_ON(start > end); } - BUG_ON(err < 0); return err; } @@ -1035,8 +1034,8 @@ int try_lock_extent(struct extent_io_tre clear_extent_bit(tree, start, failed_start - 1, EXTENT_LOCKED, 1, 0, NULL, mask); return 0; - } - BUG_ON(err < 0); + } else if (err < 0) + return err; return 1; } @@ -1347,8 +1346,9 @@ again: 
BUG_ON(ret); /* step three, lock the state bits for the whole range */ - lock_extent_bits(tree, delalloc_start, delalloc_end, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(tree, delalloc_start, delalloc_end, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* then test to make sure it is all still delalloc */ ret = test_range_bit(tree, delalloc_start, delalloc_end, @@ -1977,7 +1977,8 @@ static int __extent_read_full_page(struc end = page_end; while (1) { - lock_extent(tree, start, end, GFP_NOFS); + ret = lock_extent(tree, start, end, GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_ordered_extent(inode, start); if (!ordered) break; @@ -2663,12 +2664,14 @@ int extent_invalidatepage(struct extent_ u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); u64 end = start + PAGE_CACHE_SIZE - 1; size_t blocksize = page->mapping->host->i_sb->s_blocksize; + int ret; start += (offset + blocksize - 1) & ~(blocksize - 1); if (start > end) return 0; - lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); wait_on_page_writeback(page); clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | @@ -2878,8 +2881,9 @@ int extent_fiemap(struct inode *inode, s last_for_get_extent = isize; } - lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); em = get_extent_skip_holes(inode, off, last_for_get_extent, get_extent); --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1104,9 +1104,10 @@ again: err = 0; if (start_pos < inode->i_size) { struct btrfs_ordered_extent *ordered; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, 0, &cached_state, - GFP_NOFS); + err = lock_extent_bits(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ordered = 
btrfs_lookup_first_ordered_extent(inode, last_pos - 1); if (ordered && @@ -1612,8 +1613,9 @@ static long btrfs_fallocate(struct file /* the extent lock is ordered inside the running * transaction */ - lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, - locked_end, 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_first_ordered_extent(inode, alloc_end - 1); if (ordered && @@ -1696,8 +1698,9 @@ static int find_desired_extent(struct in if (inode->i_size == 0) return -ENXIO; - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, - &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * Delalloc is such a pain. If we have a hole and we have pending --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -609,8 +609,10 @@ int __btrfs_write_out_cache(struct btrfs } index = 0; - lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); /* * When searching for pinned extents, we need to start at our start --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -588,9 +588,11 @@ retry: int page_started = 0; unsigned long nr_written = 0; - lock_extent(io_tree, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, GFP_NOFS); + ret = lock_extent(io_tree, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + GFP_NOFS); + BUG_ON(ret < 0); /* allocate blocks */ ret = cow_file_range(inode, async_cow->locked_page, @@ -617,9 +619,10 @@ retry: continue; } - lock_extent(io_tree, async_extent->start, - async_extent->start + async_extent->ram_size - 1, - GFP_NOFS); + ret = lock_extent(io_tree, async_extent->start, + 
async_extent->start + + async_extent->ram_size - 1, GFP_NOFS); + BUG_ON(ret < 0); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); @@ -1563,8 +1566,9 @@ again: page_start = page_offset(page); page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; - lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, - &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* already ordered? We''re done */ if (PagePrivate2(page)) @@ -1746,9 +1750,11 @@ static int btrfs_finish_ordered_io(struc goto out; } - lock_extent_bits(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (nolock) trans = btrfs_join_transaction_nolock(root); @@ -3410,8 +3416,9 @@ again: } wait_on_page_writeback(page); - lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); set_page_extent_mapped(page); ordered = btrfs_lookup_ordered_extent(inode, page_start); @@ -3486,8 +3493,9 @@ int btrfs_cont_expand(struct inode *inod struct btrfs_ordered_extent *ordered; btrfs_wait_ordered_range(inode, hole_start, block_end - hole_start); - lock_extent_bits(io_tree, hole_start, block_end - 1, 0, - &cached_state, GFP_NOFS); + err = lock_extent_bits(io_tree, hole_start, block_end - 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ordered = btrfs_lookup_ordered_extent(inode, hole_start); if (!ordered) break; @@ -5798,9 +5806,10 @@ again: goto out; } - lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, - ordered->file_offset + ordered->len - 1, 0, - &cached_state, GFP_NOFS); + ret = 
lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, + ordered->file_offset + ordered->len - 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { ret = btrfs_mark_extent_written(trans, inode, @@ -6214,8 +6223,9 @@ static ssize_t btrfs_direct_IO(int rw, s } while (1) { - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, + lockend, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * We''re concerned with the entire range that we''re going to be * doing DIO to, so we need to make sure theres no ordered @@ -6354,6 +6364,7 @@ static void btrfs_invalidatepage(struct struct extent_state *cached_state = NULL; u64 page_start = page_offset(page); u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + int ret; /* @@ -6370,8 +6381,9 @@ static void btrfs_invalidatepage(struct btrfs_releasepage(page, GFP_NOFS); return; } - lock_extent_bits(tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_ordered_extent(page->mapping->host, page_offset(page)); if (ordered) { @@ -6393,8 +6405,9 @@ static void btrfs_invalidatepage(struct } btrfs_put_ordered_extent(ordered); cached_state = NULL; - lock_extent_bits(tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(tree, page_start, page_end, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); } clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | @@ -6462,8 +6475,9 @@ again: } wait_on_page_writeback(page); - lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); set_page_extent_mapped(page); /* --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -757,7 
+757,7 @@ static int should_defrag_range(struct in struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - int ret = 1; + int ret = 1, err; /* * make sure that once we start defragging and extent, we keep on @@ -778,7 +778,8 @@ static int should_defrag_range(struct in if (!em) { /* get the big lock and read metadata off disk */ - lock_extent(io_tree, start, start + len - 1, GFP_NOFS); + err = lock_extent(io_tree, start, start + len - 1, GFP_NOFS); + BUG_ON(err < 0); em = btrfs_get_extent(inode, NULL, 0, start, len, 0); unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); @@ -902,9 +903,10 @@ again: page_start = page_offset(pages[0]); page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); if (ordered && ordered->file_offset + ordered->len > page_start && @@ -2225,7 +2227,9 @@ static noinline long btrfs_ioctl_clone(s another, and lock file content */ while (1) { struct btrfs_ordered_extent *ordered; - lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + ret = lock_extent(&BTRFS_I(src)->io_tree, off, off+len, + GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_first_ordered_extent(src, off+len); if (!ordered && !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1577,6 +1577,7 @@ int replace_file_extents(struct btrfs_tr ret = try_lock_extent(&BTRFS_I(inode)->io_tree, key.offset, end, GFP_NOFS); + BUG_ON(ret < 0); if (!ret) continue; @@ -1899,6 +1900,7 @@ static int invalidate_extent_cache(struc u64 objectid; u64 start, end; u64 ino; + int ret; objectid = min_key->objectid; while (1) { @@ 
-1952,7 +1954,9 @@ static int invalidate_extent_cache(struc } /* the lock_extent waits for readpage to complete */ - lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, start, end, + GFP_NOFS); + BUG_ON(ret < 0); btrfs_drop_extent_cache(inode, start, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -2862,7 +2866,9 @@ int prealloc_file_extent_cluster(struct else end = cluster->end - offset; - lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, start, + end, GFP_NOFS); + BUG_ON(ret < 0); num_bytes = end + 1 - start; ret = btrfs_prealloc_file_range(inode, 0, start, num_bytes, num_bytes, @@ -2899,7 +2905,8 @@ int setup_extent_mapping(struct inode *i em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + BUG_ON(ret < 0); while (1) { write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -2989,8 +2996,9 @@ static int relocate_file_extent_cluster( page_start = (u64)page->index << PAGE_CACHE_SHIFT; page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(&BTRFS_I(inode)->io_tree, - page_start, page_end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, + page_start, page_end, GFP_NOFS); + BUG_ON(ret < 0); set_page_extent_mapped(page); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 05/23] btrfs: Push up clear_extent_bit errors to callers
clear_extent_bit can fail with -ENOMEM for a specific case but will BUG on other memory allocation failures. This patch returns -ENOMEM for memory allocation failures and handles them with BUG_ON in callers which don't handle it already. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/disk-io.c | 7 ++- fs/btrfs/extent-tree.c | 14 ++++-- fs/btrfs/extent_io.c | 56 ++++++++++++++++++-------- fs/btrfs/file.c | 10 ++-- fs/btrfs/free-space-cache.c | 20 +++++---- fs/btrfs/inode.c | 92 +++++++++++++++++++++++++++----------------- fs/btrfs/ioctl.c | 9 ++-- fs/btrfs/relocation.c | 5 +- fs/btrfs/transaction.c | 4 + fs/btrfs/tree-log.c | 5 +- 10 files changed, 142 insertions(+), 80 deletions(-) --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2981,7 +2981,9 @@ static int btrfs_destroy_marked_extents( if (ret) break; - clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); + ret = clear_extent_bits(dirty_pages, start, end, mark, + GFP_NOFS); + BUG_ON(ret < 0); while (start <= end) { index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; @@ -3042,7 +3044,8 @@ static int btrfs_destroy_pinned_extent(s end + 1 - start, NULL); - clear_extent_dirty(unpin, start, end, GFP_NOFS); + ret = clear_extent_dirty(unpin, start, end, GFP_NOFS); + BUG_ON(ret < 0); btrfs_error_unpin_extent_range(root, start, end); cond_resched(); } --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -207,14 +207,17 @@ static void free_excluded_extents(struct struct btrfs_block_group_cache *cache) { u64 start, end; + int ret; start = cache->key.objectid; end = start + cache->key.offset - 1; - clear_extent_bits(&root->fs_info->freed_extents[0], - start, end, EXTENT_UPTODATE, GFP_NOFS); - clear_extent_bits(&root->fs_info->freed_extents[1], - start, end, EXTENT_UPTODATE, GFP_NOFS); + ret = clear_extent_bits(&root->fs_info->freed_extents[0], + start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); + ret = clear_extent_bits(&root->fs_info->freed_extents[1], + 
start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); } static int exclude_super_stripes(struct btrfs_root *root, @@ -4359,7 +4362,8 @@ int btrfs_finish_extent_commit(struct bt ret = btrfs_discard_extent(root, start, end + 1 - start, NULL); - clear_extent_dirty(unpin, start, end, GFP_NOFS); + ret = clear_extent_dirty(unpin, start, end, GFP_NOFS); + BUG_ON(ret < 0); unpin_extent_range(root, start, end); cond_resched(); } --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -529,7 +529,11 @@ hit_next: if (state->start < start) { prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, start); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -554,7 +558,11 @@ hit_next: */ if (state->start <= end && state->end > end) { prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, end + 1); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -1024,9 +1032,12 @@ int try_lock_extent(struct extent_io_tre err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, &failed_start, NULL, mask); if (err == -EEXIST) { - if (failed_start > start) - clear_extent_bit(tree, start, failed_start - 1, - EXTENT_LOCKED, 1, 0, NULL, mask); + if (failed_start > start) { + err = clear_extent_bit(tree, start, failed_start - 1, + EXTENT_LOCKED, 1, 0, NULL, + mask); + BUG_ON(err < 0); + } return 0; } else if (err < 0) return err; @@ -1036,14 +1047,18 @@ int try_lock_extent(struct extent_io_tre int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached, gfp_t mask) { - return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, - mask); + int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, + cached, mask); + BUG_ON(ret < 0); + return ret; } int unlock_extent(struct 
extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { - return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, - mask); + int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, + mask); + BUG_ON(ret < 0); + return ret; } /* @@ -1383,7 +1398,9 @@ int extent_clear_unlock_delalloc(struct if (op & EXTENT_CLEAR_DELALLOC) clear_bits |= EXTENT_DELALLOC; - clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); + ret = clear_extent_bit(tree, start, end, clear_bits, + 1, 0, NULL, GFP_NOFS); + BUG_ON(ret < 0); if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK | EXTENT_SET_PRIVATE2))) @@ -1688,7 +1705,9 @@ static void end_bio_extent_writepage(str } if (!uptodate) { - clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); + ret = clear_extent_uptodate(tree, start, end, + NULL, GFP_NOFS); + BUG_ON(ret < 0); ClearPageUptodate(page); SetPageError(page); } @@ -2667,10 +2686,11 @@ int extent_invalidatepage(struct extent_ ret = lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); BUG_ON(ret < 0); wait_on_page_writeback(page); - clear_extent_bit(tree, start, end, - EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, - 1, 1, &cached_state, GFP_NOFS); + ret = clear_extent_bit(tree, start, end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, + 1, 1, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -3293,8 +3313,10 @@ int clear_extent_buffer_uptodate(struct clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); if (eb_straddles_pages(eb)) { - clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - cached_state, GFP_NOFS); + int ret = clear_extent_uptodate(tree, eb->start, + eb->start + eb->len - 1, + cached_state, GFP_NOFS); + BUG_ON(ret < 0); } for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1128,10 +1128,12 @@ again: if (ordered) 
btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, - last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, - GFP_NOFS); + err = clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, + last_pos - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); unlock_extent_cached(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, &cached_state, GFP_NOFS); --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -800,10 +800,12 @@ int __btrfs_write_out_cache(struct btrfs ret = btrfs_search_slot(trans, root, &key, path, 1, 1); if (ret < 0) { + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, NULL, + GFP_NOFS); + BUG_ON(ret < 0); ret = -1; - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); goto out; } leaf = path->nodes[0]; @@ -814,12 +816,14 @@ int __btrfs_write_out_cache(struct btrfs btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || found_key.offset != offset) { - ret = -1; - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, NULL, - GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, + bytes - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + NULL, GFP_NOFS); + BUG_ON(ret < 0); btrfs_release_path(path); + ret = -1; goto out; } } --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -960,9 +960,11 @@ static int cow_file_range_async(struct i unsigned long nr_pages; u64 cur_end; int limit = 10 * 1024 * 1042; + int ret; - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, - 1, 0, NULL, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, + EXTENT_LOCKED, 1, 0, NULL, 
GFP_NOFS); + BUG_ON(ret < 0); while (start < end) { async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); BUG_ON(!async_cow); @@ -1917,9 +1919,11 @@ static int btrfs_io_failed_hook(struct b } if (!state || failrec->last_mirror > num_copies) { set_state_private(failure_tree, failrec->start, 0); - clear_extent_bits(failure_tree, failrec->start, - failrec->start + failrec->len - 1, - EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); + ret = clear_extent_bits(failure_tree, failrec->start, + failrec->start + failrec->len - 1, + EXTENT_LOCKED | EXTENT_DIRTY, + GFP_NOFS); + BUG_ON(ret < 0); kfree(failrec); return -EIO; } @@ -1963,11 +1967,13 @@ static int btrfs_clean_io_failures(struc private_failure; set_state_private(&BTRFS_I(inode)->io_failure_tree, failure->start, 0); - clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, + ret = clear_extent_bits( + &BTRFS_I(inode)->io_failure_tree, failure->start, failure->start + failure->len - 1, EXTENT_DIRTY | EXTENT_LOCKED, GFP_NOFS); + BUG_ON(ret < 0); kfree(failure); } } @@ -2001,8 +2007,9 @@ static int btrfs_readpage_end_io_hook(st if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { - clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, - GFP_NOFS); + ret = clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, + GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -3432,9 +3439,11 @@ again: goto again; } - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, - EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - 0, 0, &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); @@ -5584,6 +5593,7 @@ static int btrfs_get_blocks_direct(struc u64 start = iblock << inode->i_blkbits; u64 len = bh_result->b_size; struct 
btrfs_trans_handle *trans; + int ret; em = btrfs_get_extent(inode, NULL, 0, start, len, 0); if (IS_ERR(em)) @@ -5679,9 +5689,11 @@ must_cow: return PTR_ERR(em); len = min(len, em->len - (start - em->start)); unlock: - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, - EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, - 0, NULL, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, start, + start + len - 1, + EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, + 1, 0, NULL, GFP_NOFS); + BUG_ON(ret < 0); map: bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; @@ -6253,9 +6265,12 @@ static ssize_t btrfs_direct_IO(int rw, s &cached_state, GFP_NOFS); BUG_ON(ret < 0); if (ret) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockend, EXTENT_LOCKED | write_bits, - 1, 0, &cached_state, GFP_NOFS); + int ret2; + ret2 = clear_extent_bit(&BTRFS_I(inode)->io_tree, + lockstart, lockend, + EXTENT_LOCKED | write_bits, + 1, 0, &cached_state, GFP_NOFS); + BUG_ON(ret2 < 0); goto out; } } @@ -6269,19 +6284,21 @@ static ssize_t btrfs_direct_IO(int rw, s btrfs_submit_direct, 0); if (ret < 0 && ret != -EIOCBQUEUED) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, + offset + iov_length(iov, nr_segs) - 1, + EXTENT_LOCKED | write_bits, 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { /* * We''re falling back to buffered, unlock the section we didn''t * do IO on. 
*/ - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, + offset + iov_length(iov, nr_segs) - 1, + EXTENT_LOCKED | write_bits, 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); } out: free_extent_state(cached_state); @@ -6391,10 +6408,11 @@ static void btrfs_invalidatepage(struct * IO on this page will never be started, so we need * to account for any ordered extents now */ - clear_extent_bit(tree, page_start, page_end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, - &cached_state, GFP_NOFS); + ret = clear_extent_bit(tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, + 1, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * whoever cleared the private bit is responsible * for the finish_ordered_io @@ -6409,9 +6427,11 @@ static void btrfs_invalidatepage(struct 0, &cached_state, GFP_NOFS); BUG_ON(ret < 0); } - clear_extent_bit(tree, page_start, page_end, - EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); + ret = clear_extent_bit(tree, page_start, page_end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 1, 1, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); __btrfs_releasepage(page, GFP_NOFS); ClearPageChecked(page); @@ -6501,9 +6521,11 @@ again: * is probably a better way to do this, but for now keep consistent with * prepare_pages in the normal write path. 
*/ - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, - EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - 0, 0, &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -926,10 +926,11 @@ again: if (ordered) btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, - page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, - GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, + page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); if (i_done != num_pages) { spin_lock(&BTRFS_I(inode)->lock); --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3834,8 +3834,9 @@ restart: } btrfs_release_path(path); - clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, - GFP_NOFS); + ret = clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, + EXTENT_DIRTY, GFP_NOFS); + BUG_ON(ret < 0); if (trans) { nr = trans->blocks_used; --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -636,7 +636,9 @@ int btrfs_wait_marked_extents(struct btr if (ret) break; - clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); + ret = clear_extent_bits(dirty_pages, start, end, + mark, GFP_NOFS); + BUG_ON(ret < 0); while (start <= end) { index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2158,8 +2158,9 @@ static void free_log_tree(struct btrfs_t if (ret) break; - clear_extent_bits(&log->dirty_log_pages, start, end, - EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); + ret = clear_extent_bits(&log->dirty_log_pages, start, end, + EXTENT_DIRTY | 
EXTENT_NEW, GFP_NOFS); + BUG_ON(ret < 0); } free_extent_buffer(log->node); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 06/23] btrfs: Push up unlock_extent errors to callers
The previous patch pushed the clear_extent_bit error handling up a level, which included unlock_extent and unlock_extent_cache. This patch pushes the BUG_ON up into the callers of those functions. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/compression.c | 9 ++-- fs/btrfs/disk-io.c | 7 +-- fs/btrfs/extent_io.c | 52 +++++++++++++---------- fs/btrfs/file.c | 35 ++++++++------- fs/btrfs/free-space-cache.c | 15 ++++-- fs/btrfs/inode.c | 98 ++++++++++++++++++++++++++------------------ fs/btrfs/ioctl.c | 26 +++++++---- fs/btrfs/relocation.c | 24 +++++++--- 8 files changed, 160 insertions(+), 106 deletions(-) --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -507,7 +507,8 @@ static noinline int add_ra_bio_pages(str (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || (em->block_start >> 9) != cb->orig_bio->bi_sector) { free_extent_map(em); - unlock_extent(tree, last_offset, end, GFP_NOFS); + ret = unlock_extent(tree, last_offset, end, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); page_cache_release(page); break; @@ -535,7 +536,8 @@ static noinline int add_ra_bio_pages(str nr_pages++; page_cache_release(page); } else { - unlock_extent(tree, last_offset, end, GFP_NOFS); + ret = unlock_extent(tree, last_offset, end, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); page_cache_release(page); break; --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -326,7 +326,7 @@ static int verify_parent_transid(struct struct extent_buffer *eb, u64 parent_transid) { struct extent_state *cached_state = NULL; - int ret; + int ret, err; if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; @@ -347,8 +347,9 @@ static int verify_parent_transid(struct ret = 1; clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: - unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); 
return ret; } --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1053,18 +1053,14 @@ int try_lock_extent(struct extent_io_tre int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached, gfp_t mask) { - int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, - cached, mask); - BUG_ON(ret < 0); - return ret; + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, + mask); } int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { - int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, - mask); - BUG_ON(ret < 0); - return ret; + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, + mask); } /* @@ -1369,8 +1365,9 @@ again: ret = test_range_bit(tree, delalloc_start, delalloc_end, EXTENT_DELALLOC, 1, cached_state); if (!ret) { - unlock_extent_cached(tree, delalloc_start, delalloc_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(tree, delalloc_start, delalloc_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); __unlock_for_delalloc(inode, locked_page, delalloc_start, delalloc_end); cond_resched(); @@ -1807,7 +1804,9 @@ static void end_bio_extent_readpage(stru GFP_ATOMIC); BUG_ON(ret < 0); } - unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); + ret = unlock_extent_cached(tree, start, end, + &cached, GFP_ATOMIC); + BUG_ON(ret < 0); if (whole_page) { if (uptodate) { @@ -2001,7 +2000,8 @@ static int __extent_read_full_page(struc ordered = btrfs_lookup_ordered_extent(inode, start); if (!ordered) break; - unlock_extent(tree, start, end, GFP_NOFS); + ret = unlock_extent(tree, start, end, GFP_NOFS); + BUG_ON(ret < 0); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); } @@ -2031,15 +2031,17 @@ static int __extent_read_full_page(struc ret = set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); BUG_ON(ret < 0); - unlock_extent_cached(tree, cur, cur + iosize - 1, - &cached, 
GFP_NOFS); + ret = unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); break; } em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); if (IS_ERR_OR_NULL(em)) { SetPageError(page); - unlock_extent(tree, cur, end, GFP_NOFS); + ret = unlock_extent(tree, cur, end, GFP_NOFS); + BUG_ON(ret < 0); break; } extent_offset = cur - em->start; @@ -2082,8 +2084,9 @@ static int __extent_read_full_page(struc ret = set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); BUG_ON(ret < 0); - unlock_extent_cached(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + ret = unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); cur = cur + iosize; pg_offset += iosize; continue; @@ -2092,7 +2095,9 @@ static int __extent_read_full_page(struc if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1, NULL)) { check_page_uptodate(tree, page); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + ret = unlock_extent(tree, cur, cur + iosize - 1, + GFP_NOFS); + BUG_ON(ret < 0); cur = cur + iosize; pg_offset += iosize; continue; @@ -2102,7 +2107,9 @@ static int __extent_read_full_page(struc */ if (block_start == EXTENT_MAP_INLINE) { SetPageError(page); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + ret = unlock_extent(tree, cur, cur + iosize - 1, + GFP_NOFS); + BUG_ON(ret < 0); cur = cur + iosize; pg_offset += iosize; continue; @@ -2829,7 +2836,7 @@ static struct extent_map *get_extent_ski int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { - int ret = 0; + int ret = 0, err; u64 off = start; u64 max = start + len; u32 flags = 0; @@ -2989,8 +2996,9 @@ int extent_fiemap(struct inode *inode, s out_free: free_extent_map(em); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, + &cached_state, 
GFP_NOFS); + BUG_ON(err < 0); return ret; } --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1114,9 +1114,10 @@ again: ordered->file_offset + ordered->len > start_pos && ordered->file_offset < last_pos) { btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); page_cache_release(pages[i]); @@ -1134,9 +1135,10 @@ again: EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, GFP_NOFS); BUG_ON(err < 0); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, &cached_state, - GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); } for (i = 0; i < num_pages; i++) { clear_page_dirty_for_io(pages[i]); @@ -1577,7 +1579,7 @@ static long btrfs_fallocate(struct file u64 locked_end; u64 mask = BTRFS_I(inode)->root->sectorsize - 1; struct extent_map *em; - int ret; + int ret, err; alloc_start = offset & ~mask; alloc_end = (offset + len + mask) & ~mask; @@ -1624,9 +1626,10 @@ static long btrfs_fallocate(struct file ordered->file_offset + ordered->len > alloc_start && ordered->file_offset < alloc_end) { btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - alloc_start, locked_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + alloc_start, locked_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * we can''t wait on the range with the transaction * running or with the extent lock held @@ -1668,8 +1671,9 @@ static long btrfs_fallocate(struct file break; } } - unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, &cached_state, 
GFP_NOFS); + BUG_ON(err < 0); btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); out: @@ -1688,7 +1692,7 @@ static int find_desired_extent(struct in u64 orig_start = *offset; u64 len = i_size_read(inode); u64 last_end = 0; - int ret = 0; + int ret = 0, err; lockend = max_t(u64, root->sectorsize, lockend); if (lockend <= lockstart) @@ -1784,8 +1788,9 @@ static int find_desired_extent(struct in if (!ret) *offset = min(*offset, inode->i_size); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); return ret; } --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -549,7 +549,7 @@ int __btrfs_write_out_cache(struct btrfs int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; - int ret = -1; + int ret = -1, err; bool next_page = false; bool out_of_space = false; @@ -760,9 +760,10 @@ int __btrfs_write_out_cache(struct btrfs if (out_of_space) { btrfs_drop_pages(pages, num_pages); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, - i_size_read(inode) - 1, &cached_state, - GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ret = 0; goto out; } @@ -782,8 +783,10 @@ int __btrfs_write_out_cache(struct btrfs ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, bytes, &cached_state); btrfs_drop_pages(pages, num_pages); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, - i_size_read(inode) - 1, &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); if (ret) { ret = 0; --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -643,9 +643,11 @@ retry: kfree(async_extent->pages); async_extent->nr_pages = 0; async_extent->pages = NULL; - unlock_extent(io_tree, async_extent->start, - 
async_extent->start + - async_extent->ram_size - 1, GFP_NOFS); + ret = unlock_extent(io_tree, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + GFP_NOFS); + BUG_ON(ret < 0); goto retry; } @@ -1578,8 +1580,10 @@ again: ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, - page_end, &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); goto again; @@ -1591,8 +1595,9 @@ again: BUG_ON(ret < 0); ClearPageChecked(page); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, + page_end, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); out_page: unlock_page(page); page_cache_release(page); @@ -1789,9 +1794,11 @@ static int btrfs_finish_ordered_io(struc ordered_extent->len); BUG_ON(ret); } - unlock_extent_cached(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1, &cached_state, GFP_NOFS); + ret = unlock_extent_cached(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); @@ -3387,7 +3394,7 @@ static int btrfs_truncate_page(struct ad pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); struct page *page; - int ret = 0; + int ret = 0, err; u64 page_start; u64 page_end; @@ -3430,8 +3437,9 @@ again: ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(ret 
< 0); unlock_page(page); page_cache_release(page); btrfs_start_ordered_extent(inode, ordered, 1); @@ -3448,8 +3456,9 @@ again: ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); if (ret) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); goto out_unlock; } @@ -3462,8 +3471,9 @@ again: } ClearPageChecked(page); set_page_dirty(page); - unlock_extent_cached(io_tree, page_start, page_end, &cached_state, - GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); out_unlock: if (ret) @@ -3493,7 +3503,7 @@ int btrfs_cont_expand(struct inode *inod u64 last_byte; u64 cur_offset; u64 hole_size; - int err = 0; + int err = 0, err2; if (size <= hole_start) return 0; @@ -3508,8 +3518,9 @@ int btrfs_cont_expand(struct inode *inod ordered = btrfs_lookup_ordered_extent(inode, hole_start); if (!ordered) break; - unlock_extent_cached(io_tree, hole_start, block_end - 1, - &cached_state, GFP_NOFS); + err2 = unlock_extent_cached(io_tree, hole_start, block_end - 1, + &cached_state, GFP_NOFS); + BUG_ON(err2 < 0); btrfs_put_ordered_extent(ordered); } @@ -3556,8 +3567,9 @@ int btrfs_cont_expand(struct inode *inod } free_extent_map(em); - unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, - GFP_NOFS); + err2 = unlock_extent_cached(io_tree, hole_start, block_end - 1, + &cached_state, GFP_NOFS); + BUG_ON(err2 < 0); return err; } @@ -5624,8 +5636,9 @@ static int btrfs_get_blocks_direct(struc test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { free_extent_map(em); /* DIO will do one hole at a time, so just unlock a sector */ - unlock_extent(&BTRFS_I(inode)->io_tree, start, - start + root->sectorsize - 1, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, start, + start + root->sectorsize - 1, GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -5727,6 
+5740,7 @@ struct btrfs_dio_private { static void btrfs_endio_direct_read(struct bio *bio, int err) { + int ret; struct btrfs_dio_private *dip = bio->bi_private; struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; struct bio_vec *bvec = bio->bi_io_vec; @@ -5767,8 +5781,9 @@ static void btrfs_endio_direct_read(stru bvec++; } while (bvec <= bvec_end); - unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, - dip->logical_offset + dip->bytes - 1, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, + dip->logical_offset + dip->bytes - 1, GFP_NOFS); + BUG_ON(ret < 0); bio->bi_private = dip->private; kfree(dip->csums); @@ -5790,7 +5805,7 @@ static void btrfs_endio_direct_write(str struct extent_state *cached_state = NULL; u64 ordered_offset = dip->logical_offset; u64 ordered_bytes = dip->bytes; - int ret; + int ret, ret2; if (err) goto out_done; @@ -5856,9 +5871,11 @@ again: btrfs_update_inode(trans, root, inode); ret = 0; out_unlock: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, - ordered->file_offset + ordered->len - 1, - &cached_state, GFP_NOFS); + ret2 = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + ordered->file_offset, + ordered->file_offset + ordered->len - 1, + &cached_state, GFP_NOFS); + BUG_ON(ret2 < 0); out: btrfs_delalloc_release_metadata(inode, ordered->len); btrfs_end_transaction(trans, root); @@ -6247,8 +6264,9 @@ static ssize_t btrfs_direct_IO(int rw, s lockend - lockstart + 1); if (!ordered) break; - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, + lockend, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); cond_resched(); @@ -6468,7 +6486,7 @@ int btrfs_page_mkwrite(struct vm_area_st char *kaddr; unsigned long zero_start; loff_t size; - int ret; + int ret, err; u64 page_start; u64 page_end; @@ 
-6506,8 +6524,9 @@ again: */ ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); @@ -6530,8 +6549,9 @@ again: ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); if (ret) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ret = VM_FAULT_SIGBUS; goto out_unlock; } @@ -6556,7 +6576,9 @@ again: BTRFS_I(inode)->last_trans = root->fs_info->generation; BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; - unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); out_unlock: if (!ret) --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -781,7 +781,8 @@ static int should_defrag_range(struct in err = lock_extent(io_tree, start, start + len - 1, GFP_NOFS); BUG_ON(err < 0); em = btrfs_get_extent(inode, NULL, 0, start, len, 0); - unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); + err = unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); + BUG_ON(err < 0); if (IS_ERR(em)) return 0; @@ -912,9 +913,10 @@ again: ordered->file_offset + ordered->len > page_start && ordered->file_offset < page_end) { btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); for (i = 0; i < i_done; i++) { unlock_page(pages[i]); page_cache_release(pages[i]); @@ -945,9 +947,10 
@@ again: &cached_state); BUG_ON(ret < 0); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, &cached_state, - GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); for (i = 0; i < i_done; i++) { clear_page_dirty_for_io(pages[i]); @@ -2139,7 +2142,7 @@ static noinline long btrfs_ioctl_clone(s struct btrfs_key key; u32 nritems; int slot; - int ret; + int ret, err; u64 len = olen; u64 bs = root->fs_info->sb->s_blocksize; u64 hint_byte; @@ -2236,7 +2239,9 @@ static noinline long btrfs_ioctl_clone(s !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, EXTENT_DELALLOC, 0, NULL)) break; - unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(src)->io_tree, off, + off+len, GFP_NOFS); + BUG_ON(ret < 0); if (ordered) btrfs_put_ordered_extent(ordered); btrfs_wait_ordered_range(src, off, len); @@ -2443,7 +2448,8 @@ next: ret = 0; out: btrfs_release_path(path); - unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + err = unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + BUG_ON(err < 0); out_unlock: mutex_unlock(&src->i_mutex); mutex_unlock(&inode->i_mutex); --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1583,8 +1583,9 @@ int replace_file_extents(struct btrfs_tr btrfs_drop_extent_cache(inode, key.offset, end, 1); - unlock_extent(&BTRFS_I(inode)->io_tree, - key.offset, end, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, + key.offset, end, GFP_NOFS); + BUG_ON(ret < 0); } } @@ -1958,7 +1959,9 @@ static int invalidate_extent_cache(struc GFP_NOFS); BUG_ON(ret < 0); btrfs_drop_extent_cache(inode, start, end, 1); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, + GFP_NOFS); + BUG_ON(ret < 0); } return 0; } @@ -2860,6 +2863,7 @@ int prealloc_file_extent_cluster(struct goto out; while (nr < cluster->nr) { + 
int err; start = cluster->boundary[nr] - offset; if (nr + 1 < cluster->nr) end = cluster->boundary[nr + 1] - 1 - offset; @@ -2873,7 +2877,9 @@ int prealloc_file_extent_cluster(struct ret = btrfs_prealloc_file_range(inode, 0, start, num_bytes, num_bytes, end + 1, &alloc_hint); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + err = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, + GFP_NOFS); + BUG_ON(err < 0); if (ret) break; nr++; @@ -2892,7 +2898,7 @@ int setup_extent_mapping(struct inode *i struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; - int ret = 0; + int ret = 0, err; em = alloc_extent_map(); if (!em) @@ -2917,7 +2923,8 @@ int setup_extent_mapping(struct inode *i } btrfs_drop_extent_cache(inode, start, end, 0); } - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + err = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + BUG_ON(err < 0); return ret; } @@ -3016,8 +3023,9 @@ static int relocate_file_extent_cluster( BUG_ON(ret < 0); set_page_dirty(page); - unlock_extent(&BTRFS_I(inode)->io_tree, - page_start, page_end, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, + page_start, page_end, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); page_cache_release(page); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 07/23] btrfs: Make pin_down_extent return void
pin_down_extent performs some operations which can''t fail and then calls set_extent_dirty, which has two failure cases via set_extent_bit: 1) Return -EEXIST if exclusive bits are set - Since it doesn''t use any exclusive bits, this failure case can''t occur. 2) Return -ENOMEM if memory can''t be allocated - Since it''s called with gfp_flags & __GFP_NOFAIL, this failure case can''t occur. With no failure cases, it can return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4175,9 +4175,9 @@ static u64 first_logical_byte(struct btr return bytenr; } -static int pin_down_extent(struct btrfs_root *root, - struct btrfs_block_group_cache *cache, - u64 bytenr, u64 num_bytes, int reserved) +static void pin_down_extent(struct btrfs_root *root, + struct btrfs_block_group_cache *cache, + u64 bytenr, u64 num_bytes, int reserved) { spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); @@ -4194,7 +4194,6 @@ static int pin_down_extent(struct btrfs_ /* __GFP_NOFAIL means it can''t return -ENOMEM */ set_extent_dirty(root->fs_info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); - return 0; } /* -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 08/23] btrfs: Push up btrfs_pin_extent failures
btrfs_pin_extent looks up a block group and then calls pin_down_extent with it. If the lookup fails, it should return -ENOENT to allow callers to handle the error condition. For the three existing callers, it is a logic error if the lookup fails and a panic will occur. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 20 +++++++++++++++----- fs/btrfs/tree-log.c | 10 +++++++--- 2 files changed, 22 insertions(+), 8 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2075,8 +2075,14 @@ static int run_one_delayed_ref(struct bt BUG_ON(extent_op); head = btrfs_delayed_node_to_head(node); if (insert_reserved) { - btrfs_pin_extent(root, node->bytenr, - node->num_bytes, 1); + ret = btrfs_pin_extent(root, node->bytenr, + node->num_bytes, 1); + if (ret) + btrfs_panic(root->fs_info, ret, + "Cannot pin extent in range " + "%llu(%llu)\n", + node->bytenr, node->num_bytes); + if (head->is_data) { ret = btrfs_del_csums(trans, root, node->bytenr, @@ -4205,7 +4211,8 @@ int btrfs_pin_extent(struct btrfs_root * struct btrfs_block_group_cache *cache; cache = btrfs_lookup_block_group(root->fs_info, bytenr); - BUG_ON(!cache); + if (cache == NULL) + return -ENOENT; pin_down_extent(root, cache, bytenr, num_bytes, reserved); @@ -4765,8 +4772,11 @@ int btrfs_free_extent(struct btrfs_trans if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID); /* unlocks the pinned mutex */ - btrfs_pin_extent(root, bytenr, num_bytes, 1); - ret = 0; + ret = btrfs_pin_extent(root, bytenr, num_bytes, 1); + if (ret) + btrfs_panic(root->fs_info, ret, "Cannot pin " + "extent in range %llu(%llu)\n", + bytenr, num_bytes); } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes, parent, root_objectid, (int)owner, --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -275,9 +275,13 @@ static int process_one_buffer(struct btr struct extent_buffer *eb, struct walk_control *wc, u64 gen) { 
- if (wc->pin) - btrfs_pin_extent(log->fs_info->extent_root, - eb->start, eb->len, 0); + if (wc->pin) { + int ret = btrfs_pin_extent(log->fs_info->extent_root, + eb->start, eb->len, 0); + if (ret) + btrfs_panic(log->fs_info, ret, "Cannot pin extent in " + "range %llu(%llu)\n", eb->start, eb->len); + } if (btrfs_buffer_uptodate(eb, gen)) { if (wc->write) -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 09/23] btrfs: btrfs_drop_snapshot should return int
Commit cb1b69f4 (Btrfs: forced readonly when btrfs_drop_snapshot() fails) made btrfs_drop_snapshot return void because there were no callers checking the return value. That is the wrong order to handle error propagation since the caller will have no idea that an error has occurred and continue on as if nothing went wrong. The next patch checks the btrfs_drop_snapshot return values, so re-add it here. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/extent-tree.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2366,8 +2366,8 @@ static inline int btrfs_insert_empty_ite int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); -void btrfs_drop_snapshot(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, int update_ref); +int btrfs_drop_snapshot(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int update_ref); int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6303,8 +6303,8 @@ static noinline int walk_up_tree(struct * also make sure backrefs for the shared block and all lower level * blocks are properly updated. 
*/ -void btrfs_drop_snapshot(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, int update_ref) +int btrfs_drop_snapshot(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int update_ref) { struct btrfs_path *path; struct btrfs_trans_handle *trans; @@ -6469,7 +6469,7 @@ out_free: out: if (err) btrfs_std_error(root->fs_info, err); - return; + return err; } /* -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 10/23] btrfs: Push up non-looped btrfs_start_transaction failures
This patch handles btrfs_start_transaction failures that don''t occur in a loop and are obvious to simply push up. In all cases except the mark_garbage_root case, the error is already handled by BUG_ON in the caller. Update v2: This version also checks the returns from btrfs_drop_snapshot. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 6 +++++- fs/btrfs/relocation.c | 9 ++++++--- fs/btrfs/transaction.c | 6 ++++-- fs/btrfs/tree-log.c | 5 ++++- fs/btrfs/volumes.c | 3 ++- 5 files changed, 21 insertions(+), 8 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo } trans = btrfs_start_transaction(tree_root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) { + kfree(wc); + btrfs_free_path(path); + return PTR_ERR(trans); + } if (block_rsv) trans->block_rsv = block_rsv; --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2251,7 +2251,8 @@ again: } else { list_del_init(&reloc_root->root_list); } - btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); + ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); + BUG_ON(ret); } if (found) { @@ -4096,7 +4097,8 @@ static noinline_for_stack int mark_garba int ret; trans = btrfs_start_transaction(root->fs_info->tree_root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); @@ -4176,7 +4178,8 @@ int btrfs_recover_relocation(struct btrf err = ret; goto out; } - mark_garbage_root(reloc_root); + ret = mark_garbage_root(reloc_root); + BUG_ON(ret); } } --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1400,6 +1400,7 @@ int btrfs_commit_transaction(struct btrf */ int btrfs_clean_old_snapshots(struct btrfs_root *root) { + int ret; LIST_HEAD(list); struct btrfs_fs_info *fs_info = root->fs_info; @@ -1415,9 +1416,10 @@ int btrfs_clean_old_snapshots(struct btr if (btrfs_header_backref_rev(root->node) < 
BTRFS_MIXED_BACKREF_REV) - btrfs_drop_snapshot(root, NULL, 0); + ret = btrfs_drop_snapshot(root, NULL, 0); else - btrfs_drop_snapshot(root, NULL, 1); + ret = btrfs_drop_snapshot(root, NULL, 1); + BUG_ON(ret); } return 0; } --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs fs_info->log_root_recovering = 1; trans = btrfs_start_transaction(fs_info->tree_root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) { + btrfs_free_path(path); + return PTR_ERR(trans); + } wc.trans = trans; wc.pin = 1; --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b return ret; trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); lock_chunks(root); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 11/23] btrfs: Make set_range_writeback return void
set_range_writeback has no error conditions that aren''t fatal logic errors. This patch makes it return void to eliminate an error site. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent_io.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1060,7 +1060,7 @@ int unlock_extent(struct extent_io_tree /* * helper function to set both pages and extents in the tree writeback */ -static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) +static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) { unsigned long index = start >> PAGE_CACHE_SHIFT; unsigned long end_index = end >> PAGE_CACHE_SHIFT; @@ -1068,12 +1068,14 @@ static int set_range_writeback(struct ex while (index <= end_index) { page = find_get_page(tree->mapping, index); - BUG_ON(!page); + if (!page) + btrfs_panic(tree_fs_info(tree), -ENOENT, + "Page not found in extent io tree at " + "offset %llu", index << PAGE_CACHE_SHIFT); set_page_writeback(page); page_cache_release(page); index++; } - return 0; } /* find the first state struct with ''bits'' set after ''start'', and -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 12/23] btrfs: extent_io.c: Make functions with no error conditions return void
wait_on_state, wait_extent_bit, __unlock_for_delalloc, check_page_uptodate, check_page_locked, check_page_writeback, and clear_extent_buffer_dirty have no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent_io.c | 29 ++++++++++------------------- fs/btrfs/extent_io.h | 6 +++--- 2 files changed, 13 insertions(+), 22 deletions(-) --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -613,8 +613,8 @@ search_again: goto again; } -static int wait_on_state(struct extent_io_tree *tree, - struct extent_state *state) +static void wait_on_state(struct extent_io_tree *tree, + struct extent_state *state) __releases(tree->lock) __acquires(tree->lock) { @@ -624,7 +624,6 @@ static int wait_on_state(struct extent_i schedule(); spin_lock(&tree->lock); finish_wait(&state->wq, &wait); - return 0; } /* @@ -632,7 +631,7 @@ static int wait_on_state(struct extent_i * The range [start, end] is inclusive. * The tree lock is taken by this function */ -int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) +void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) { struct extent_state *state; struct rb_node *node; @@ -669,7 +668,6 @@ again: } out: spin_unlock(&tree->lock); - return 0; } static void set_state_bits(struct extent_io_tree *tree, @@ -1197,7 +1195,7 @@ out: return found; } -static noinline int __unlock_for_delalloc(struct inode *inode, +static noinline void __unlock_for_delalloc(struct inode *inode, struct page *locked_page, u64 start, u64 end) { @@ -1209,7 +1207,7 @@ static noinline int __unlock_for_delallo int i; if (index == locked_page->index && end_index == index) - return 0; + return; while (nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, @@ -1224,7 +1222,6 @@ static noinline int __unlock_for_delallo index += ret; cond_resched(); } - return 0; } static noinline int lock_delalloc_pages(struct inode *inode, @@ -1619,39 +1616,34 @@ int 
test_range_bit(struct extent_io_tree * helper function to set a given page up to date if all the * extents in the tree for that page are up to date */ -static int check_page_uptodate(struct extent_io_tree *tree, - struct page *page) +static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) { u64 start = (u64)page->index << PAGE_CACHE_SHIFT; u64 end = start + PAGE_CACHE_SIZE - 1; if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) SetPageUptodate(page); - return 0; } /* * helper function to unlock a page if all the extents in the tree * for that page are unlocked */ -static int check_page_locked(struct extent_io_tree *tree, - struct page *page) +static void check_page_locked(struct extent_io_tree *tree, struct page *page) { u64 start = (u64)page->index << PAGE_CACHE_SHIFT; u64 end = start + PAGE_CACHE_SIZE - 1; if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) unlock_page(page); - return 0; } /* * helper function to end page writeback if all the extents * in the tree for that page are done with writeback */ -static int check_page_writeback(struct extent_io_tree *tree, - struct page *page) +static void check_page_writeback(struct extent_io_tree *tree, + struct page *page) { end_page_writeback(page); - return 0; } /* lots and lots of room for performance fixes in the end_bio funcs */ @@ -3251,7 +3243,7 @@ void free_extent_buffer(struct extent_bu WARN_ON(1); } -int clear_extent_buffer_dirty(struct extent_io_tree *tree, +void clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb) { unsigned long i; @@ -3282,7 +3274,6 @@ int clear_extent_buffer_dirty(struct ext spin_unlock_irq(&page->mapping->tree_lock); unlock_page(page); } - return 0; } int set_extent_buffer_dirty(struct extent_io_tree *tree, --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -274,9 +274,9 @@ void memmove_extent_buffer(struct extent unsigned long src_offset, unsigned long len); void memset_extent_buffer(struct extent_buffer 
*eb, char c, unsigned long start, unsigned long len); -int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); -int clear_extent_buffer_dirty(struct extent_io_tree *tree, - struct extent_buffer *eb); +void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); +void clear_extent_buffer_dirty(struct extent_io_tree *tree, + struct extent_buffer *eb); int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); int set_extent_buffer_uptodate(struct extent_io_tree *tree, -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 13/23] btrfs: volumes.c: Make functions with no error conditions return void
btrfs_cleanup_fs_uuids, run_scheduled_bios, btrfs_close_extra_devices, schedule_bio, and fill_device_from_item have no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/volumes.c | 25 +++++++++---------------- fs/btrfs/volumes.h | 4 ++-- 2 files changed, 11 insertions(+), 18 deletions(-) --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -65,7 +65,7 @@ static void free_fs_devices(struct btrfs kfree(fs_devices); } -int btrfs_cleanup_fs_uuids(void) +void btrfs_cleanup_fs_uuids(void) { struct btrfs_fs_devices *fs_devices; @@ -75,7 +75,6 @@ int btrfs_cleanup_fs_uuids(void) list_del(&fs_devices->list); free_fs_devices(fs_devices); } - return 0; } static noinline struct btrfs_device *__find_device(struct list_head *head, @@ -128,7 +127,7 @@ static void requeue_list(struct btrfs_pe * the list if the block device is congested. This way, multiple devices * can make progress from a single worker thread. */ -static noinline int run_scheduled_bios(struct btrfs_device *device) +static noinline void run_scheduled_bios(struct btrfs_device *device) { struct bio *pending; struct backing_dev_info *bdi; @@ -291,7 +290,6 @@ loop_lock: done: blk_finish_plug(&plug); - return 0; } static void pending_bios_fn(struct btrfs_work *work) @@ -422,7 +420,7 @@ error: return ERR_PTR(-ENOMEM); } -int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) +void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device, *next; @@ -455,7 +453,6 @@ again: } mutex_unlock(&uuid_mutex); - return 0; } static void __free_device(struct work_struct *work) @@ -3202,9 +3199,9 @@ struct async_sched { * This will add one bio to the pending list for a device and make sure * the work struct is scheduled. 
*/ -static noinline int schedule_bio(struct btrfs_root *root, - struct btrfs_device *device, - int rw, struct bio *bio) +static noinline void schedule_bio(struct btrfs_root *root, + struct btrfs_device *device, + int rw, struct bio *bio) { int should_queue = 1; struct btrfs_pending_bios *pending_bios; @@ -3214,7 +3211,6 @@ static noinline int schedule_bio(struct bio_get(bio); submit_bio(rw, bio); bio_put(bio); - return 0; } /* @@ -3248,7 +3244,6 @@ static noinline int schedule_bio(struct if (should_queue) btrfs_queue_worker(&root->fs_info->submit_workers, &device->work); - return 0; } int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, @@ -3449,9 +3444,9 @@ static int read_one_chunk(struct btrfs_r return 0; } -static int fill_device_from_item(struct extent_buffer *leaf, - struct btrfs_dev_item *dev_item, - struct btrfs_device *device) +static void fill_device_from_item(struct extent_buffer *leaf, + struct btrfs_dev_item *dev_item, + struct btrfs_device *device) { unsigned long ptr; @@ -3466,8 +3461,6 @@ static int fill_device_from_item(struct ptr = (unsigned long)btrfs_device_uuid(dev_item); read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); - - return 0; } static int open_seed_devices(struct btrfs_root *root, u8 *fsid) --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -195,12 +195,12 @@ int btrfs_open_devices(struct btrfs_fs_d int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, struct btrfs_fs_devices **fs_devices_ret); int btrfs_close_devices(struct btrfs_fs_devices *fs_devices); -int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices); +void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices); int btrfs_add_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device); int btrfs_rm_device(struct btrfs_root *root, char *device_path); -int btrfs_cleanup_fs_uuids(void); +void btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_mapping_tree 
*map_tree, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 new_size); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 14/23] btrfs: async-thread.c: Make functions with no error conditions return void
run_ordered_completions, btrfs_stop_workers, btrfs_requeue_work, and btrfs_queue_worker don''t have any error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/async-thread.c | 31 ++++++++++--------------------- fs/btrfs/async-thread.h | 7 ++++--- 2 files changed, 14 insertions(+), 24 deletions(-) --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -95,7 +95,6 @@ static void start_new_worker_func(struct static int start_new_worker(struct btrfs_workers *queue) { struct worker_start *start; - int ret; start = kzalloc(sizeof(*start), GFP_NOFS); if (!start) @@ -103,10 +102,8 @@ static int start_new_worker(struct btrfs start->work.func = start_new_worker_func; start->queue = queue; - ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work); - if (ret) - kfree(start); - return ret; + btrfs_queue_worker(queue->atomic_worker_start, &start->work); + return 0; } /* @@ -177,11 +174,11 @@ out: spin_unlock_irqrestore(&workers->lock, flags); } -static noinline int run_ordered_completions(struct btrfs_workers *workers, - struct btrfs_work *work) +static noinline void run_ordered_completions(struct btrfs_workers *workers, + struct btrfs_work *work) { if (!workers->ordered) - return 0; + return; set_bit(WORK_DONE_BIT, &work->flags); @@ -219,7 +216,6 @@ static noinline int run_ordered_completi } spin_unlock(&workers->order_lock); - return 0; } static void put_worker(struct btrfs_worker_thread *worker) @@ -405,7 +401,7 @@ again: /* * this will wait for all the worker threads to shutdown */ -int btrfs_stop_workers(struct btrfs_workers *workers) +void btrfs_stop_workers(struct btrfs_workers *workers) { struct list_head *cur; struct btrfs_worker_thread *worker; @@ -433,7 +429,6 @@ int btrfs_stop_workers(struct btrfs_work put_worker(worker); } spin_unlock_irq(&workers->lock); - return 0; } /* @@ -618,14 +613,14 @@ found: * it was taken from. 
It is intended for use with long running work functions * that make some progress and want to give the cpu up for others. */ -int btrfs_requeue_work(struct btrfs_work *work) +void btrfs_requeue_work(struct btrfs_work *work) { struct btrfs_worker_thread *worker = work->worker; unsigned long flags; int wake = 0; if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags)) - goto out; + return; spin_lock_irqsave(&worker->lock, flags); if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) @@ -652,9 +647,6 @@ int btrfs_requeue_work(struct btrfs_work if (wake) wake_up_process(worker->task); spin_unlock_irqrestore(&worker->lock, flags); -out: - - return 0; } void btrfs_set_work_high_prio(struct btrfs_work *work) @@ -665,7 +657,7 @@ void btrfs_set_work_high_prio(struct btr /* * places a struct btrfs_work into the pending queue of one of the kthreads */ -int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work) +void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work) { struct btrfs_worker_thread *worker; unsigned long flags; @@ -673,7 +665,7 @@ int btrfs_queue_worker(struct btrfs_work /* don''t requeue something already on a list */ if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags)) - goto out; + return; worker = find_worker(workers); if (workers->ordered) { @@ -712,7 +704,4 @@ int btrfs_queue_worker(struct btrfs_work if (wake) wake_up_process(worker->task); spin_unlock_irqrestore(&worker->lock, flags); - -out: - return 0; } --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -109,11 +109,12 @@ struct btrfs_workers { char *name; }; -int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work); +void btrfs_queue_worker(struct btrfs_workers *workers, + struct btrfs_work *work); int btrfs_start_workers(struct btrfs_workers *workers, int num_workers); -int btrfs_stop_workers(struct btrfs_workers *workers); +void btrfs_stop_workers(struct btrfs_workers *workers); void btrfs_init_workers(struct btrfs_workers *workers, 
char *name, int max, struct btrfs_workers *async_starter); -int btrfs_requeue_work(struct btrfs_work *work); +void btrfs_requeue_work(struct btrfs_work *work); void btrfs_set_work_high_prio(struct btrfs_work *work); #endif -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 15/23] btrfs: tree-log.c: Make functions with no error conditions return void
btrfs_end_log_trans and wait_for_writer have no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/tree-log.c | 6 ++---- fs/btrfs/tree-log.h | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -212,14 +212,13 @@ int btrfs_pin_log_trans(struct btrfs_roo * indicate we''re done making changes to the log tree * and wake up anyone waiting to do a sync */ -int btrfs_end_log_trans(struct btrfs_root *root) +void btrfs_end_log_trans(struct btrfs_root *root) { if (atomic_dec_and_test(&root->log_writers)) { smp_mb(); if (waitqueue_active(&root->log_writer_wait)) wake_up(&root->log_writer_wait); } - return 0; } @@ -1945,7 +1944,7 @@ static int wait_log_commit(struct btrfs_ return 0; } -static int wait_for_writer(struct btrfs_trans_handle *trans, +static void wait_for_writer(struct btrfs_trans_handle *trans, struct btrfs_root *root) { DEFINE_WAIT(wait); @@ -1959,7 +1958,6 @@ static int wait_for_writer(struct btrfs_ mutex_lock(&root->log_mutex); finish_wait(&root->log_writer_wait, &wait); } - return 0; } /* --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -38,7 +38,7 @@ int btrfs_del_inode_ref_in_log(struct bt struct btrfs_root *root, const char *name, int name_len, struct inode *inode, u64 dirid); -int btrfs_end_log_trans(struct btrfs_root *root); +void btrfs_end_log_trans(struct btrfs_root *root); int btrfs_pin_log_trans(struct btrfs_root *root); int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 16/23] btrfs: Make btrfs_init_compress return void
btrfs_init_compress doesn''t have any failure conditions, so return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/compression.h | 2 +- fs/btrfs/super.c | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -732,7 +734,7 @@ struct btrfs_compress_op *btrfs_compress &btrfs_lzo_compress, }; -int __init btrfs_init_compress(void) +void __init btrfs_init_compress(void) { int i; @@ -742,7 +744,6 @@ int __init btrfs_init_compress(void) atomic_set(&comp_alloc_workspace[i], 0); init_waitqueue_head(&comp_workspace_wait[i]); } - return 0; } /* --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,7 +19,7 @@ #ifndef __BTRFS_COMPRESSION_ #define __BTRFS_COMPRESSION_ -int btrfs_init_compress(void); +void btrfs_init_compress(void); void btrfs_exit_compress(void); int btrfs_compress_pages(int type, struct address_space *mapping, --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1283,9 +1283,7 @@ static int __init init_btrfs_fs(void) if (err) return err; - err = btrfs_init_compress(); - if (err) - goto free_sysfs; + btrfs_init_compress(); err = btrfs_init_cachep(); if (err) @@ -1326,7 +1324,6 @@ free_cachep: btrfs_destroy_cachep(); free_compress: btrfs_exit_compress(); -free_sysfs: btrfs_exit_sysfs(); return err; } -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 17/23] btrfs: Make btrfs_invalidate_inodes return void
btrfs_invalidate_inodes has no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/ctree.h | 2 +- fs/btrfs/inode.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2590,7 +2590,7 @@ void btrfs_orphan_post_snapshot(struct b void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); -int btrfs_invalidate_inodes(struct btrfs_root *root); +void btrfs_invalidate_inodes(struct btrfs_root *root); void btrfs_add_delayed_iput(struct inode *inode); void btrfs_run_delayed_iputs(struct btrfs_root *root); int btrfs_prealloc_file_range(struct inode *inode, int mode, --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3886,7 +3886,7 @@ static void inode_tree_del(struct inode } } -int btrfs_invalidate_inodes(struct btrfs_root *root) +void btrfs_invalidate_inodes(struct btrfs_root *root) { struct rb_node *node; struct rb_node *prev; @@ -3946,7 +3946,6 @@ again: node = rb_next(node); } spin_unlock(&root->inode_lock); - return 0; } static int btrfs_init_locked_inode(struct inode *inode, void *p) -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 18/23] btrfs: disk-io.c: Make functions with no error conditions return void
btrfs_destroy_delalloc_inodes and __setup_root have no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/disk-io.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -54,7 +54,7 @@ static int btrfs_destroy_ordered_extents static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_root *root); static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); -static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); +static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root); static int btrfs_destroy_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark); @@ -1058,10 +1058,9 @@ int clean_tree_block(struct btrfs_trans_ return 0; } -static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, - u32 stripesize, struct btrfs_root *root, - struct btrfs_fs_info *fs_info, - u64 objectid) +static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, + u32 stripesize, struct btrfs_root *root, + struct btrfs_fs_info *fs_info, u64 objectid) { root->node = NULL; root->commit_root = NULL; @@ -1114,7 +1113,6 @@ static int __setup_root(u32 nodesize, u3 root->defrag_running = 0; root->root_key.objectid = objectid; root->anon_dev = 0; - return 0; } static int find_and_setup_root(struct btrfs_root *tree_root, @@ -2939,7 +2937,7 @@ static int btrfs_destroy_pending_snapsho return 0; } -static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) +static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) { struct btrfs_inode *btrfs_inode; struct list_head splice; @@ -2959,8 +2957,6 @@ static int btrfs_destroy_delalloc_inodes } spin_unlock(&root->fs_info->delalloc_lock); - - return 0; } static int btrfs_destroy_marked_extents(struct btrfs_root *root, -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to 
majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:22 UTC
[patch v3 19/23] btrfs: extent-tree.c: Make functions with no error conditions return void
btrfs_prepare_extent_commit, btrfs_set_block_group_rw, and setup_inline_extent_backref have no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/ctree.h | 8 ++++---- fs/btrfs/extent-tree.c | 20 +++++++++----------- 2 files changed, 13 insertions(+), 15 deletions(-) --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2201,8 +2201,8 @@ int btrfs_free_extent(struct btrfs_trans int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, u64 num_bytes, int reserve, int sinfo); -int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, - struct btrfs_root *root); +void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, @@ -2264,8 +2264,8 @@ int btrfs_truncate_reserve_metadata(stru struct btrfs_block_rsv *rsv); int btrfs_set_block_group_ro(struct btrfs_root *root, struct btrfs_block_group_cache *cache); -int btrfs_set_block_group_rw(struct btrfs_root *root, - struct btrfs_block_group_cache *cache); +void btrfs_set_block_group_rw(struct btrfs_root *root, + struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); int btrfs_error_unpin_extent_range(struct btrfs_root *root, --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1539,7 +1539,7 @@ out: * helper to add new inline back ref */ static noinline_for_stack -int setup_inline_extent_backref(struct btrfs_trans_handle *trans, +void setup_inline_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, @@ -1599,7 +1599,6 @@ int setup_inline_extent_backref(struct b 
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); } btrfs_mark_buffer_dirty(leaf); - return 0; } static int lookup_extent_backref(struct btrfs_trans_handle *trans, @@ -1718,10 +1717,11 @@ int insert_inline_extent_backref(struct ret = update_inline_extent_backref(trans, root, path, iref, refs_to_add, extent_op); } else if (ret == -ENOENT) { - ret = setup_inline_extent_backref(trans, root, path, iref, - parent, root_objectid, - owner, offset, refs_to_add, - extent_op); + ret = 0; + setup_inline_extent_backref(trans, root, path, iref, + parent, root_objectid, + owner, offset, refs_to_add, + extent_op); } return ret; } @@ -4263,8 +4263,8 @@ int btrfs_update_reserved_bytes(struct b return ret; } -int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, - struct btrfs_root *root) +void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, + struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_caching_control *next; @@ -4293,7 +4293,6 @@ int btrfs_prepare_extent_commit(struct b up_write(&fs_info->extent_commit_sem); update_global_block_rsv(fs_info); - return 0; } static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) @@ -6719,7 +6718,7 @@ u64 btrfs_account_ro_block_groups_free_s return free_bytes; } -int btrfs_set_block_group_rw(struct btrfs_root *root, +void btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache) { struct btrfs_space_info *sinfo = cache->space_info; @@ -6735,7 +6734,6 @@ int btrfs_set_block_group_rw(struct btrf cache->ro = 0; spin_unlock(&cache->lock); spin_unlock(&sinfo->lock); - return 0; } /* -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:23 UTC
[patch v3 20/23] btrfs: file.c: Make functions with no error conditions return void
btrfs_run_defrag_inodes contains no error conditions and should return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/ctree.h | 2 +- fs/btrfs/file.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2612,7 +2612,7 @@ int btrfs_defrag_file(struct inode *inod /* file.c */ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, struct inode *inode); -int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); +void btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -191,7 +191,7 @@ struct inode_defrag *btrfs_find_defrag_i * run through the list of inodes in the FS that need * defragging */ -int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) +void btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) { struct inode_defrag *defrag; struct btrfs_root *inode_root; @@ -294,7 +294,6 @@ next_free: * wait for the defragger to stop */ wake_up(&fs_info->transaction_wait); - return 0; } /* simple helper to fault in pages and copy. This should go away -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:23 UTC
[patch v3 21/23] btrfs: simplify btrfs_submit_bio_hook
btrfs_submit_bio_hook currently calls btrfs_bio_wq_end_io in either case of an if statement that determines one of the arguments. This patch moves the function call outside of the if statement and uses it to only determine the different argument. This allows us to catch an error in one place in a more visually obvious way. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/inode.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1479,13 +1479,14 @@ static int btrfs_submit_bio_hook(struct struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; int skip_sum; + int metadata = 0; skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; if (btrfs_is_free_space_inode(root, inode)) - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); - else - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + metadata = 2; + + ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); BUG_ON(ret); if (!(rw & REQ_WRITE)) { -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:23 UTC
[patch v3 22/23] btrfs: Factor out tree->ops->merge_bio_hook call
In submit_extent_page, there's a visually noisy if statement that, in the midst of other conditions, does the tree dependency for tree->ops and tree->ops->merge_bio_hook before calling it, and then another condition afterwards. If an error is returned from merge_bio_hook, there's no way to catch it. It's considered a routine "1" return value instead of a failure. This patch factors out the dependency check into a new local merge_bio routine and BUG's on an error. The if statement is less noisy as a side-effect. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent_io.c | 17 ++++++++++++++--- fs/btrfs/inode.c | 5 +++-- 2 files changed, 17 insertions(+), 5 deletions(-) --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1871,6 +1871,19 @@ static int submit_one_bio(int rw, struct return ret; } +static int merge_bio(struct extent_io_tree *tree, struct page *page, + unsigned long offset, size_t size, struct bio *bio, + unsigned long bio_flags) +{ + int ret = 0; + if (tree->ops && tree->ops->merge_bio_hook) + ret = tree->ops->merge_bio_hook(page, offset, size, bio, + bio_flags); + BUG_ON(ret < 0); + return ret; + +} + static int submit_extent_page(int rw, struct extent_io_tree *tree, struct page *page, sector_t sector, size_t size, unsigned long offset, @@ -1899,9 +1912,7 @@ static int submit_extent_page(int rw, st sector; if (prev_bio_flags != bio_flags || !contig || - (tree->ops && tree->ops->merge_bio_hook && - tree->ops->merge_bio_hook(page, offset, page_size, bio, - bio_flags)) || + merge_bio(tree, page, offset, page_size, bio, bio_flags) || bio_add_page(bio, page, page_size, offset) < page_size) { ret = submit_one_bio(rw, bio, mirror_num, prev_bio_flags); --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1425,10 +1425,11 @@ int btrfs_merge_bio_hook(struct page *pa map_length = length; ret = btrfs_map_block(map_tree, READ, logical, &map_length, NULL, 0); - + /* Will always return 0 or 1 with map_multi == NULL */ + BUG_ON(ret < 0); if 
(map_length < length + size) return 1; - return ret; + return 0; } /* -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
jeffm@suse.com
2011-Sep-09 00:23 UTC
[patch v3 23/23] btrfs: Push up ->submit_bio_hook failures
This pushes failures from the submit_bio_hook callbacks, btrfs_submit_bio_hook and btree_submit_bio_hook into the callers, including callers of submit_one_bio where it catches the failures with BUG_ON. It also pushes up through the ->readpage_io_failed_hook to end_bio_extent_writepage where the error is already caught with BUG_ON. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/extent_io.c | 29 +++++++++++++++++++++-------- fs/btrfs/inode.c | 3 ++- 3 files changed, 26 insertions(+), 12 deletions(-) --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -811,9 +811,9 @@ static int btree_submit_bio_hook(struct { int ret; - ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, - bio, 1); - BUG_ON(ret); + ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, bio, 1); + if (ret) + return ret; if (!(rw & REQ_WRITE)) { /* --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1693,6 +1693,7 @@ static void end_bio_extent_writepage(str uptodate = (err == 0); continue; } + BUG_ON(ret < 0); } if (!uptodate) { @@ -1785,6 +1786,7 @@ static void end_bio_extent_readpage(stru uncache_state(&cached); continue; } + BUG_ON(ret < 0); } if (uptodate) { @@ -1910,6 +1912,7 @@ static int submit_extent_page(int rw, st bio_add_page(bio, page, page_size, offset) < page_size) { ret = submit_one_bio(rw, bio, mirror_num, prev_bio_flags); + BUG_ON(ret < 0); bio = NULL; } else { return 0; @@ -1930,8 +1933,10 @@ static int submit_extent_page(int rw, st if (bio_ret) *bio_ret = bio; - else + else { ret = submit_one_bio(rw, bio, mirror_num, bio_flags); + BUG_ON(ret < 0); + } return ret; } @@ -2154,8 +2159,10 @@ int extent_read_full_page(struct extent_ ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); - if (bio) + if (bio) { ret = submit_one_bio(READ, bio, 0, bio_flags); + BUG_ON(ret < 0); + } return ret; } @@ -2553,10 +2560,12 @@ retry: static void flush_epd_write_bio(struct extent_page_data *epd) { if (epd->bio) { + int ret; if 
(epd->sync_io) - submit_one_bio(WRITE_SYNC, epd->bio, 0, 0); + ret = submit_one_bio(WRITE_SYNC, epd->bio, 0, 0); else - submit_one_bio(WRITE, epd->bio, 0, 0); + ret = submit_one_bio(WRITE, epd->bio, 0, 0); + BUG_ON(ret < 0); epd->bio = NULL; } } @@ -2672,8 +2681,10 @@ int extent_readpages(struct extent_io_tr page_cache_release(page); } BUG_ON(!list_empty(pages)); - if (bio) - submit_one_bio(READ, bio, 0, bio_flags); + if (bio) { + int ret = submit_one_bio(READ, bio, 0, bio_flags); + BUG_ON(ret < 0); + } return 0; } @@ -3505,8 +3516,10 @@ int read_extent_buffer_pages(struct exte } } - if (bio) - submit_one_bio(READ, bio, mirror_num, bio_flags); + if (bio) { + err = submit_one_bio(READ, bio, mirror_num, bio_flags); + BUG_ON(err < 0); + } if (ret || !wait) return ret; --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1488,7 +1488,8 @@ static int btrfs_submit_bio_hook(struct metadata = 2; ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); - BUG_ON(ret); + if (ret) + return ret; if (!(rw & REQ_WRITE)) { if (bio_flags & EXTENT_BIO_COMPRESSED) { -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 On 09/08/2011 08:22 PM, jeffm@suse.com wrote:> Hi all - > > The following 23 patches add more error handling to the btrfs > code: > > - Add btrfs_panic - Catch locking failures in > {set,clear}_extent_bit - Push up set_extent_bit errors to callers - > Push up lock_extent errors to callers - Push up clear_extent_bit > errors to callers - Push up unlock_extent errors to callers - Make > pin_down_extent return void - Push up btrfs_pin_extent errors to > callers - Push up non-looped btrfs_transaction_start errors to > callers > > New in this version: - Make functions with no return value return > void (11 patches) - Simplify btrfs_submit_bio_hook - Simplify > tree_merge_hook by adding a merge_bio helper - Push up > submit_bio_hook failures > > Changes since last submission: - Refreshed against 3.1-rc3. > > -Jeff >Oops. Sorry for the double-send. I had sent from a host which had appeared to have a broken mailer. Guess it wasn't. :) - -Jeff - -- Jeff Mahoney SuSE Labs -----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.18 (GNU/Linux) Comment: Using GnuPG with Mozilla - http://enigmail.mozdev.org/ iEYEARECAAYFAk5pX14ACgkQLPWxlyuTD7ITdACfWxIkPTsMMlwwHdrnrccG3yd9 3mYAn1cIxuRf+w+z4ou1gkryJt1RHJ6e =aAaR -----END PGP SIGNATURE----- -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html