Hi all - The following 9 patches add more error handling to the btrfs code: - Add btrfs_panic - Catch locking failures in {set,clear}_extent_bit - Push up set_extent_bit errors to callers - Push up lock_extent errors to callers - Push up clear_extent_bit errors to callers - Push up unlock_extent errors to callers - Make pin_down_extent return void - Push up btrfs_pin_extent errors to callers - Push up non-looped btrfs_transaction_start errors to callers Changes since version 1: - Changed BUG_ON(ret) to BUG_ON(ret < 0) in set_extent_bit push up patch - Added missing chunk to set_extent_bit push up patch - Added handling for btrfs_drop_snapshot return values to btrfs_transaction_start patch <Tsutomu Itoh> -Jeff -- Jeff Mahoney SUSE Labs -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
As part of the effort to eliminate BUG_ON as an error handling technique, we need to determine which errors are actual logic errors, which are on-disk corruption, and which are normal runtime errors e.g. -ENOMEM. Annotating these error cases is helpful to understand and report them. This patch adds a btrfs_panic() routine that will either panic or BUG depending on the new -ofatal_errors={panic,bug} mount option. Since there are still so many BUG_ONs, it defaults to BUG for now but I expect that to change once the error handling effort has made significant progress. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/ctree.h | 11 +++++++++++ fs/btrfs/super.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1363,6 +1363,7 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) +#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 18) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2650,6 +2651,16 @@ do { \ __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ } while (0) +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno, const char *fmt, ...); + +#define btrfs_panic(fs_info, errno, fmt, args...) 
\ +do { \ + struct btrfs_fs_info *_i = (fs_info); \ + __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \ + BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \ +} while (0) + /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL struct posix_acl *btrfs_get_acl(struct inode *inode, int type); --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -74,6 +74,9 @@ static const char *btrfs_decode_error(st case -EROFS: errstr = "Readonly filesystem"; break; + case -EEXIST: + errstr = "Object already exists"; + break; default: if (nbuf) { if (snprintf(nbuf, 16, "error %d", -errno) >= 0) @@ -143,6 +146,33 @@ void __btrfs_std_error(struct btrfs_fs_i btrfs_handle_error(fs_info); } +/* + * __btrfs_panic decodes unexpected, fatal errors from the caller, + * issues an alert, and either panics or BUGs, depending on mount options. + */ +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno, const char *fmt, ...) +{ + struct super_block *sb = fs_info->sb; + char nbuf[16]; + const char *errstr; + struct va_format vaf = { .fmt = fmt }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + errstr = btrfs_decode_error(fs_info, errno, nbuf); + if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR) + panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n", + sb->s_id, function, line, &vaf, errstr); + + printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n", + sb->s_id, function, line, &vaf, errstr); + va_end(args); + /* Caller calls BUG() */ +} + static void btrfs_put_super(struct super_block *sb) { struct btrfs_root *root = btrfs_sb(sb); @@ -162,7 +192,7 @@ enum { Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, - Opt_inode_cache, Opt_err, + Opt_inode_cache, Opt_fatal_errors, Opt_err, }; static match_table_t tokens = { @@ -195,6 +225,7 @@ static match_table_t tokens = { 
{Opt_subvolrootid, "subvolrootid=%d"}, {Opt_defrag, "autodefrag"}, {Opt_inode_cache, "inode_cache"}, + {Opt_fatal_errors, "fatal_errors=%s"}, {Opt_err, NULL}, }; @@ -381,6 +412,18 @@ int btrfs_parse_options(struct btrfs_roo printk(KERN_INFO "btrfs: enabling auto defrag"); btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); break; + case Opt_fatal_errors: + if (strcmp(args[0].from, "panic") == 0) + btrfs_set_opt(info->mount_opt, + PANIC_ON_FATAL_ERROR); + else if (strcmp(args[0].from, "bug") == 0) + btrfs_clear_opt(info->mount_opt, + PANIC_ON_FATAL_ERROR); + else { + ret = -EINVAL; + goto out; + } + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "''%s''\n", p); @@ -729,6 +772,8 @@ static int btrfs_show_options(struct seq seq_puts(seq, ",autodefrag"); if (btrfs_test_opt(root, INODE_MAP_CACHE)) seq_puts(seq, ",inode_cache"); + if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR)) + seq_puts(seq, ",fatal_errors=panic"); return 0; } -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 2/9] btrfs: Catch locking failures in {set,clear}_extent_bit
The *_state functions can only return 0 or -EEXIST. This patch addresses the cases where those functions return -EEXIST, representing a locking failure. It handles them by panicking with an appropriate error message. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent_io.c | 36 +++++++++++++++++++++++++----------- fs/btrfs/extent_io.h | 6 ++++++ 2 files changed, 31 insertions(+), 11 deletions(-) --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -531,7 +531,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); prealloc = NULL; if (err) goto out; @@ -553,7 +556,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); if (wake) wake_up(&state->wq); @@ -736,8 +742,11 @@ again: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = insert_state(tree, prealloc, start, end, &bits); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); prealloc = NULL; - BUG_ON(err == -EEXIST); goto out; } state = rb_entry(node, struct extent_state, rb_node); @@ -803,7 +812,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); prealloc = NULL; if (err) goto out; @@ -840,12 +852,11 @@ hit_next: */ err = insert_state(tree, prealloc, start, this_end, &bits); - 
BUG_ON(err == -EEXIST); - if (err) { - free_extent_state(prealloc); - prealloc = NULL; - goto out; - } + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); + cache_state(prealloc, cached_state); prealloc = NULL; start = this_end + 1; @@ -867,7 +878,10 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); - BUG_ON(err == -EEXIST); + if (err) + btrfs_panic(tree_fs_info(tree), err, "Locking error: " + "Extent tree was modified by another " + "thread while locked."); set_state_bits(tree, prealloc, &bits); cache_state(prealloc, cached_state); --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -159,6 +159,12 @@ static inline int extent_compress_type(u return bio_flags >> EXTENT_BIO_FLAG_SHIFT; } +static inline struct btrfs_fs_info * +tree_fs_info(struct extent_io_tree *tree) +{ + return btrfs_sb(tree->mapping->host->i_sb)->fs_info; +} + struct extent_map_tree; typedef struct extent_map *(get_extent_t)(struct inode *inode, -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 3/9] btrfs: Push up set_extent_bit errors to callers
In the locking case, set_extent_bit can return -EEXIST but callers already handle that. In the non-locking case, it can''t fail. Memory allocation failures are handled by BUG_ON. This patch pushes up the BUG_ONs from set_extent_bit to callers, except where -ENOMEM can''t occur (e.g. __GFP_WAIT && __GFP_NOFAIL). Update v2: Changed cases of BUG_ON(ret) to BUG_ON(ret < 0) Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 37 +++++++++++++++++++++++----------- fs/btrfs/extent_io.c | 52 ++++++++++++++++++++++++++++++++++++------------- fs/btrfs/file-item.c | 3 +- fs/btrfs/inode.c | 25 ++++++++++++++++------- fs/btrfs/ioctl.c | 5 ++-- fs/btrfs/relocation.c | 21 ++++++++++++------- 6 files changed, 99 insertions(+), 44 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -192,11 +192,14 @@ block_group_cache_tree_search(struct btr static int add_excluded_extent(struct btrfs_root *root, u64 start, u64 num_bytes) { + int ret; u64 end = start + num_bytes - 1; - set_extent_bits(&root->fs_info->freed_extents[0], - start, end, EXTENT_UPTODATE, GFP_NOFS); - set_extent_bits(&root->fs_info->freed_extents[1], - start, end, EXTENT_UPTODATE, GFP_NOFS); + ret = set_extent_bits(&root->fs_info->freed_extents[0], + start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); + ret = set_extent_bits(&root->fs_info->freed_extents[1], + start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -4142,8 +4145,9 @@ static int update_block_group(struct btr spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - set_extent_dirty(info->pinned_extents, - bytenr, bytenr + num_bytes - 1, + /* Can''t return -ENOMEM */ + set_extent_dirty(info->pinned_extents, bytenr, + bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); } btrfs_put_block_group(cache); @@ -4184,6 +4188,7 @@ static int pin_down_extent(struct btrfs_ spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); + /* __GFP_NOFAIL means it can''t return -ENOMEM */ 
set_extent_dirty(root->fs_info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); return 0; @@ -5636,6 +5641,7 @@ struct extent_buffer *btrfs_init_new_buf int level) { struct extent_buffer *buf; + int ret; buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) @@ -5654,14 +5660,21 @@ struct extent_buffer *btrfs_init_new_buf * EXENT bit to differentiate dirty pages. */ if (root->log_transid % 2 == 0) - set_extent_dirty(&root->dirty_log_pages, buf->start, - buf->start + buf->len - 1, GFP_NOFS); + ret = set_extent_dirty(&root->dirty_log_pages, + buf->start, + buf->start + buf->len - 1, + GFP_NOFS); else - set_extent_new(&root->dirty_log_pages, buf->start, - buf->start + buf->len - 1, GFP_NOFS); + ret = set_extent_new(&root->dirty_log_pages, + buf->start, + buf->start + buf->len - 1, + GFP_NOFS); + BUG_ON(ret < 0); } else { - set_extent_dirty(&trans->transaction->dirty_pages, buf->start, - buf->start + buf->len - 1, GFP_NOFS); + ret = set_extent_dirty(&trans->transaction->dirty_pages, + buf->start, buf->start + buf->len - 1, + GFP_NOFS); + BUG_ON(ret < 0); } trans->blocks_used++; /* this returns a buffer locked for blocking */ --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -703,6 +703,9 @@ static void uncache_state(struct extent_ * part of the range already has the desired bits set. The start of the * existing range is returned in failed_start in this case. * + * It may also fail with -ENOMEM if memory cannot be obtained for extent_state + * structures. + * * [start, end] is inclusive This takes the tree lock. 
*/ @@ -721,7 +724,8 @@ int set_extent_bit(struct extent_io_tree again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - BUG_ON(!prealloc); + if (!prealloc) + return -ENOMEM; } spin_lock(&tree->lock); @@ -740,7 +744,11 @@ again: node = tree_search(tree, start); if (!node) { prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = insert_state(tree, prealloc, start, end, &bits); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -810,7 +818,11 @@ hit_next: } prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, start); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -844,7 +856,10 @@ hit_next: this_end = last_start - 1; prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } /* * Avoid to free ''prealloc'' if it can be merged with @@ -876,7 +891,11 @@ hit_next: } prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, end + 1); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -988,6 +1007,7 @@ int lock_extent_bits(struct extent_io_tr } WARN_ON(start > end); } + BUG_ON(err < 0); return err; } @@ -1010,6 +1030,7 @@ int try_lock_extent(struct extent_io_tre EXTENT_LOCKED, 1, 0, NULL, mask); return 0; } + BUG_ON(err < 0); return 1; } @@ -1757,8 +1778,9 @@ static void end_bio_extent_readpage(stru } if (uptodate) { - set_extent_uptodate(tree, start, end, &cached, - GFP_ATOMIC); + ret = set_extent_uptodate(tree, start, end, &cached, + GFP_ATOMIC); + BUG_ON(ret < 0); } unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); @@ -1980,8 +2002,9 @@ static int __extent_read_full_page(struc memset(userpage + pg_offset, 0, iosize); 
flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); - set_extent_uptodate(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + ret = set_extent_uptodate(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); break; @@ -2030,8 +2053,9 @@ static int __extent_read_full_page(struc flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); - set_extent_uptodate(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + ret = set_extent_uptodate(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); cur = cur + iosize; @@ -3286,8 +3310,10 @@ int set_extent_buffer_uptodate(struct ex num_pages = num_extent_pages(eb->start, eb->len); if (eb_straddles_pages(eb)) { - set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - NULL, GFP_NOFS); + int ret = set_extent_uptodate(tree, eb->start, + eb->start + eb->len - 1, + NULL, GFP_NOFS); + BUG_ON(ret < 0); } for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -212,9 +212,10 @@ static int __btrfs_lookup_bio_sums(struc sum = 0; if (BTRFS_I(inode)->root->root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID) { - set_extent_bits(io_tree, offset, + ret = set_extent_bits(io_tree, offset, offset + bvec->bv_len - 1, EXTENT_NODATASUM, GFP_NOFS); + BUG_ON(ret < 0); } else { printk(KERN_INFO "btrfs no csum found " "for inode %llu start %llu\n", --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1548,6 +1548,7 @@ static void btrfs_writepage_fixup_worker struct inode *inode; u64 page_start; u64 page_end; + int ret; fixup = container_of(work, struct btrfs_writepage_fixup, work); page = fixup->page; @@ -1579,7 +1580,9 @@ again: } BUG(); - btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end, + &cached_state); + BUG_ON(ret 
< 0); ClearPageChecked(page); out: unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, @@ -1883,8 +1886,11 @@ static int btrfs_io_failed_hook(struct b } failrec->logical = logical; free_extent_map(em); - set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | - EXTENT_DIRTY, GFP_NOFS); + + /* Doesn''t this ignore locking failures? */ + ret = set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | + EXTENT_DIRTY, GFP_NOFS); + BUG_ON(ret < 0); set_state_private(failure_tree, start, (u64)(unsigned long)failrec); } else { @@ -5167,8 +5173,10 @@ again: kunmap(page); btrfs_mark_buffer_dirty(leaf); } - set_extent_uptodate(io_tree, em->start, - extent_map_end(em) - 1, NULL, GFP_NOFS); + ret = set_extent_uptodate(io_tree, em->start, + extent_map_end(em) - 1, + NULL, GFP_NOFS); + BUG_ON(ret < 0); goto insert; } else { printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); @@ -6230,9 +6238,10 @@ static ssize_t btrfs_direct_IO(int rw, s */ if (writing) { write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; - ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - EXTENT_DELALLOC, 0, NULL, &cached_state, - GFP_NOFS); + ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, EXTENT_DELALLOC, 0, NULL, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (ret) { clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, EXTENT_LOCKED | write_bits, --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -938,8 +938,9 @@ again: } - btrfs_set_extent_delalloc(inode, page_start, page_end - 1, - &cached_state); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end - 1, + &cached_state); + BUG_ON(ret < 0); unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state, --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2623,11 +2623,11 @@ static int finish_pending_nodes(struct b return err; } -static void mark_block_processed(struct reloc_control *rc, +static int mark_block_processed(struct 
reloc_control *rc, u64 bytenr, u32 blocksize) { - set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, - EXTENT_DIRTY, GFP_NOFS); + return set_extent_bits(&rc->processed_blocks, bytenr, + bytenr + blocksize - 1, EXTENT_DIRTY, GFP_NOFS); } static void __mark_block_processed(struct reloc_control *rc, @@ -2636,8 +2636,10 @@ static void __mark_block_processed(struc u32 blocksize; if (node->level == 0 || in_block_group(node->bytenr, rc->block_group)) { + int ret; blocksize = btrfs_level_size(rc->extent_root, node->level); - mark_block_processed(rc, node->bytenr, blocksize); + ret = mark_block_processed(rc, node->bytenr, blocksize); + BUG_ON(ret < 0); } node->processed = 1; } @@ -2994,13 +2996,16 @@ static int relocate_file_extent_cluster( if (nr < cluster->nr && page_start + offset == cluster->boundary[nr]) { - set_extent_bits(&BTRFS_I(inode)->io_tree, - page_start, page_end, - EXTENT_BOUNDARY, GFP_NOFS); + ret = set_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end, + EXTENT_BOUNDARY, GFP_NOFS); + BUG_ON(ret < 0); nr++; } - btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); + ret = btrfs_set_extent_delalloc(inode, page_start, + page_end, NULL); + BUG_ON(ret < 0); set_page_dirty(page); unlock_extent(&BTRFS_I(inode)->io_tree, -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 4/9] btrfs: Push up lock_extent errors to callers
lock_extent, try_lock_extent, and lock_extent_bits can''t currently fail because errors are caught via BUG_ON. This patch pushes the error handling up to callers, which currently only handle them via BUG_ON themselves. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/compression.c | 3 +- fs/btrfs/disk-io.c | 5 ++- fs/btrfs/extent_io.c | 20 ++++++++----- fs/btrfs/file.c | 17 ++++++----- fs/btrfs/free-space-cache.c | 6 ++-- fs/btrfs/inode.c | 66 ++++++++++++++++++++++++++------------------ fs/btrfs/ioctl.c | 16 ++++++---- fs/btrfs/relocation.c | 18 ++++++++---- 8 files changed, 94 insertions(+), 57 deletions(-) --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -496,7 +496,8 @@ static noinline int add_ra_bio_pages(str * sure they map to this compressed extent on disk. */ set_page_extent_mapped(page); - lock_extent(tree, last_offset, end, GFP_NOFS); + ret = lock_extent(tree, last_offset, end, GFP_NOFS); + BUG_ON(ret < 0); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, last_offset, PAGE_CACHE_SIZE); --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -331,8 +331,9 @@ static int verify_parent_transid(struct if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; - lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (extent_buffer_uptodate(io_tree, eb, cached_state) && btrfs_header_generation(eb) == parent_transid) { ret = 0; --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1013,7 +1013,6 @@ int lock_extent_bits(struct extent_io_tr } WARN_ON(start > end); } - BUG_ON(err); return err; } @@ -1035,8 +1034,8 @@ int try_lock_extent(struct extent_io_tre clear_extent_bit(tree, start, failed_start - 1, EXTENT_LOCKED, 1, 0, NULL, mask); return 0; - } - BUG_ON(err); + } else if (err < 0) + return err; return 1; } @@ -1347,8 +1346,9 @@ again: 
BUG_ON(ret); /* step three, lock the state bits for the whole range */ - lock_extent_bits(tree, delalloc_start, delalloc_end, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(tree, delalloc_start, delalloc_end, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* then test to make sure it is all still delalloc */ ret = test_range_bit(tree, delalloc_start, delalloc_end, @@ -1977,7 +1977,8 @@ static int __extent_read_full_page(struc end = page_end; while (1) { - lock_extent(tree, start, end, GFP_NOFS); + ret = lock_extent(tree, start, end, GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_ordered_extent(inode, start); if (!ordered) break; @@ -2663,12 +2664,14 @@ int extent_invalidatepage(struct extent_ u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); u64 end = start + PAGE_CACHE_SIZE - 1; size_t blocksize = page->mapping->host->i_sb->s_blocksize; + int ret; start += (offset + blocksize - 1) & ~(blocksize - 1); if (start > end) return 0; - lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); wait_on_page_writeback(page); clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | @@ -2878,8 +2881,9 @@ int extent_fiemap(struct inode *inode, s last_for_get_extent = isize; } - lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); em = get_extent_skip_holes(inode, off, last_for_get_extent, get_extent); --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1104,9 +1104,10 @@ again: err = 0; if (start_pos < inode->i_size) { struct btrfs_ordered_extent *ordered; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, 0, &cached_state, - GFP_NOFS); + err = lock_extent_bits(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ordered = 
btrfs_lookup_first_ordered_extent(inode, last_pos - 1); if (ordered && @@ -1612,8 +1613,9 @@ static long btrfs_fallocate(struct file /* the extent lock is ordered inside the running * transaction */ - lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, - locked_end, 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_first_ordered_extent(inode, alloc_end - 1); if (ordered && @@ -1696,8 +1698,9 @@ static int find_desired_extent(struct in if (inode->i_size == 0) return -ENXIO; - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, - &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * Delalloc is such a pain. If we have a hole and we have pending --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -609,8 +609,10 @@ int __btrfs_write_out_cache(struct btrfs } index = 0; - lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); /* * When searching for pinned extents, we need to start at our start --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -588,9 +588,11 @@ retry: int page_started = 0; unsigned long nr_written = 0; - lock_extent(io_tree, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, GFP_NOFS); + ret = lock_extent(io_tree, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + GFP_NOFS); + BUG_ON(ret < 0); /* allocate blocks */ ret = cow_file_range(inode, async_cow->locked_page, @@ -617,9 +619,10 @@ retry: continue; } - lock_extent(io_tree, async_extent->start, - async_extent->start + async_extent->ram_size - 1, - GFP_NOFS); + ret = lock_extent(io_tree, async_extent->start, + 
async_extent->start + + async_extent->ram_size - 1, GFP_NOFS); + BUG_ON(ret < 0); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); @@ -1563,8 +1566,9 @@ again: page_start = page_offset(page); page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; - lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, - &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* already ordered? We''re done */ if (PagePrivate2(page)) @@ -1746,9 +1750,11 @@ static int btrfs_finish_ordered_io(struc goto out; } - lock_extent_bits(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (nolock) trans = btrfs_join_transaction_nolock(root); @@ -3410,8 +3416,9 @@ again: } wait_on_page_writeback(page); - lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); set_page_extent_mapped(page); ordered = btrfs_lookup_ordered_extent(inode, page_start); @@ -3486,8 +3493,9 @@ int btrfs_cont_expand(struct inode *inod struct btrfs_ordered_extent *ordered; btrfs_wait_ordered_range(inode, hole_start, block_end - hole_start); - lock_extent_bits(io_tree, hole_start, block_end - 1, 0, - &cached_state, GFP_NOFS); + err = lock_extent_bits(io_tree, hole_start, block_end - 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ordered = btrfs_lookup_ordered_extent(inode, hole_start); if (!ordered) break; @@ -5798,9 +5806,10 @@ again: goto out; } - lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, - ordered->file_offset + ordered->len - 1, 0, - &cached_state, GFP_NOFS); + ret = 
lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, + ordered->file_offset + ordered->len - 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { ret = btrfs_mark_extent_written(trans, inode, @@ -6214,8 +6223,9 @@ static ssize_t btrfs_direct_IO(int rw, s } while (1) { - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, - 0, &cached_state, GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, + lockend, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * We''re concerned with the entire range that we''re going to be * doing DIO to, so we need to make sure theres no ordered @@ -6354,6 +6364,7 @@ static void btrfs_invalidatepage(struct struct extent_state *cached_state = NULL; u64 page_start = page_offset(page); u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + int ret; /* @@ -6370,8 +6381,9 @@ static void btrfs_invalidatepage(struct btrfs_releasepage(page, GFP_NOFS); return; } - lock_extent_bits(tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_ordered_extent(page->mapping->host, page_offset(page)); if (ordered) { @@ -6393,8 +6405,9 @@ static void btrfs_invalidatepage(struct } btrfs_put_ordered_extent(ordered); cached_state = NULL; - lock_extent_bits(tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(tree, page_start, page_end, + 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); } clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | @@ -6462,8 +6475,9 @@ again: } wait_on_page_writeback(page); - lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); set_page_extent_mapped(page); /* --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -757,7 
+757,7 @@ static int should_defrag_range(struct in struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - int ret = 1; + int ret = 1, err; /* * make sure that once we start defragging and extent, we keep on @@ -778,7 +778,8 @@ static int should_defrag_range(struct in if (!em) { /* get the big lock and read metadata off disk */ - lock_extent(io_tree, start, start + len - 1, GFP_NOFS); + err = lock_extent(io_tree, start, start + len - 1, GFP_NOFS); + BUG_ON(err < 0); em = btrfs_get_extent(inode, NULL, 0, start, len, 0); unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); @@ -902,9 +903,10 @@ again: page_start = page_offset(pages[0]); page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, 0, &cached_state, - GFP_NOFS); + ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); if (ordered && ordered->file_offset + ordered->len > page_start && @@ -2225,7 +2227,9 @@ static noinline long btrfs_ioctl_clone(s another, and lock file content */ while (1) { struct btrfs_ordered_extent *ordered; - lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + ret = lock_extent(&BTRFS_I(src)->io_tree, off, off+len, + GFP_NOFS); + BUG_ON(ret < 0); ordered = btrfs_lookup_first_ordered_extent(src, off+len); if (!ordered && !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1577,6 +1577,7 @@ int replace_file_extents(struct btrfs_tr ret = try_lock_extent(&BTRFS_I(inode)->io_tree, key.offset, end, GFP_NOFS); + BUG_ON(ret < 0); if (!ret) continue; @@ -1899,6 +1900,7 @@ static int invalidate_extent_cache(struc u64 objectid; u64 start, end; u64 ino; + int ret; objectid = min_key->objectid; while (1) { @@ 
-1952,7 +1954,9 @@ static int invalidate_extent_cache(struc } /* the lock_extent waits for readpage to complete */ - lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, start, end, + GFP_NOFS); + BUG_ON(ret < 0); btrfs_drop_extent_cache(inode, start, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -2862,7 +2866,9 @@ int prealloc_file_extent_cluster(struct else end = cluster->end - offset; - lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, start, + end, GFP_NOFS); + BUG_ON(ret < 0); num_bytes = end + 1 - start; ret = btrfs_prealloc_file_range(inode, 0, start, num_bytes, num_bytes, @@ -2899,7 +2905,8 @@ int setup_extent_mapping(struct inode *i em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + BUG_ON(ret < 0); while (1) { write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -2989,8 +2996,9 @@ static int relocate_file_extent_cluster( page_start = (u64)page->index << PAGE_CACHE_SHIFT; page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(&BTRFS_I(inode)->io_tree, - page_start, page_end, GFP_NOFS); + ret = lock_extent(&BTRFS_I(inode)->io_tree, + page_start, page_end, GFP_NOFS); + BUG_ON(ret < 0); set_page_extent_mapped(page); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 5/9] btrfs: Push up clear_extent_bit errors to callers
clear_extent_bit can fail with -ENOMEM for a specific case but will BUG on other memory allocation failures. This patch returns -ENOMEM for memory allocation failures and handles them with BUG_ON in callers which don't handle it already. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/disk-io.c | 7 ++- fs/btrfs/extent-tree.c | 14 ++++-- fs/btrfs/extent_io.c | 56 ++++++++++++++++++-------- fs/btrfs/file.c | 10 ++-- fs/btrfs/free-space-cache.c | 20 +++++---- fs/btrfs/inode.c | 92 +++++++++++++++++++++++++++----------------- fs/btrfs/ioctl.c | 9 ++-- fs/btrfs/relocation.c | 5 +- fs/btrfs/transaction.c | 4 + fs/btrfs/tree-log.c | 5 +- 10 files changed, 142 insertions(+), 80 deletions(-) --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2981,7 +2981,9 @@ static int btrfs_destroy_marked_extents( if (ret) break; - clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); + ret = clear_extent_bits(dirty_pages, start, end, mark, + GFP_NOFS); + BUG_ON(ret < 0); while (start <= end) { index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; @@ -3042,7 +3044,8 @@ static int btrfs_destroy_pinned_extent(s end + 1 - start, NULL); - clear_extent_dirty(unpin, start, end, GFP_NOFS); + ret = clear_extent_dirty(unpin, start, end, GFP_NOFS); + BUG_ON(ret < 0); btrfs_error_unpin_extent_range(root, start, end); cond_resched(); } --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -207,14 +207,17 @@ static void free_excluded_extents(struct struct btrfs_block_group_cache *cache) { u64 start, end; + int ret; start = cache->key.objectid; end = start + cache->key.offset - 1; - clear_extent_bits(&root->fs_info->freed_extents[0], - start, end, EXTENT_UPTODATE, GFP_NOFS); - clear_extent_bits(&root->fs_info->freed_extents[1], - start, end, EXTENT_UPTODATE, GFP_NOFS); + ret = clear_extent_bits(&root->fs_info->freed_extents[0], + start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); + ret = clear_extent_bits(&root->fs_info->freed_extents[1], + 
start, end, EXTENT_UPTODATE, GFP_NOFS); + BUG_ON(ret < 0); } static int exclude_super_stripes(struct btrfs_root *root, @@ -4359,7 +4362,8 @@ int btrfs_finish_extent_commit(struct bt ret = btrfs_discard_extent(root, start, end + 1 - start, NULL); - clear_extent_dirty(unpin, start, end, GFP_NOFS); + ret = clear_extent_dirty(unpin, start, end, GFP_NOFS); + BUG_ON(ret < 0); unpin_extent_range(root, start, end); cond_resched(); } --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -529,7 +529,11 @@ hit_next: if (state->start < start) { prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, start); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -554,7 +558,11 @@ hit_next: */ if (state->start <= end && state->end > end) { prealloc = alloc_extent_state_atomic(prealloc); - BUG_ON(!prealloc); + if (!prealloc) { + err = -ENOMEM; + goto out; + } + err = split_state(tree, state, prealloc, end + 1); if (err) btrfs_panic(tree_fs_info(tree), err, "Locking error: " @@ -1024,9 +1032,12 @@ int try_lock_extent(struct extent_io_tre err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, &failed_start, NULL, mask); if (err == -EEXIST) { - if (failed_start > start) - clear_extent_bit(tree, start, failed_start - 1, - EXTENT_LOCKED, 1, 0, NULL, mask); + if (failed_start > start) { + err = clear_extent_bit(tree, start, failed_start - 1, + EXTENT_LOCKED, 1, 0, NULL, + mask); + BUG_ON(err < 0); + } return 0; } else if (err < 0) return err; @@ -1036,14 +1047,18 @@ int try_lock_extent(struct extent_io_tre int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached, gfp_t mask) { - return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, - mask); + int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, + cached, mask); + BUG_ON(ret < 0); + return ret; } int unlock_extent(struct 
extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { - return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, - mask); + int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, + mask); + BUG_ON(ret < 0); + return ret; } /* @@ -1383,7 +1398,9 @@ int extent_clear_unlock_delalloc(struct if (op & EXTENT_CLEAR_DELALLOC) clear_bits |= EXTENT_DELALLOC; - clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); + ret = clear_extent_bit(tree, start, end, clear_bits, + 1, 0, NULL, GFP_NOFS); + BUG_ON(ret < 0); if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK | EXTENT_SET_PRIVATE2))) @@ -1688,7 +1705,9 @@ static void end_bio_extent_writepage(str } if (!uptodate) { - clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); + ret = clear_extent_uptodate(tree, start, end, + NULL, GFP_NOFS); + BUG_ON(ret < 0); ClearPageUptodate(page); SetPageError(page); } @@ -2667,10 +2686,11 @@ int extent_invalidatepage(struct extent_ ret = lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); BUG_ON(ret < 0); wait_on_page_writeback(page); - clear_extent_bit(tree, start, end, - EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, - 1, 1, &cached_state, GFP_NOFS); + ret = clear_extent_bit(tree, start, end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, + 1, 1, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -3293,8 +3313,10 @@ int clear_extent_buffer_uptodate(struct clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); if (eb_straddles_pages(eb)) { - clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - cached_state, GFP_NOFS); + int ret = clear_extent_uptodate(tree, eb->start, + eb->start + eb->len - 1, + cached_state, GFP_NOFS); + BUG_ON(ret < 0); } for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1128,10 +1128,12 @@ again: if (ordered) 
btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, - last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, - GFP_NOFS); + err = clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, + last_pos - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); unlock_extent_cached(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, &cached_state, GFP_NOFS); --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -800,10 +800,12 @@ int __btrfs_write_out_cache(struct btrfs ret = btrfs_search_slot(trans, root, &key, path, 1, 1); if (ret < 0) { + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, NULL, + GFP_NOFS); + BUG_ON(ret < 0); ret = -1; - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); goto out; } leaf = path->nodes[0]; @@ -814,12 +816,14 @@ int __btrfs_write_out_cache(struct btrfs btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || found_key.offset != offset) { - ret = -1; - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, NULL, - GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, + bytes - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + NULL, GFP_NOFS); + BUG_ON(ret < 0); btrfs_release_path(path); + ret = -1; goto out; } } --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -960,9 +960,11 @@ static int cow_file_range_async(struct i unsigned long nr_pages; u64 cur_end; int limit = 10 * 1024 * 1042; + int ret; - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, - 1, 0, NULL, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, + EXTENT_LOCKED, 1, 0, NULL, 
GFP_NOFS); + BUG_ON(ret < 0); while (start < end) { async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); BUG_ON(!async_cow); @@ -1917,9 +1919,11 @@ static int btrfs_io_failed_hook(struct b } if (!state || failrec->last_mirror > num_copies) { set_state_private(failure_tree, failrec->start, 0); - clear_extent_bits(failure_tree, failrec->start, - failrec->start + failrec->len - 1, - EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); + ret = clear_extent_bits(failure_tree, failrec->start, + failrec->start + failrec->len - 1, + EXTENT_LOCKED | EXTENT_DIRTY, + GFP_NOFS); + BUG_ON(ret < 0); kfree(failrec); return -EIO; } @@ -1963,11 +1967,13 @@ static int btrfs_clean_io_failures(struc private_failure; set_state_private(&BTRFS_I(inode)->io_failure_tree, failure->start, 0); - clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, + ret = clear_extent_bits( + &BTRFS_I(inode)->io_failure_tree, failure->start, failure->start + failure->len - 1, EXTENT_DIRTY | EXTENT_LOCKED, GFP_NOFS); + BUG_ON(ret < 0); kfree(failure); } } @@ -2001,8 +2007,9 @@ static int btrfs_readpage_end_io_hook(st if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { - clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, - GFP_NOFS); + ret = clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, + GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -3432,9 +3439,11 @@ again: goto again; } - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, - EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - 0, 0, &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); @@ -5584,6 +5593,7 @@ static int btrfs_get_blocks_direct(struc u64 start = iblock << inode->i_blkbits; u64 len = bh_result->b_size; struct 
btrfs_trans_handle *trans; + int ret; em = btrfs_get_extent(inode, NULL, 0, start, len, 0); if (IS_ERR(em)) @@ -5679,9 +5689,11 @@ must_cow: return PTR_ERR(em); len = min(len, em->len - (start - em->start)); unlock: - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, - EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, - 0, NULL, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, start, + start + len - 1, + EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, + 1, 0, NULL, GFP_NOFS); + BUG_ON(ret < 0); map: bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; @@ -6253,9 +6265,12 @@ static ssize_t btrfs_direct_IO(int rw, s &cached_state, GFP_NOFS); BUG_ON(ret); if (ret) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockend, EXTENT_LOCKED | write_bits, - 1, 0, &cached_state, GFP_NOFS); + int ret2; + ret2 = clear_extent_bit(&BTRFS_I(inode)->io_tree, + lockstart, lockend, + EXTENT_LOCKED | write_bits, + 1, 0, &cached_state, GFP_NOFS); + BUG_ON(ret2 < 0); goto out; } } @@ -6269,19 +6284,21 @@ static ssize_t btrfs_direct_IO(int rw, s btrfs_submit_direct, 0); if (ret < 0 && ret != -EIOCBQUEUED) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, + offset + iov_length(iov, nr_segs) - 1, + EXTENT_LOCKED | write_bits, 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { /* * We're falling back to buffered, unlock the section we didn't * do IO on. 
*/ - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, + offset + iov_length(iov, nr_segs) - 1, + EXTENT_LOCKED | write_bits, 1, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); } out: free_extent_state(cached_state); @@ -6391,10 +6408,11 @@ static void btrfs_invalidatepage(struct * IO on this page will never be started, so we need * to account for any ordered extents now */ - clear_extent_bit(tree, page_start, page_end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, - &cached_state, GFP_NOFS); + ret = clear_extent_bit(tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, + 1, 0, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * whoever cleared the private bit is responsible * for the finish_ordered_io @@ -6409,9 +6427,11 @@ static void btrfs_invalidatepage(struct 0, &cached_state, GFP_NOFS); BUG_ON(ret < 0); } - clear_extent_bit(tree, page_start, page_end, - EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); + ret = clear_extent_bit(tree, page_start, page_end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 1, 1, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); __btrfs_releasepage(page, GFP_NOFS); ClearPageChecked(page); @@ -6501,9 +6521,11 @@ again: * is probably a better way to do this, but for now keep consistent with * prepare_pages in the normal write path. 
*/ - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, - EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - 0, 0, &cached_state, GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -926,10 +926,11 @@ again: if (ordered) btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, - page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, - GFP_NOFS); + ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, + page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); if (i_done != num_pages) { spin_lock(&BTRFS_I(inode)->lock); --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3834,8 +3834,9 @@ restart: } btrfs_release_path(path); - clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, - GFP_NOFS); + ret = clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, + EXTENT_DIRTY, GFP_NOFS); + BUG_ON(ret < 0); if (trans) { nr = trans->blocks_used; --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -636,7 +636,9 @@ int btrfs_wait_marked_extents(struct btr if (ret) break; - clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); + ret = clear_extent_bits(dirty_pages, start, end, + mark, GFP_NOFS); + BUG_ON(ret < 0); while (start <= end) { index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2158,8 +2158,9 @@ static void free_log_tree(struct btrfs_t if (ret) break; - clear_extent_bits(&log->dirty_log_pages, start, end, - EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); + ret = clear_extent_bits(&log->dirty_log_pages, start, end, + EXTENT_DIRTY | 
EXTENT_NEW, GFP_NOFS); + BUG_ON(ret < 0); } free_extent_buffer(log->node); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 6/9] btrfs: Push up unlock_extent errors to callers
The previous patch pushed the clear_extent_bit error handling up a level, which included unlock_extent and unlock_extent_cache. This patch pushes the BUG_ON up into the callers of those functions. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/compression.c | 9 ++-- fs/btrfs/disk-io.c | 7 +-- fs/btrfs/extent_io.c | 52 +++++++++++++---------- fs/btrfs/file.c | 35 ++++++++------- fs/btrfs/free-space-cache.c | 15 ++++-- fs/btrfs/inode.c | 98 ++++++++++++++++++++++++++------------------ fs/btrfs/ioctl.c | 26 +++++++---- fs/btrfs/relocation.c | 24 +++++++--- 8 files changed, 160 insertions(+), 106 deletions(-) --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -507,7 +507,8 @@ static noinline int add_ra_bio_pages(str (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || (em->block_start >> 9) != cb->orig_bio->bi_sector) { free_extent_map(em); - unlock_extent(tree, last_offset, end, GFP_NOFS); + ret = unlock_extent(tree, last_offset, end, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); page_cache_release(page); break; @@ -535,7 +536,8 @@ static noinline int add_ra_bio_pages(str nr_pages++; page_cache_release(page); } else { - unlock_extent(tree, last_offset, end, GFP_NOFS); + ret = unlock_extent(tree, last_offset, end, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); page_cache_release(page); break; --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -326,7 +326,7 @@ static int verify_parent_transid(struct struct extent_buffer *eb, u64 parent_transid) { struct extent_state *cached_state = NULL; - int ret; + int ret, err; if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; @@ -347,8 +347,9 @@ static int verify_parent_transid(struct ret = 1; clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: - unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); 
return ret; } --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1053,18 +1053,14 @@ int try_lock_extent(struct extent_io_tre int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached, gfp_t mask) { - int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, - cached, mask); - BUG_ON(ret < 0); - return ret; + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, + mask); } int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { - int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, - mask); - BUG_ON(ret < 0); - return ret; + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, + mask); } /* @@ -1369,8 +1365,9 @@ again: ret = test_range_bit(tree, delalloc_start, delalloc_end, EXTENT_DELALLOC, 1, cached_state); if (!ret) { - unlock_extent_cached(tree, delalloc_start, delalloc_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(tree, delalloc_start, delalloc_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); __unlock_for_delalloc(inode, locked_page, delalloc_start, delalloc_end); cond_resched(); @@ -1807,7 +1804,9 @@ static void end_bio_extent_readpage(stru GFP_ATOMIC); BUG_ON(ret); } - unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); + ret = unlock_extent_cached(tree, start, end, + &cached, GFP_ATOMIC); + BUG_ON(ret < 0); if (whole_page) { if (uptodate) { @@ -2001,7 +2000,8 @@ static int __extent_read_full_page(struc ordered = btrfs_lookup_ordered_extent(inode, start); if (!ordered) break; - unlock_extent(tree, start, end, GFP_NOFS); + ret = unlock_extent(tree, start, end, GFP_NOFS); + BUG_ON(ret < 0); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); } @@ -2031,15 +2031,17 @@ static int __extent_read_full_page(struc ret = set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); BUG_ON(ret); - unlock_extent_cached(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + 
ret = unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); break; } em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); if (IS_ERR_OR_NULL(em)) { SetPageError(page); - unlock_extent(tree, cur, end, GFP_NOFS); + ret = unlock_extent(tree, cur, end, GFP_NOFS); + BUG_ON(ret < 0); break; } extent_offset = cur - em->start; @@ -2082,8 +2084,9 @@ static int __extent_read_full_page(struc ret = set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); BUG_ON(ret); - unlock_extent_cached(tree, cur, cur + iosize - 1, - &cached, GFP_NOFS); + ret = unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); + BUG_ON(ret < 0); cur = cur + iosize; pg_offset += iosize; continue; @@ -2092,7 +2095,9 @@ static int __extent_read_full_page(struc if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1, NULL)) { check_page_uptodate(tree, page); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + ret = unlock_extent(tree, cur, cur + iosize - 1, + GFP_NOFS); + BUG_ON(ret < 0); cur = cur + iosize; pg_offset += iosize; continue; @@ -2102,7 +2107,9 @@ static int __extent_read_full_page(struc */ if (block_start == EXTENT_MAP_INLINE) { SetPageError(page); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + ret = unlock_extent(tree, cur, cur + iosize - 1, + GFP_NOFS); + BUG_ON(ret < 0); cur = cur + iosize; pg_offset += iosize; continue; @@ -2829,7 +2836,7 @@ static struct extent_map *get_extent_ski int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { - int ret = 0; + int ret = 0, err; u64 off = start; u64 max = start + len; u32 flags = 0; @@ -2989,8 +2996,9 @@ int extent_fiemap(struct inode *inode, s out_free: free_extent_map(em); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, + &cached_state, GFP_NOFS); + 
BUG_ON(err < 0); return ret; } --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1114,9 +1114,10 @@ again: ordered->file_offset + ordered->len > start_pos && ordered->file_offset < last_pos) { btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); page_cache_release(pages[i]); @@ -1134,9 +1135,10 @@ again: EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, GFP_NOFS); BUG_ON(err < 0); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, &cached_state, - GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); } for (i = 0; i < num_pages; i++) { clear_page_dirty_for_io(pages[i]); @@ -1577,7 +1579,7 @@ static long btrfs_fallocate(struct file u64 locked_end; u64 mask = BTRFS_I(inode)->root->sectorsize - 1; struct extent_map *em; - int ret; + int ret, err; alloc_start = offset & ~mask; alloc_end = (offset + len + mask) & ~mask; @@ -1624,9 +1626,10 @@ ordered->file_offset + ordered->len > alloc_start && ordered->file_offset < alloc_end) { btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - alloc_start, locked_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + alloc_start, locked_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); /* * we can't wait on the range with the transaction * running or with the extent lock held @@ -1668,8 +1671,9 @@ break; } } - unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, &cached_state, GFP_NOFS); + 
BUG_ON(err < 0); btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); out: @@ -1688,7 +1692,7 @@ static int find_desired_extent(struct in u64 orig_start = *offset; u64 len = i_size_read(inode); u64 last_end = 0; - int ret = 0; + int ret = 0, err; lockend = max_t(u64, root->sectorsize, lockend); if (lockend <= lockstart) @@ -1784,8 +1788,9 @@ static int find_desired_extent(struct in if (!ret) *offset = min(*offset, inode->i_size); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); return ret; } --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -549,7 +549,7 @@ int __btrfs_write_out_cache(struct btrfs int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; - int ret = -1; + int ret = -1, err; bool next_page = false; bool out_of_space = false; @@ -760,9 +760,10 @@ int __btrfs_write_out_cache(struct btrfs if (out_of_space) { btrfs_drop_pages(pages, num_pages); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, - i_size_read(inode) - 1, &cached_state, - GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ret = 0; goto out; } @@ -782,8 +783,10 @@ int __btrfs_write_out_cache(struct btrfs ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, bytes, &cached_state); btrfs_drop_pages(pages, num_pages); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, - i_size_read(inode) - 1, &cached_state, GFP_NOFS); + err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); if (ret) { ret = 0; --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -643,9 +643,11 @@ retry: kfree(async_extent->pages); async_extent->nr_pages = 0; async_extent->pages = NULL; - unlock_extent(io_tree, async_extent->start, - async_extent->start 
+ - async_extent->ram_size - 1, GFP_NOFS); + ret = unlock_extent(io_tree, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + GFP_NOFS); + BUG_ON(ret < 0); goto retry; } @@ -1578,8 +1580,10 @@ again: ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, - page_end, &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); goto again; @@ -1591,8 +1595,9 @@ again: BUG_ON(ret); ClearPageChecked(page); out: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, + page_end, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); out_page: unlock_page(page); page_cache_release(page); @@ -1789,9 +1794,11 @@ static int btrfs_finish_ordered_io(struc ordered_extent->len); BUG_ON(ret); } - unlock_extent_cached(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1, &cached_state, GFP_NOFS); + ret = unlock_extent_cached(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); @@ -3387,7 +3394,7 @@ static int btrfs_truncate_page(struct ad pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); struct page *page; - int ret = 0; + int ret = 0, err; u64 page_start; u64 page_end; @@ -3430,8 +3437,9 @@ again: ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); 
page_cache_release(page); btrfs_start_ordered_extent(inode, ordered, 1); @@ -3448,8 +3456,9 @@ again: ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); if (ret) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); goto out_unlock; } @@ -3462,8 +3471,9 @@ again: } ClearPageChecked(page); set_page_dirty(page); - unlock_extent_cached(io_tree, page_start, page_end, &cached_state, - GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); out_unlock: if (ret) @@ -3493,7 +3503,7 @@ int btrfs_cont_expand(struct inode *inod u64 last_byte; u64 cur_offset; u64 hole_size; - int err = 0; + int err = 0, err2; if (size <= hole_start) return 0; @@ -3508,8 +3518,9 @@ int btrfs_cont_expand(struct inode *inod ordered = btrfs_lookup_ordered_extent(inode, hole_start); if (!ordered) break; - unlock_extent_cached(io_tree, hole_start, block_end - 1, - &cached_state, GFP_NOFS); + err2 = unlock_extent_cached(io_tree, hole_start, block_end - 1, + &cached_state, GFP_NOFS); + BUG_ON(err2 < 0); btrfs_put_ordered_extent(ordered); } @@ -3556,8 +3567,9 @@ int btrfs_cont_expand(struct inode *inod } free_extent_map(em); - unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, - GFP_NOFS); + err2 = unlock_extent_cached(io_tree, hole_start, block_end - 1, + &cached_state, GFP_NOFS); + BUG_ON(err2 < 0); return err; } @@ -5624,8 +5636,9 @@ static int btrfs_get_blocks_direct(struc test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { free_extent_map(em); /* DIO will do one hole at a time, so just unlock a sector */ - unlock_extent(&BTRFS_I(inode)->io_tree, start, - start + root->sectorsize - 1, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, start, + start + root->sectorsize - 1, GFP_NOFS); + BUG_ON(ret < 0); return 0; } @@ -5727,6 +5740,7 @@ struct 
btrfs_dio_private { static void btrfs_endio_direct_read(struct bio *bio, int err) { + int ret; struct btrfs_dio_private *dip = bio->bi_private; struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; struct bio_vec *bvec = bio->bi_io_vec; @@ -5767,8 +5781,9 @@ static void btrfs_endio_direct_read(stru bvec++; } while (bvec <= bvec_end); - unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, - dip->logical_offset + dip->bytes - 1, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, + dip->logical_offset + dip->bytes - 1, GFP_NOFS); + BUG_ON(ret < 0); bio->bi_private = dip->private; kfree(dip->csums); @@ -5790,7 +5805,7 @@ static void btrfs_endio_direct_write(str struct extent_state *cached_state = NULL; u64 ordered_offset = dip->logical_offset; u64 ordered_bytes = dip->bytes; - int ret; + int ret, ret2; if (err) goto out_done; @@ -5856,9 +5871,11 @@ again: btrfs_update_inode(trans, root, inode); ret = 0; out_unlock: - unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, - ordered->file_offset + ordered->len - 1, - &cached_state, GFP_NOFS); + ret2 = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + ordered->file_offset, + ordered->file_offset + ordered->len - 1, + &cached_state, GFP_NOFS); + BUG_ON(ret2 < 0); out: btrfs_delalloc_release_metadata(inode, ordered->len); btrfs_end_transaction(trans, root); @@ -6247,8 +6264,9 @@ static ssize_t btrfs_direct_IO(int rw, s lockend - lockstart + 1); if (!ordered) break; - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, + lockend, &cached_state, GFP_NOFS); + BUG_ON(ret < 0); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); cond_resched(); @@ -6468,7 +6486,7 @@ int btrfs_page_mkwrite(struct vm_area_st char *kaddr; unsigned long zero_start; loff_t size; - int ret; + int ret, err; u64 page_start; u64 page_end; @@ -6506,8 +6524,9 @@ 
again: */ ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); @@ -6530,8 +6549,9 @@ again: ret = btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); if (ret) { - unlock_extent_cached(io_tree, page_start, page_end, - &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); ret = VM_FAULT_SIGBUS; goto out_unlock; } @@ -6556,7 +6576,9 @@ again: BTRFS_I(inode)->last_trans = root->fs_info->generation; BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; - unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); + err = unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); + BUG_ON(err < 0); out_unlock: if (!ret) --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -781,7 +781,8 @@ static int should_defrag_range(struct in err = lock_extent(io_tree, start, start + len - 1, GFP_NOFS); BUG_ON(err < 0); em = btrfs_get_extent(inode, NULL, 0, start, len, 0); - unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); + err = unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); + BUG_ON(err < 0); if (IS_ERR(em)) return 0; @@ -912,9 +913,10 @@ again: ordered->file_offset + ordered->len > page_start && ordered->file_offset < page_end) { btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, - &cached_state, GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, + &cached_state, GFP_NOFS); + BUG_ON(ret < 0); for (i = 0; i < i_done; i++) { unlock_page(pages[i]); page_cache_release(pages[i]); @@ -945,9 +947,10 @@ again: 
&cached_state); BUG_ON(ret); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - page_start, page_end - 1, &cached_state, - GFP_NOFS); + ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state, + GFP_NOFS); + BUG_ON(ret < 0); for (i = 0; i < i_done; i++) { clear_page_dirty_for_io(pages[i]); @@ -2139,7 +2142,7 @@ static noinline long btrfs_ioctl_clone(s struct btrfs_key key; u32 nritems; int slot; - int ret; + int ret, err; u64 len = olen; u64 bs = root->fs_info->sb->s_blocksize; u64 hint_byte; @@ -2236,7 +2239,9 @@ static noinline long btrfs_ioctl_clone(s !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, EXTENT_DELALLOC, 0, NULL)) break; - unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(src)->io_tree, off, + off+len, GFP_NOFS); + BUG_ON(ret < 0); if (ordered) btrfs_put_ordered_extent(ordered); btrfs_wait_ordered_range(src, off, len); @@ -2443,7 +2448,8 @@ next: ret = 0; out: btrfs_release_path(path); - unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + err = unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); + BUG_ON(err < 0); out_unlock: mutex_unlock(&src->i_mutex); mutex_unlock(&inode->i_mutex); --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1583,8 +1583,9 @@ int replace_file_extents(struct btrfs_tr btrfs_drop_extent_cache(inode, key.offset, end, 1); - unlock_extent(&BTRFS_I(inode)->io_tree, - key.offset, end, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, + key.offset, end, GFP_NOFS); + BUG_ON(ret < 0); } } @@ -1958,7 +1959,9 @@ static int invalidate_extent_cache(struc GFP_NOFS); BUG_ON(ret < 0); btrfs_drop_extent_cache(inode, start, end, 1); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, + GFP_NOFS); + BUG_ON(ret < 0); } return 0; } @@ -2860,6 +2863,7 @@ int prealloc_file_extent_cluster(struct goto out; while (nr < cluster->nr) { + int err; start = 
cluster->boundary[nr] - offset; if (nr + 1 < cluster->nr) end = cluster->boundary[nr + 1] - 1 - offset; @@ -2873,7 +2877,9 @@ int prealloc_file_extent_cluster(struct ret = btrfs_prealloc_file_range(inode, 0, start, num_bytes, num_bytes, end + 1, &alloc_hint); - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + err = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, + GFP_NOFS); + BUG_ON(err < 0); if (ret) break; nr++; @@ -2892,7 +2898,7 @@ int setup_extent_mapping(struct inode *i struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; - int ret = 0; + int ret = 0, err; em = alloc_extent_map(); if (!em) @@ -2917,7 +2923,8 @@ int setup_extent_mapping(struct inode *i } btrfs_drop_extent_cache(inode, start, end, 0); } - unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + err = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + BUG_ON(err < 0); return ret; } @@ -3016,8 +3023,9 @@ static int relocate_file_extent_cluster( BUG_ON(ret); set_page_dirty(page); - unlock_extent(&BTRFS_I(inode)->io_tree, - page_start, page_end, GFP_NOFS); + ret = unlock_extent(&BTRFS_I(inode)->io_tree, + page_start, page_end, GFP_NOFS); + BUG_ON(ret < 0); unlock_page(page); page_cache_release(page); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
pin_down_extent performs some operations which can't fail and then calls set_extent_dirty, which has two failure cases via set_extent_bit: 1) Return -EEXIST if exclusive bits are set - Since it doesn't use any exclusive bits, this failure case can't occur. 2) Return -ENOMEM if memory can't be allocated - Since it's called with gfp_flags & __GFP_NOFAIL, this failure case can't occur. With no failure cases, it can return void. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4175,9 +4175,9 @@ static u64 first_logical_byte(struct btr return bytenr; } -static int pin_down_extent(struct btrfs_root *root, - struct btrfs_block_group_cache *cache, - u64 bytenr, u64 num_bytes, int reserved) +static void pin_down_extent(struct btrfs_root *root, + struct btrfs_block_group_cache *cache, + u64 bytenr, u64 num_bytes, int reserved) { spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); @@ -4194,7 +4194,6 @@ static int pin_down_extent(struct btrfs_ /* __GFP_NOFAIL means it can't return -ENOMEM */ set_extent_dirty(root->fs_info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); - return 0; } /* -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 8/9] btrfs: Push up btrfs_pin_extent failures
btrfs_pin_extent looks up a block group and then calls pin_down_extent with it. If the lookup fails, it should return -ENOENT to allow callers to handle the error condition. For the three existing callers, it is a logic error if the lookup fails and a panic will occur. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 20 +++++++++++++++----- fs/btrfs/tree-log.c | 10 +++++++--- 2 files changed, 22 insertions(+), 8 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2075,8 +2075,14 @@ static int run_one_delayed_ref(struct bt BUG_ON(extent_op); head = btrfs_delayed_node_to_head(node); if (insert_reserved) { - btrfs_pin_extent(root, node->bytenr, - node->num_bytes, 1); + ret = btrfs_pin_extent(root, node->bytenr, + node->num_bytes, 1); + if (ret) + btrfs_panic(root->fs_info, ret, + "Cannot pin extent in range " + "%llu(%llu)\n", + node->bytenr, node->num_bytes); + if (head->is_data) { ret = btrfs_del_csums(trans, root, node->bytenr, @@ -4205,7 +4211,8 @@ int btrfs_pin_extent(struct btrfs_root * struct btrfs_block_group_cache *cache; cache = btrfs_lookup_block_group(root->fs_info, bytenr); - BUG_ON(!cache); + if (cache == NULL) + return -ENOENT; pin_down_extent(root, cache, bytenr, num_bytes, reserved); @@ -4765,8 +4772,11 @@ int btrfs_free_extent(struct btrfs_trans if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID); /* unlocks the pinned mutex */ - btrfs_pin_extent(root, bytenr, num_bytes, 1); - ret = 0; + ret = btrfs_pin_extent(root, bytenr, num_bytes, 1); + if (ret) + btrfs_panic(root->fs_info, ret, "Cannot pin " + "extent in range %llu(%llu)\n", + bytenr, num_bytes); } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes, parent, root_objectid, (int)owner, --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -275,9 +275,13 @@ static int process_one_buffer(struct btr struct extent_buffer *eb, struct walk_control *wc, u64 gen) { 
- if (wc->pin) - btrfs_pin_extent(log->fs_info->extent_root, - eb->start, eb->len, 0); + if (wc->pin) { + int ret = btrfs_pin_extent(log->fs_info->extent_root, + eb->start, eb->len, 0); + if (ret) + btrfs_panic(log->fs_info, ret, "Cannot pin extent in " + "range %llu(%llu)\n", eb->start, eb->len); + } if (btrfs_buffer_uptodate(eb, gen)) { if (wc->write) -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Jeff Mahoney
2011-Aug-15 19:50 UTC
[patch v2 9/9] btrfs: Push up non-looped btrfs_start_transaction failures
This patch handles btrfs_start_transaction failures that don't occur in a loop and are obvious to simply push up. In all cases except the mark_garbage_root case, the error is already handled by BUG_ON in the caller. Update v2: This version also checks the returns from btrfs_drop_snapshot. Signed-off-by: Jeff Mahoney <jeffm@suse.com> --- fs/btrfs/extent-tree.c | 6 +++++- fs/btrfs/relocation.c | 9 ++++++--- fs/btrfs/transaction.c | 6 ++++-- fs/btrfs/tree-log.c | 5 ++++- fs/btrfs/volumes.c | 3 ++- 5 files changed, 21 insertions(+), 8 deletions(-) --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo } trans = btrfs_start_transaction(tree_root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) { + kfree(wc); + btrfs_free_path(path); + return PTR_ERR(trans); + } if (block_rsv) trans->block_rsv = block_rsv; --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2251,7 +2251,8 @@ again: } else { list_del_init(&reloc_root->root_list); } - btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); + ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); + BUG_ON(ret < 0); } if (found) { @@ -4096,7 +4097,8 @@ static noinline_for_stack int mark_garba int ret; trans = btrfs_start_transaction(root->fs_info->tree_root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); @@ -4176,7 +4178,8 @@ int btrfs_recover_relocation(struct btrf err = ret; goto out; } - mark_garbage_root(reloc_root); + ret = mark_garbage_root(reloc_root); + BUG_ON(ret < 0); } } --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1400,6 +1400,7 @@ int btrfs_commit_transaction(struct btrf */ int btrfs_clean_old_snapshots(struct btrfs_root *root) { + int ret; LIST_HEAD(list); struct btrfs_fs_info *fs_info = root->fs_info; @@ -1415,9 +1416,10 @@ int btrfs_clean_old_snapshots(struct btr if (btrfs_header_backref_rev(root->node) < 
BTRFS_MIXED_BACKREF_REV) - btrfs_drop_snapshot(root, NULL, 0); + ret = btrfs_drop_snapshot(root, NULL, 0); else - btrfs_drop_snapshot(root, NULL, 1); + ret = btrfs_drop_snapshot(root, NULL, 1); + BUG_ON(ret < 0); } return 0; } --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs fs_info->log_root_recovering = 1; trans = btrfs_start_transaction(fs_info->tree_root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) { + btrfs_free_path(path); + return PTR_ERR(trans); + } wc.trans = trans; wc.pin = 1; --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b return ret; trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); lock_chunks(root); -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
Reasonably Related Threads
- [patch 00/65] Error handling patchset v3
- [PATCH] Btrfs: fix very slow inode eviction and fs unmount
- [PATCH 1/2 v3] Btrfs: use flag EXTENT_DEFRAG for snapshot-aware defrag
- [PATCH 1/5] fs: allow short direct-io reads to be completed via buffered IO V2
- [PATCH 0/4] btrfs: out-of-band (aka offline) dedupe v4