Currently btrfs stores the highest objectid of the fs tree, and it always returns (highest+1) as the inode number when we create a file. Inode numbers are therefore never reclaimed when we delete files, so we'll run out of inode numbers as we keep creating and deleting files on 32-bit machines. This patchset aims to fix this, and it works similarly to free space caching for block groups.

I've run xfstests, and I also tested it with snapshots, balance, etc.

The patchset is also available in:

  git://repo.or.cz/linux-btrfs-devel.git ino-alloc

---
 fs/btrfs/btrfs_inode.h      |    9 +
 fs/btrfs/compression.c      |    5 +-
 fs/btrfs/ctree.h            |   29 +-
 fs/btrfs/disk-io.c          |   19 +
 fs/btrfs/export.c           |   25 +-
 fs/btrfs/extent-tree.c      |   50 ++--
 fs/btrfs/extent_io.c        |    4 +-
 fs/btrfs/file-item.c        |    5 +-
 fs/btrfs/file.c             |   27 +-
 fs/btrfs/free-space-cache.c |  873 ++++++++++++++++++++++++++-----------------
 fs/btrfs/free-space-cache.h |   48 +++-
 fs/btrfs/inode-map.c        |  428 +++++++++++++++++++++-
 fs/btrfs/inode-map.h        |   13 +
 fs/btrfs/inode.c            |  282 ++++++++------
 fs/btrfs/ioctl.c            |   22 +-
 fs/btrfs/relocation.c       |   27 +-
 fs/btrfs/transaction.c      |   13 +-
 fs/btrfs/tree-log.c         |   58 ++--
 fs/btrfs/xattr.c            |    8 +-
 19 files changed, 1352 insertions(+), 593 deletions(-)
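[Editor's note: for readers new to this area, the allocator being replaced is essentially a monotonically increasing counter, so deleting files never returns numbers to the pool. Below is a simplified sketch of that behaviour, not the verbatim kernel code; error handling and the initial highest-objectid lookup are omitted.]

	static int find_free_objectid_old(struct btrfs_root *root, u64 *objectid)
	{
		int ret = 0;

		mutex_lock(&root->objectid_mutex);
		if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
			/* numbers are never reclaimed, so this is permanent */
			ret = -ENOSPC;
			goto out;
		}
		*objectid = ++root->highest_objectid;
	out:
		mutex_unlock(&root->objectid_mutex);
		return ret;
	}

On a 32-bit machine, where the VFS ino_t is 32 bits wide, enough create/delete cycles exhaust this counter even when only a handful of inodes are actually live.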
Li Zefan
2011-Mar-16 08:49 UTC
[PATCH 1/7] Btrfs: Remove unused btrfs_block_group_free_space()
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
---
 fs/btrfs/free-space-cache.c |   15 ---------------
 fs/btrfs/free-space-cache.h |    1 -
 2 files changed, 0 insertions(+), 16 deletions(-)

diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index a039065..4e27eaa 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1601,21 +1601,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 	       "\n", count);
 }
 
-u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
-{
-	struct btrfs_free_space *info;
-	struct rb_node *n;
-	u64 ret = 0;
-
-	for (n = rb_first(&block_group->free_space_offset); n;
-	     n = rb_next(n)) {
-		info = rb_entry(n, struct btrfs_free_space, offset_index);
-		ret += info->bytes;
-	}
-
-	return ret;
-}
-
 /*
  * for a given cluster, put all of its extents back into the free
  * space cache. If the block group passed doesn't match the block group
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index e49ca5c..ef5d7e1 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -55,7 +55,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 			       u64 offset, u64 bytes, u64 empty_size);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 			   u64 bytes);
-u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct btrfs_block_group_cache *block_group,
-- 
1.7.3.1
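[Editor's note: the helper removed here recomputed the total by walking the whole rb-tree on every call, while the cache already maintains a running total that link_free_space()/unlink_free_space() keep in sync. Callers can read that field directly; a hypothetical equivalent of the removed walk (the helper name is invented for illustration):]

	/* block_group->free_space is updated on every link/unlink */
	static inline u64 block_group_free_space(struct btrfs_block_group_cache *bg)
	{
		return bg->free_space;
	}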
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
---
 fs/btrfs/free-space-cache.c |   20 ++++++++------------
 1 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4e27eaa..bc60114 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1063,15 +1063,13 @@ static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *info, u64 offset,
 			      u64 bytes)
 {
-	unsigned long start, end;
-	unsigned long i;
+	unsigned long start, count;
 
 	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-	end = start + bytes_to_bits(bytes, block_group->sectorsize);
-	BUG_ON(end > BITS_PER_BITMAP);
+	count = bytes_to_bits(bytes, block_group->sectorsize);
+	BUG_ON(start + count > BITS_PER_BITMAP);
 
-	for (i = start; i < end; i++)
-		clear_bit(i, info->bitmap);
+	bitmap_clear(info->bitmap, start, count);
 
 	info->bytes -= bytes;
 	block_group->free_space -= bytes;
@@ -1081,15 +1079,13 @@ static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
 			    struct btrfs_free_space *info, u64 offset,
 			    u64 bytes)
 {
-	unsigned long start, end;
-	unsigned long i;
+	unsigned long start, count;
 
 	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-	end = start + bytes_to_bits(bytes, block_group->sectorsize);
-	BUG_ON(end > BITS_PER_BITMAP);
+	count = bytes_to_bits(bytes, block_group->sectorsize);
+	BUG_ON(start + count > BITS_PER_BITMAP);
 
-	for (i = start; i < end; i++)
-		set_bit(i, info->bitmap);
+	bitmap_set(info->bitmap, start, count);
 
 	info->bytes += bytes;
 	block_group->free_space += bytes;
-- 
1.7.3.1
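[Editor's note: the conversion above is behaviour-preserving. bitmap_set()/bitmap_clear() from <linux/bitmap.h> act on a whole run of bits at once, which lets the implementation work a word at a time instead of looping set_bit()/clear_bit(). A toy illustration of the helpers this patch switches to:]

	#include <linux/bitmap.h>

	static void bitmap_helpers_demo(void)
	{
		DECLARE_BITMAP(map, 64);

		bitmap_zero(map, 64);
		bitmap_set(map, 4, 8);		/* set bits 4..11, like the old for-loop */
		bitmap_clear(map, 6, 2);	/* clear bits 6 and 7 */
	}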
So we can re-use the code to cache free inode numbers. The change is quite straightforward. Two new structures are introduced. - struct btrfs_free_space_ctl We move those variables that are used for caching free space from struct btrfs_block_group_cache to this new struct. - struct btrfs_free_space_op We do block group specific work (e.g. calculation of extents threshold) through functions registered in this struct. And then we can remove references to struct btrfs_block_group_cache. Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> --- fs/btrfs/ctree.h | 7 +- fs/btrfs/extent-tree.c | 37 +++-- fs/btrfs/free-space-cache.c | 367 +++++++++++++++++++++++-------------------- fs/btrfs/free-space-cache.h | 20 +++ 4 files changed, 238 insertions(+), 193 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8b4b9d1..2a1e36e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -829,9 +829,6 @@ struct btrfs_block_group_cache { u64 bytes_super; u64 flags; u64 sectorsize; - int extents_thresh; - int free_extents; - int total_bitmaps; unsigned int ro:1; unsigned int dirty:1; unsigned int iref:1; @@ -846,9 +843,7 @@ struct btrfs_block_group_cache { struct btrfs_space_info *space_info; /* free space cache stuff */ - spinlock_t tree_lock; - struct rb_root free_space_offset; - u64 free_space; + struct btrfs_free_space_ctl *free_space_ctl; /* block group cache stuff */ struct rb_node cache_node; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f1db57d..49eb826 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -88,6 +88,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); WARN_ON(cache->reserved_pinned > 0); + kfree(cache->free_space_ctl); kfree(cache); } } @@ -4808,7 +4809,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, return 0; wait_event(caching_ctl->wait, block_group_cache_done(cache) || - (cache->free_space >= num_bytes)); + (cache->free_space_ctl->free_space >= num_bytes)); put_caching_control(caching_ctl); return 0; @@ -8433,10 +8434,16 @@ int btrfs_read_block_groups(struct btrfs_root *root) ret = -ENOMEM; goto error; } + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + ret = -ENOMEM; + goto error; + } atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); cache->fs_info = info; INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); @@ -8444,14 +8451,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (need_clear) cache->disk_cache_state = BTRFS_DC_CLEAR; - /* - * we only want to have 32k of ram per block group for keeping - * track of free space, and if we pass 1/2 of that we want to - * start converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); - read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -8462,6 +8461,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; + btrfs_init_free_space_ctl(cache); + /* * We need to exclude the super stripes now so that the space * info has super bytes accounted for, otherwise we''ll think @@ -8548,6 +8549,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) return -ENOMEM; + 
cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + return -ENOMEM; + } cache->key.objectid = chunk_offset; cache->key.offset = size; @@ -8555,19 +8562,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->sectorsize = root->sectorsize; cache->fs_info = root->fs_info; - /* - * we only want to have 32k of ram per block group for keeping track - * of free space, and if we pass 1/2 of that we want to start - * converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); + btrfs_init_free_space_ctl(cache); + btrfs_set_block_group_used(&cache->item, bytes_used); btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); cache->flags = type; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index bc60114..8799208 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -28,9 +28,7 @@ #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) -static void recalculate_thresholds(struct btrfs_block_group_cache - *block_group); -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); struct inode *lookup_free_space_inode(struct btrfs_root *root, @@ -209,6 +207,7 @@ static int readahead_cache(struct inode *inode) int load_free_space_cache(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_root *root = fs_info->tree_root; struct inode *inode; struct btrfs_free_space_header *header; @@ -412,9 +411,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } if (entry->type == BTRFS_FREE_SPACE_EXTENT) { - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + spin_unlock(&ctl->tree_lock); BUG_ON(ret); } else { e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); @@ -425,11 +424,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, page_cache_release(page); goto free_cache; } - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - block_group->total_bitmaps++; - recalculate_thresholds(block_group); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + ctl->total_bitmaps++; + ctl->op->recalc_thresholds(ctl); + spin_unlock(&ctl->tree_lock); list_add_tail(&e->list, &bitmaps); } @@ -486,6 +485,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct inode *inode; @@ -524,7 +524,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } - node = rb_first(&block_group->free_space_offset); + node = rb_first(&ctl->free_space_offset); if (!node) { iput(inode); return 0; @@ -780,30 +780,30 @@ out_free: return ret; } -static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, +static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, u64 offset) { 
BUG_ON(offset < bitmap_start); offset -= bitmap_start; - return (unsigned long)(div64_u64(offset, sectorsize)); + return (unsigned long)(div_u64(offset, unit)); } -static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) +static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) { - return (unsigned long)(div64_u64(bytes, sectorsize)); + return (unsigned long)(div_u64(bytes, unit)); } -static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, +static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) { u64 bitmap_start; u64 bytes_per_bitmap; - bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; - bitmap_start = offset - block_group->key.objectid; + bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; + bitmap_start = offset - ctl->start; bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); bitmap_start *= bytes_per_bitmap; - bitmap_start += block_group->key.objectid; + bitmap_start += ctl->start; return bitmap_start; } @@ -861,10 +861,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * offset. */ static struct btrfs_free_space * -tree_search_offset(struct btrfs_block_group_cache *block_group, +tree_search_offset(struct btrfs_free_space_ctl *ctl, u64 offset, int bitmap_only, int fuzzy) { - struct rb_node *n = block_group->free_space_offset.rb_node; + struct rb_node *n = ctl->free_space_offset.rb_node; struct btrfs_free_space *entry, *prev = NULL; /* find entry that is closest to the ''offset'' */ @@ -960,8 +960,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, break; } } - if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) return entry; } else if (entry->offset + entry->bytes > offset) return entry; @@ -972,7 +971,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, while (1) { if (entry->bitmap) { if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + ctl->unit > offset) break; } else { if (entry->offset + entry->bytes > offset) @@ -988,38 +987,39 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, } static inline void -__unlink_free_space(struct btrfs_block_group_cache *block_group, +__unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - rb_erase(&info->offset_index, &block_group->free_space_offset); - block_group->free_extents--; + rb_erase(&info->offset_index, &ctl->free_space_offset); + ctl->free_extents--; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, +static void unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - __unlink_free_space(block_group, info); - block_group->free_space -= info->bytes; + __unlink_free_space(ctl, info); + ctl->free_space -= info->bytes; } -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { int ret = 0; BUG_ON(!info->bitmap && !info->bytes); - ret = tree_insert_offset(&block_group->free_space_offset, info->offset, + ret = tree_insert_offset(&ctl->free_space_offset, info->offset, &info->offset_index, (info->bitmap != NULL)); if (ret) return ret; - block_group->free_space += info->bytes; - block_group->free_extents++; + ctl->free_space += info->bytes; + ctl->free_extents++; return ret; } -static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) +static void 
recalculate_thresholds(struct btrfs_free_space_ctl *ctl) { + struct btrfs_block_group_cache *block_group = ctl->private; u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; @@ -1041,10 +1041,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as * we add more bitmaps. */ - bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; + bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; if (bitmap_bytes >= max_bytes) { - block_group->extents_thresh = 0; + ctl->extents_thresh = 0; return; } @@ -1055,43 +1055,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) extent_bytes = max_bytes - bitmap_bytes; extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); - block_group->extents_thresh = + ctl->extents_thresh = div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); } -static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - count = bytes_to_bits(bytes, block_group->sectorsize); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); BUG_ON(start + count > BITS_PER_BITMAP); bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; - block_group->free_space -= bytes; + ctl->free_space -= bytes; } -static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - count = bytes_to_bits(bytes, block_group->sectorsize); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); BUG_ON(start + count > BITS_PER_BITMAP); bitmap_set(info->bitmap, start, count); info->bytes += bytes; - block_group->free_space += bytes; + ctl->free_space += bytes; } -static int search_bitmap(struct btrfs_block_group_cache *block_group, +static int search_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1099,9 +1099,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, unsigned long bits, i; unsigned long next_zero; - i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, + i = offset_to_bit(bitmap_info->offset, ctl->unit, max_t(u64, *offset, bitmap_info->offset)); - bits = bytes_to_bits(*bytes, block_group->sectorsize); + bits = bytes_to_bits(*bytes, ctl->unit); for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); i < BITS_PER_BITMAP; @@ -1116,29 +1116,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, } if (found_bits) { - *offset = (u64)(i * block_group->sectorsize) + - bitmap_info->offset; - *bytes = (u64)(found_bits) * block_group->sectorsize; + *offset = (u64)(i * ctl->unit) + bitmap_info->offset; + *bytes = (u64)(found_bits) * ctl->unit; return 0; } return -1; } -static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache - *block_group, u64 *offset, - u64 *bytes, int debug) +static struct btrfs_free_space * +find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) { struct btrfs_free_space *entry; struct rb_node *node; int ret; - if
(!block_group->free_space_offset.rb_node) + if (!ctl->free_space_offset.rb_node) return NULL; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, *offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); if (!entry) return NULL; @@ -1148,7 +1144,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache continue; if (entry->bitmap) { - ret = search_bitmap(block_group, entry, offset, bytes); + ret = search_bitmap(ctl, entry, offset, bytes); if (!ret) return entry; continue; @@ -1162,33 +1158,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache return NULL; } -static void add_new_bitmap(struct btrfs_block_group_cache *block_group, +static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset) { - u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; - int max_bitmaps = (int)div64_u64(block_group->key.offset + - bytes_per_bg - 1, bytes_per_bg); - BUG_ON(block_group->total_bitmaps >= max_bitmaps); - - info->offset = offset_to_bitmap(block_group, offset); + info->offset = offset_to_bitmap(ctl, offset); info->bytes = 0; - link_free_space(block_group, info); - block_group->total_bitmaps++; + link_free_space(ctl, info); + ctl->total_bitmaps++; - recalculate_thresholds(block_group); + ctl->op->recalc_thresholds(ctl); } -static void free_bitmap(struct btrfs_block_group_cache *block_group, +static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { - unlink_free_space(block_group, bitmap_info); + unlink_free_space(ctl, bitmap_info); kfree(bitmap_info->bitmap); kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } -static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, +static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1197,8 +1188,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro int ret; again: - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; /* * XXX - this can go away after a few releases. 
@@ -1213,24 +1203,22 @@ again: search_start = *offset; search_bytes = *bytes; search_bytes = min(search_bytes, end - search_start + 1); - ret = search_bitmap(block_group, bitmap_info, &search_start, - &search_bytes); + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); if (*offset > bitmap_info->offset && *offset + *bytes > end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, - end - *offset + 1); + bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); *bytes -= end - *offset + 1; *offset = end + 1; } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); + bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes); *bytes = 0; } if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1257,33 +1245,30 @@ again: */ search_start = *offset; search_bytes = *bytes; - ret = search_bitmap(block_group, bitmap_info, &search_start, + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); if (ret < 0 || search_start != *offset) return -EAGAIN; goto again; } else if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); return 0; } -static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) { - struct btrfs_free_space *bitmap_info; - int added = 0; - u64 bytes, offset, end; - int ret; + struct btrfs_block_group_cache *block_group = ctl->private; /* * If we are below the extents threshold then we can add this as an * extent, and don''t have to deal with the bitmap */ - if (block_group->free_extents < block_group->extents_thresh && + if (ctl->free_extents < ctl->extents_thresh && info->bytes > block_group->sectorsize * 4) - return 0; + return false; /* * some block groups are so tiny they can''t be enveloped by a bitmap, so @@ -1291,31 +1276,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, */ if (BITS_PER_BITMAP * block_group->sectorsize > block_group->key.offset) - return 0; + return false; + + return true; +} + +static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + struct btrfs_free_space *bitmap_info; + int added = 0; + u64 bytes, offset, end; + int ret; bytes = info->bytes; offset = info->offset; + if (!ctl->op->use_bitmap(ctl, info)) + return 0; + again: - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!bitmap_info) { BUG_ON(added); goto new_bitmap; } - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize); + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(block_group, bitmap_info, offset, - end - offset); + bitmap_set_bits(ctl, bitmap_info, offset, end - offset); bytes -= end - offset; offset = end; added = 0; } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(block_group, bitmap_info, offset, bytes); + bitmap_set_bits(ctl, bitmap_info, offset, bytes); bytes = 0; } else { BUG(); @@ -1329,19 +1325,19 @@ again: new_bitmap: if 
(info && info->bitmap) { - add_new_bitmap(block_group, info, offset); + add_new_bitmap(ctl, info, offset); added = 1; info = NULL; goto again; } else { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* no pre-allocated info, allocate a new one */ if (!info) { info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); if (!info) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = -ENOMEM; goto out; } @@ -1349,7 +1345,7 @@ new_bitmap: /* allocate the bitmap */ info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; goto out; @@ -1367,7 +1363,7 @@ out: return ret; } -bool try_merge_free_space(struct btrfs_block_group_cache *block_group, +bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; @@ -1381,18 +1377,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - right_info = tree_search_offset(block_group, offset + bytes, 0, 0); + right_info = tree_search_offset(ctl, offset + bytes, 0, 0); if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); else - left_info = tree_search_offset(block_group, offset - 1, 0, 0); + left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { if (update_stat) - unlink_free_space(block_group, right_info); + unlink_free_space(ctl, right_info); else - __unlink_free_space(block_group, right_info); + __unlink_free_space(ctl, right_info); info->bytes += right_info->bytes; kfree(right_info); merged = true; @@ -1401,9 +1397,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { if (update_stat) - unlink_free_space(block_group, left_info); + unlink_free_space(ctl, left_info); else - __unlink_free_space(block_group, left_info); + __unlink_free_space(ctl, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); @@ -1416,6 +1412,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; int ret = 0; @@ -1426,9 +1423,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset; info->bytes = bytes; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (try_merge_free_space(block_group, info, true)) + if (try_merge_free_space(ctl, info, true)) goto link; /* @@ -1436,7 +1433,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * extent then we know we''re going to have to allocate a new extent, so * before we do that see if we need to drop this into a bitmap */ - ret = insert_into_bitmap(block_group, info); + ret = insert_into_bitmap(ctl, info); if (ret < 0) { goto out; } else if (ret) { @@ -1444,11 +1441,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, goto out; } link: - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); if (ret) kfree(info); out: - spin_unlock(&block_group->tree_lock); + 
spin_unlock(&ctl->tree_lock); if (ret) { printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); @@ -1461,21 +1458,21 @@ out: int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct btrfs_free_space *next_info = NULL; int ret = 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); again: - info = tree_search_offset(block_group, offset, 0, 0); + info = tree_search_offset(ctl, offset, 0, 0); if (!info) { /* * oops didn''t find an extent that matched the space we wanted * to remove, look for a bitmap instead */ - info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!info) { WARN_ON(1); @@ -1490,8 +1487,8 @@ again: offset_index); if (next_info->bitmap) - end = next_info->offset + BITS_PER_BITMAP * - block_group->sectorsize - 1; + end = next_info->offset + + BITS_PER_BITMAP * ctl->unit - 1; else end = next_info->offset + next_info->bytes; @@ -1511,20 +1508,20 @@ again: } if (info->bytes == bytes) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) { kfree(info->bitmap); - block_group->total_bitmaps--; + ctl->total_bitmaps--; } kfree(info); goto out_lock; } if (!info->bitmap && info->offset == offset) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); info->offset += bytes; info->bytes -= bytes; - link_free_space(block_group, info); + link_free_space(ctl, info); goto out_lock; } @@ -1538,13 +1535,13 @@ again: * first unlink the old info and then * insert it again after the hole we''re creating */ - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (offset + bytes < info->offset + info->bytes) { u64 old_end = info->offset + info->bytes; info->offset = offset + bytes; info->bytes = old_end - info->offset; - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); WARN_ON(ret); if (ret) goto out_lock; @@ -1554,7 +1551,7 @@ again: */ kfree(info); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* step two, insert a new info struct to cover * anything before the hole @@ -1565,12 +1562,12 @@ again: goto out; } - ret = remove_from_bitmap(block_group, info, &offset, &bytes); + ret = remove_from_bitmap(ctl, info, &offset, &bytes); if (ret == -EAGAIN) goto again; BUG_ON(ret); out_lock: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); out: return ret; } @@ -1578,11 +1575,12 @@ out: void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *n; int count = 0; - for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { + for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; @@ -1597,6 +1595,30 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } +static struct btrfs_free_space_op free_space_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = block_group->sectorsize; + 
ctl->start = block_group->key.objectid; + ctl->private = block_group; + ctl->op = &free_space_op; + + /* + * we only want to have 32k of ram per block group for keeping + * track of free space, and if we pass 1/2 of that we want to + * start converting things over to using bitmaps + */ + ctl->extents_thresh = ((1024 * 32) / 2) / + sizeof(struct btrfs_free_space); +} + /* * for a given cluster, put all of its extents back into the free * space cache. If the block group passed doesn't match the block group @@ -1608,6 +1630,7 @@ __btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; bool bitmap; @@ -1631,8 +1654,8 @@ __btrfs_return_cluster_to_free_space( node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); BUG_ON(entry->bitmap); - try_merge_free_space(block_group, entry, false); - tree_insert_offset(&block_group->free_space_offset, + try_merge_free_space(ctl, entry, false); + tree_insert_offset(&ctl->free_space_offset, entry->offset, &entry->offset_index, 0); } cluster->root = RB_ROOT; @@ -1645,12 +1668,13 @@ out: void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *node; struct btrfs_free_cluster *cluster; struct list_head *head; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); while ((head = block_group->cluster_list.next) != &block_group->cluster_list) { cluster = list_entry(head, struct btrfs_free_cluster, @@ -1659,57 +1683,58 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) WARN_ON(cluster->block_group != block_group); __btrfs_return_cluster_to_free_space(block_group, cluster); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } - while ((node = rb_last(&block_group->free_space_offset)) != NULL) { + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) kfree(info->bitmap); kfree(info); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; u64 bytes_search = bytes + empty_size; u64 ret = 0; - spin_lock(&block_group->tree_lock); - entry = find_free_space(block_group, &offset, &bytes_search, 0); + spin_lock(&ctl->tree_lock); + entry = find_free_space(ctl, &offset, &bytes_search); if (!entry) goto out; ret = offset; if (entry->bitmap) { - bitmap_clear_bits(block_group, entry, offset, bytes); + bitmap_clear_bits(ctl, entry, offset, bytes); if (!entry->bytes) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else { - unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); entry->offset += bytes; entry->bytes -= bytes; if (!entry->bytes) kfree(entry); else -
link_free_space(block_group, entry); + link_free_space(ctl, entry); } out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1726,6 +1751,7 @@ int btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl; int ret; /* first, get a safe pointer to the block group */ @@ -1744,10 +1770,12 @@ int btrfs_return_cluster_to_free_space( atomic_inc(&block_group->count); spin_unlock(&cluster->lock); + ctl = block_group->free_space_ctl; + /* now return any extents the cluster had on it */ - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = __btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* finally drop our ref */ btrfs_put_block_group(block_group); @@ -1758,13 +1786,14 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; int err; u64 search_start = cluster->window_start; u64 search_bytes = bytes; u64 ret = 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); spin_lock(&cluster->lock); if (!cluster->points_to_bitmap) @@ -1779,8 +1808,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only * to 1 to make sure we get the bitmap entry */ - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, search_start), + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, search_start), 1, 0); if (!entry || !entry->bitmap) goto out; @@ -1788,18 +1816,17 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, search_start = min_start; search_bytes = bytes; - err = search_bitmap(block_group, entry, &search_start, - &search_bytes); + err = search_bitmap(ctl, entry, &search_start, &search_bytes); if (err) goto out; ret = search_start; - bitmap_clear_bits(block_group, entry, ret, bytes); + bitmap_clear_bits(ctl, entry, ret, bytes); if (entry->bytes == 0) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); out: spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1813,6 +1840,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct rb_node *node; u64 ret = 0; @@ -1860,15 +1888,15 @@ out: if (!ret) return 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - block_group->free_space -= bytes; + ctl->free_space -= bytes; if (entry->bytes == 0) { - block_group->free_extents--; + ctl->free_extents--; kfree(entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1950,6 +1978,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct rb_node *node; struct btrfs_free_space *next; @@ -1977,7 +2006,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } else min_bytes = max(bytes, (bytes + empty_size) >> 
2); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); spin_lock(&cluster->lock); /* someone already found a cluster, hooray */ @@ -1986,7 +2015,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, goto out; } again: - entry = tree_search_offset(block_group, offset, found_bitmap, 1); + entry = tree_search_offset(ctl, offset, found_bitmap, 1); if (!entry) { ret = -ENOSPC; goto out; } @@ -2097,7 +2126,7 @@ again: break; } - rb_erase(&entry->offset_index, &block_group->free_space_offset); + rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 0); BUG_ON(ret); @@ -2116,7 +2145,7 @@ got_it: cluster->block_group = block_group; out: spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index ef5d7e1..322b7e0 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -27,6 +27,25 @@ struct btrfs_free_space { struct list_head list; }; +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + struct btrfs_free_space_op *op; + void *private; +}; + +struct btrfs_free_space_op { + void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl); + bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info); +}; + struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); @@ -45,6 +64,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, -- 1.7.3.1
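[Editor's note: the payoff of the ctl/ops split is that a new consumer of the free-space code only has to supply its own policy callbacks and initialize a ctl. A hypothetical minimal consumer is sketched below (the names are invented for illustration); the next patch does essentially this for inode-number caching, including an always-extents policy for its pinned tree.]

	static void example_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
	{
		/* keep the initial extents threshold unchanged */
	}

	static bool example_use_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info)
	{
		return false;	/* always store free runs as extent entries */
	}

	static struct btrfs_free_space_op example_free_space_op = {
		.recalc_thresholds	= example_recalc_thresholds,
		.use_bitmap		= example_use_bitmap,
	};

	static void example_init_ctl(struct btrfs_free_space_ctl *ctl)
	{
		spin_lock_init(&ctl->tree_lock);
		ctl->unit = 1;		/* track single numbers, not sectors */
		ctl->start = 0;
		ctl->private = NULL;
		ctl->op = &example_free_space_op;
		ctl->extents_thresh = 0;
	}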
Currently btrfs stores the highest objectid of the fs tree, and it always returns (highest+1) as the inode number when we create a file, so inode numbers are never reclaimed when we delete files, and we'll run out of inode numbers as we keep creating and deleting files on 32-bit machines. This fixes it, and it works similarly to how we cache free space for block groups. We start a kernel thread to read the file tree. By scanning inode items, we know which chunks of inode numbers are free, and we cache them in an rb-tree. Because we are searching the commit root, we have to carefully handle the cross-transaction case. The rb-tree is a hybrid extent+bitmap tree, so if we have too many small chunks of inode numbers, we'll use bitmaps. Initially we allow 16K of ram for extents, and a bitmap will be used if we exceed this threshold. The extents threshold is adjusted at runtime. Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> --- fs/btrfs/ctree.h | 15 +- fs/btrfs/disk-io.c | 18 +++ fs/btrfs/free-space-cache.c | 95 ++++++++++--- fs/btrfs/free-space-cache.h | 16 ++- fs/btrfs/inode-map.c | 341 ++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/inode-map.h | 11 ++ fs/btrfs/inode.c | 42 ++++-- fs/btrfs/ioctl.c | 4 +- fs/btrfs/relocation.c | 3 +- fs/btrfs/transaction.c | 7 +- 10 files changed, 499 insertions(+), 53 deletions(-) create mode 100644 fs/btrfs/inode-map.h diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2a1e36e..40f38f7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1101,6 +1101,15 @@ struct btrfs_root { spinlock_t accounting_lock; struct btrfs_block_rsv *block_rsv; + /* free ino cache stuff */ + struct mutex fs_commit_mutex; + struct btrfs_free_space_ctl *free_ino_ctl; + enum btrfs_caching_type cached; + spinlock_t cache_lock; + wait_queue_head_t cache_wait; + struct btrfs_free_space_ctl *free_ino_pinned; + u64 cache_progress; + struct mutex log_mutex; wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; @@ -2395,12 +2404,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset); int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); -/* inode-map.c */ -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *fs_root, - u64 dirid, u64 *objectid); -int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid); - /* inode-item.c */ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3e1ea3e..5c92066 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -40,6 +40,7 @@ #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" +#include "inode-map.h" static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); @@ -1233,6 +1234,19 @@ again: if (IS_ERR(root)) return root; + root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); + if (!root->free_ino_ctl) + goto fail; + root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), + GFP_NOFS); + if (!root->free_ino_pinned) + goto fail; + + btrfs_init_free_ino_ctl(root); + mutex_init(&root->fs_commit_mutex); + spin_lock_init(&root->cache_lock); + init_waitqueue_head(&root->cache_wait); + set_anon_super(&root->anon_super, NULL); if (btrfs_root_refs(&root->root_item) == 0) { @@ -2381,6 +2395,8 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); +
__btrfs_remove_free_space_cache(root->free_ino_pinned); + __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); return 0; } @@ -2394,6 +2410,8 @@ static void free_fs_root(struct btrfs_root *root) } free_extent_buffer(root->node); free_extent_buffer(root->commit_root); + kfree(root->free_ino_ctl); + kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8799208..e0dbb3a 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -24,6 +24,7 @@ #include "free-space-cache.h" #include "transaction.h" #include "disk-io.h" +#include "inode-map.h" #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) @@ -102,7 +103,7 @@ int create_free_space_inode(struct btrfs_root *root, u64 objectid; int ret; - ret = btrfs_find_free_objectid(trans, root, 0, &objectid); + ret = btrfs_find_free_objectid(root, &objectid); if (ret < 0) return ret; @@ -1409,10 +1410,9 @@ bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, return merged; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 offset, u64 bytes) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; int ret = 0; @@ -1666,11 +1666,29 @@ out: return 0; } -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *node; + + spin_lock(&ctl->tree_lock); + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, offset_index); + unlink_free_space(ctl, info); + kfree(info->bitmap); + kfree(info); + if (need_resched()) { + spin_unlock(&ctl->tree_lock); + cond_resched(); + spin_lock(&ctl->tree_lock); + } + } + spin_unlock(&ctl->tree_lock); +} + +void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_cluster *cluster; struct list_head *head; @@ -1688,21 +1706,9 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) spin_lock(&ctl->tree_lock); } } - - while ((node = rb_last(&ctl->free_space_offset)) != NULL) { - info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(ctl, info); - if (info->bitmap) - kfree(info->bitmap); - kfree(info); - if (need_resched()) { - spin_unlock(&ctl->tree_lock); - cond_resched(); - spin_lock(&ctl->tree_lock); - } - } - spin_unlock(&ctl->tree_lock); + + __btrfs_remove_free_space_cache(ctl); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, @@ -2164,3 +2170,52 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) cluster->block_group = NULL; } +/* + * Find the left-most item in the cache tree, and then return the + * smallest inode number in the item. + * + * Note: the returned inode number may not be the smallest one in + * the tree, if the left-most item is a bitmap. 
+ */ +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) +{ + struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; + struct btrfs_free_space *entry = NULL; + u64 ino = 0; + + spin_lock(&ctl->tree_lock); + + if (RB_EMPTY_ROOT(&ctl->free_space_offset)) + goto out; + + entry = rb_entry(rb_first(&ctl->free_space_offset), + struct btrfs_free_space, offset_index); + + if (!entry->bitmap) { + ino = entry->offset; + + unlink_free_space(ctl, entry); + entry->offset++; + entry->bytes--; + if (!entry->bytes) + kfree(entry); + else + link_free_space(ctl, entry); + } else { + u64 offset = 0; + u64 count = 1; + int ret; + + ret = search_bitmap(ctl, entry, &offset, &count); + BUG_ON(ret); + + ino = offset; + bitmap_clear_bits(ctl, entry, offset, 1); + if (entry->bytes == 0) + free_bitmap(ctl, entry); + } +out: + spin_unlock(&ctl->tree_lock); + + return ino; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 322b7e0..282eeda 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -64,15 +64,25 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); + void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 bytenr, u64 size); +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 bytenr, u64 size); +static inline int +btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size) +{ + return __btrfs_add_free_space(block_group->free_space_ctl, + bytenr, size); +} int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); void btrfs_remove_free_space_cache(struct btrfs_block_group_cache - *block_group); + *block_group); u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c56eb59..cfa9f5e 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -16,11 +16,343 @@ * Boston, MA 021110-1307, USA. */ +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/pagemap.h> + #include "ctree.h" #include "disk-io.h" +#include "free-space-cache.h" +#include "inode-map.h" #include "transaction.h" -int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) +static int caching_kthread(void *data) +{ + struct btrfs_root *root = data; + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_key key; + struct btrfs_path *path; + struct extent_buffer *leaf; + u64 last = (u64)-1; + int slot; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* Since the commit root is read-only, we can safely skip locking. 
*/ + path->skip_locking = 1; + path->search_commit_root = 1; + path->reada = 2; + + key.objectid = BTRFS_FIRST_FREE_OBJECTID; + key.offset = 0; + key.type = BTRFS_INODE_ITEM_KEY; +again: + /* need to make sure the commit_root doesn''t disappear */ + mutex_lock(&root->fs_commit_mutex); + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + while (1) { + smp_mb(); + if (fs_info->closing > 1) + goto out; + + leaf = path->nodes[0]; + slot = path->slots[0]; + if (path->slots[0] >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else if (ret > 0) + break; + + if (need_resched() || + btrfs_transaction_in_commit(fs_info)) { + leaf = path->nodes[0]; + + if (btrfs_header_nritems(leaf) == 0) { + WARN_ON(1); + break; + } + + /* + * Save the key so we can advances forward + * in the next search. + */ + btrfs_item_key_to_cpu(leaf, &key, 0); + btrfs_release_path(root, path); + root->cache_progress = last; + mutex_unlock(&root->fs_commit_mutex); + schedule_timeout(1); + goto again; + } else + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + + if (key.type != BTRFS_INODE_ITEM_KEY) + goto next; + + if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) + break; + + if (last != (u64)-1 && last + 1 != key.objectid) { + __btrfs_add_free_space(ctl, last + 1, + key.objectid - last - 1); + wake_up(&root->cache_wait); + } + + last = key.objectid; +next: + path->slots[0]++; + } + + if (last < BTRFS_LAST_FREE_OBJECTID - 1) { + __btrfs_add_free_space(ctl, last + 1, + BTRFS_LAST_FREE_OBJECTID - last - 1); + } + + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + + root->cache_progress = (u64)-1; + btrfs_unpin_free_ino(root); +out: + wake_up(&root->cache_wait); + mutex_unlock(&root->fs_commit_mutex); + + btrfs_free_path(path); + + return ret; +} + +static void start_caching(struct btrfs_root *root) +{ + struct task_struct *tsk; + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_NO) { + spin_unlock(&root->cache_lock); + return; + } + + root->cached = BTRFS_CACHE_STARTED; + spin_unlock(&root->cache_lock); + + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", + root->root_key.objectid); + BUG_ON(IS_ERR(tsk)); +} + +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) +{ +again: + *objectid = btrfs_find_ino_for_alloc(root); + + if (*objectid != 0) + return 0; + + start_caching(root); + + wait_event(root->cache_wait, + root->cached == BTRFS_CACHE_FINISHED || + root->free_ino_ctl->free_space > 0); + + if (root->cached == BTRFS_CACHE_FINISHED && + root->free_ino_ctl->free_space == 0) + return -ENOSPC; + else + goto again; +} + +void btrfs_return_ino(struct btrfs_root *root, u64 objectid) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; +again: + if (root->cached == BTRFS_CACHE_FINISHED) { + __btrfs_add_free_space(ctl, objectid, 1); + } else { + /* + * If we are in the process of caching free ino chunks, + * to avoid adding the same inode number to the free_ino + * tree twice due to cross transaction, we''ll leave it + * in the pinned tree until a transaction is committed + * or the caching work is done. 
+ */ + + mutex_lock(&root->fs_commit_mutex); + spin_lock(&root->cache_lock); + if (root->cached == BTRFS_CACHE_FINISHED) { + spin_unlock(&root->cache_lock); + mutex_unlock(&root->fs_commit_mutex); + goto again; + } + spin_unlock(&root->cache_lock); + + start_caching(root); + + if (objectid <= root->cache_progress) + __btrfs_add_free_space(ctl, objectid, 1); + else + __btrfs_add_free_space(pinned, objectid, 1); + + mutex_unlock(&root->fs_commit_mutex); + } +} + +/* + * When a transaction is committed, we''ll move those inode numbers which + * are smaller than root->cache_progress from pinned tree to free_ino tree, + * and others will just be dropped, because the commit root we were + * searching has changed. + * + * Must be called with root->fs_commit_mutex held + */ +void btrfs_unpin_free_ino(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; + struct btrfs_free_space *info; + struct rb_node *n; + u64 count; + + while (1) { + n = rb_first(rbroot); + if (!n) + break; + + info = rb_entry(n, struct btrfs_free_space, offset_index); + BUG_ON(info->bitmap); + + if (info->offset > root->cache_progress) + goto free; + else if (info->offset + info->bytes > root->cache_progress) + count = root->cache_progress - info->offset + 1; + else + count = info->bytes; + + __btrfs_add_free_space(ctl, info->offset, count); +free: + rb_erase(&info->offset_index, rbroot); + kfree(info); + } +} + +#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) +#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) + +/* + * The goal is to keep the memory used by the free_ino tree won''t + * exceed the memory if we use bitmaps only. + */ +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) +{ + struct btrfs_free_space *info; + struct rb_node *n; + int max_ino; + int max_bitmaps; + + n = rb_last(&ctl->free_space_offset); + if (!n) { + ctl->extents_thresh = INIT_THRESHOLD; + return; + } + info = rb_entry(n, struct btrfs_free_space, offset_index); + + /* + * Find the maximum inode number in the filesystem. Note we + * ignore the fact that this can be a bitmap, because we are + * not doing precise calculation. + */ + max_ino = info->bytes - 1; + + max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP; + if (max_bitmaps <= ctl->total_bitmaps) { + ctl->extents_thresh = 0; + return; + } + + ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * + PAGE_CACHE_SIZE / sizeof(*info); +} + +/* + * We don''t fall back to bitmap, if we are below the extents threshold + * or this chunk of inode numbers is a big one. + */ +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + if (ctl->free_extents < ctl->extents_thresh || + info->bytes > INODES_PER_BITMAP / 10) + return false; + + return true; +} + +static struct btrfs_free_space_op free_ino_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl) +{ +} + +static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + /* + * We always use extents for two reasons: + * + * - The pinned tree is only used during the process of caching + * work. + * - Make code simpler. See btrfs_unpin_free_ino(). 
+ */ + return false; +} + +static struct btrfs_free_space_op pinned_free_ino_op = { + .recalc_thresholds = pinned_recalc_thresholds, + .use_bitmap = pinned_use_bitmap, +}; + +void btrfs_init_free_ino_ctl(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = 1; + ctl->start = 0; + ctl->private = NULL; + ctl->op = &free_ino_op; + + /* + * Initially we allow to use 16K of ram to cache chunks of + * inode numbers before we resort to bitmaps. This is somewhat + * arbitrary, but it will be adjusted in runtime. + */ + ctl->extents_thresh = INIT_THRESHOLD; + + spin_lock_init(&pinned->tree_lock); + pinned->unit = 1; + pinned->start = 0; + pinned->private = NULL; + pinned->extents_thresh = 0; + pinned->op = &pinned_free_ino_op; +} + +static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; int ret; @@ -54,15 +386,14 @@ error: return ret; } -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 dirid, u64 *objectid) +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) { int ret; mutex_lock(&root->objectid_mutex); if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { - ret = btrfs_find_highest_inode(root, &root->highest_objectid); + ret = btrfs_find_highest_objectid(root, + &root->highest_objectid); if (ret) goto out; } diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h new file mode 100644 index 0000000..eb91845 --- /dev/null +++ b/fs/btrfs/inode-map.h @@ -0,0 +1,11 @@ +#ifndef __BTRFS_INODE_MAP +#define __BTRFS_INODE_MAP + +void btrfs_init_free_ino_ctl(struct btrfs_root *root); +void btrfs_unpin_free_ino(struct btrfs_root *root); +void btrfs_return_ino(struct btrfs_root *root, u64 objectid); +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); + +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); + +#endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index db67821..5e48a6d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -50,6 +50,7 @@ #include "tree-log.h" #include "compression.h" #include "locking.h" +#include "inode-map.h" struct btrfs_iget_args { u64 ino; @@ -3784,6 +3785,10 @@ void btrfs_evict_inode(struct inode *inode) BUG_ON(ret); } + if (!(root == root->fs_info->tree_root || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) + btrfs_return_ino(root, inode->i_ino); + nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); @@ -4517,6 +4522,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if (!inode) return ERR_PTR(-ENOMEM); + /* + * we have to initialize this early, so we can reclaim the inode + * number if we fail afterwards in this function. 
+ */ + inode->i_ino = objectid; + if (dir) { ret = btrfs_set_inode_index(dir, index); if (ret) { @@ -4559,7 +4570,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, goto fail; inode_init_owner(inode, dir, mode); - inode->i_ino = objectid; inode_set_bytes(inode, 0); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], @@ -4684,10 +4694,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (!new_valid_dev(rdev)) return -EINVAL; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 for inode item and ref * 2 for dir items @@ -4699,6 +4705,10 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); @@ -4746,9 +4756,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, u64 objectid; u64 index = 0; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 for inode item and ref * 2 for dir items @@ -4760,6 +4767,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); @@ -4871,10 +4882,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) u64 index = 0; unsigned long nr = 1; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 items for inode and ref * 2 items for dir items @@ -4885,6 +4892,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) return PTR_ERR(trans); btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_fail; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, @@ -7079,9 +7090,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) return -ENAMETOOLONG; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 items for inode item and ref * 2 items for dir items @@ -7093,6 +7101,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5fdb2ab..f679522 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -49,6 +49,7 @@ #include "print-tree.h" #include "volumes.h" #include "locking.h" +#include "inode-map.h" /* Mask out flags that are inappropriate for the given type of inode. 
*/ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) @@ -244,8 +245,7 @@ static noinline int create_subvol(struct btrfs_root *root, u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; u64 index = 0; - ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, - 0, &objectid); + ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); if (ret) { dput(parent); return ret; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 31ade58..a40f596 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -30,6 +30,7 @@ #include "btrfs_inode.h" #include "async-thread.h" #include "free-space-cache.h" +#include "inode-map.h" /* * backref_node, mapping_node and tree_block start with this @@ -3891,7 +3892,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, if (IS_ERR(trans)) return ERR_CAST(trans); - err = btrfs_find_free_objectid(trans, root, objectid, &objectid); + err = btrfs_find_free_objectid(root, &objectid); if (err) goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3d73c8d..2d0caf8f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -27,6 +27,7 @@ #include "transaction.h" #include "locking.h" #include "tree-log.h" +#include "inode-map.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -752,7 +753,11 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_orphan_commit_root(trans, root); if (root->commit_root != root->node) { + mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); + btrfs_unpin_free_ino(root); + mutex_unlock(&root->fs_commit_mutex); + btrfs_set_root_node(&root->root_item, root->node); } @@ -921,7 +926,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); + ret = btrfs_find_free_objectid(tree_root, &objectid); if (ret) { pending->error = ret; goto fail; -- 1.7.3.1 -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
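The pinning scheme above is the subtle part of this patch, so a compact user-space model may help. This is an illustration only, not the kernel code: it tracks single inode numbers instead of extents and bitmaps, skips all locking and the rbtree, and the names ino_set, return_ino and unpin are invented for the sketch. (In the real code, with 4KB pages, each bitmap covers PAGE_CACHE_SIZE * 8 = 32768 inode numbers.)

/* ino_cache_model.c - build with: gcc -std=c99 -o model ino_cache_model.c */
#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 64

struct ino_set {
	uint64_t ino[MAX_ENTRIES];
	int count;
};

static void set_add(struct ino_set *s, uint64_t ino)
{
	if (s->count < MAX_ENTRIES)
		s->ino[s->count++] = ino;
}

/*
 * Mirrors the routing in btrfs_return_ino(): while the caching thread
 * is still scanning, only numbers it has already passed may enter the
 * free set directly; anything ahead of it gets pinned.
 */
static void return_ino(struct ino_set *free_set, struct ino_set *pinned,
		       uint64_t cache_progress, uint64_t ino)
{
	if (ino <= cache_progress)
		set_add(free_set, ino);
	else
		set_add(pinned, ino);
}

/*
 * Mirrors btrfs_unpin_free_ino(): at commit time, pinned numbers at or
 * below the scanner's progress are released; the rest are dropped,
 * because the scanner will rediscover them in the new commit root.
 */
static void unpin(struct ino_set *free_set, struct ino_set *pinned,
		  uint64_t cache_progress)
{
	for (int i = 0; i < pinned->count; i++) {
		if (pinned->ino[i] <= cache_progress)
			set_add(free_set, pinned->ino[i]);
	}
	pinned->count = 0;	/* dropped entries simply disappear */
}

int main(void)
{
	struct ino_set free_set = { .count = 0 }, pinned = { .count = 0 };
	uint64_t cache_progress = 300;	/* the scanner has reached ino 300 */

	return_ino(&free_set, &pinned, cache_progress, 260); /* reusable now */
	return_ino(&free_set, &pinned, cache_progress, 500); /* must wait */

	cache_progress = 600;	/* the scanner advanced before the commit */
	unpin(&free_set, &pinned, cache_progress);

	printf("free entries: %d (expect 2)\n", free_set.count);
	return 0;
}

The split is visible in main(): 260 lies behind the scanner, so reusing it immediately cannot collide with what the scanner reports, while 500 has to sit in the pinned set until the commit makes it safe.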
Li Zefan
2011-Mar-16 08:50 UTC
[PATCH 5/7] Btrfs: Make the code for reading/writing free space cache generic
Extract out block group specific code from lookup_free_space_inode(), create_free_space_inode(), load_free_space_cache() and btrfs_write_out_cache(), so the code can be used to read/write free ino cache. Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> --- fs/btrfs/free-space-cache.c | 317 ++++++++++++++++++++++++------------------ 1 files changed, 181 insertions(+), 136 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index e0dbb3a..327e249 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -32,9 +32,9 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); -struct inode *lookup_free_space_inode(struct btrfs_root *root, - struct btrfs_block_group_cache - *block_group, struct btrfs_path *path) +static struct inode *__lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_path *path, + u64 offset) { struct btrfs_key key; struct btrfs_key location; @@ -44,15 +44,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, struct inode *inode = NULL; int ret; - spin_lock(&block_group->lock); - if (block_group->inode) - inode = igrab(block_group->inode); - spin_unlock(&block_group->lock); - if (inode) - return inode; - key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -80,6 +73,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return ERR_PTR(-ENOENT); } + return inode; +} + +struct inode *lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_block_group_cache + *block_group, struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&block_group->lock); + if (block_group->inode) + inode = igrab(block_group->inode); + spin_unlock(&block_group->lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, + block_group->key.objectid); + if (IS_ERR(inode)) + return inode; + spin_lock(&block_group->lock); if (!root->fs_info->closing) { block_group->inode = igrab(inode); @@ -90,24 +104,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; } -int create_free_space_inode(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int __create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 ino, u64 offset) { struct btrfs_key key; struct btrfs_disk_key disk_key; struct btrfs_free_space_header *header; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; - u64 objectid; int ret; - ret = btrfs_find_free_objectid(root, &objectid); - if (ret < 0) - return ret; - - ret = btrfs_insert_empty_inode(trans, root, path, objectid); + ret = btrfs_insert_empty_inode(trans, root, path, ino); if (ret) return ret; @@ -127,13 +135,12 @@ int create_free_space_inode(struct btrfs_root *root, BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); btrfs_set_inode_nlink(leaf, inode_item, 1); btrfs_set_inode_transid(leaf, inode_item, trans->transid); - btrfs_set_inode_block_group(leaf, inode_item, - block_group->key.objectid); + btrfs_set_inode_block_group(leaf, inode_item, offset); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(root, path); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_insert_empty_item(trans, root, path, &key, @@ -153,6 +160,22 @@ int 
create_free_space_inode(struct btrfs_root *root, return 0; } +int create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + int ret; + u64 ino; + + ret = btrfs_find_free_objectid(root, &ino); + if (ret < 0) + return ret; + + return __create_free_space_inode(root, trans, path, ino, + block_group->key.objectid); +} + int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, @@ -205,16 +228,13 @@ static int readahead_cache(struct inode *inode) return 0; } -int load_free_space_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group) +int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_path *path, u64 offset) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; - struct btrfs_root *root = fs_info->tree_root; - struct inode *inode; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct page *page; - struct btrfs_path *path; u32 *checksums = NULL, *crc; char *disk_crcs = NULL; struct btrfs_key key; @@ -226,71 +246,43 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, pgoff_t index = 0; unsigned long first_page_offset; int num_checksums; - int ret = 0; - - /* - * If we''re unmounting then just return, since this does a search on the - * normal root and not the commit root and we could deadlock. - */ - smp_mb(); - if (fs_info->closing) - return 0; - - /* - * If this block group has been marked to be cleared for one reason or - * another then we can''t trust the on disk cache, so just return. - */ - spin_lock(&block_group->lock); - if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); + int ret = 0, ret2; INIT_LIST_HEAD(&bitmaps); - path = btrfs_alloc_path(); - if (!path) - return 0; - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) { - btrfs_free_path(path); - return 0; - } - /* Nothing in the space cache, goodbye */ - if (!i_size_read(inode)) { - btrfs_free_path(path); + if (!i_size_read(inode)) goto out; - } key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret) { - btrfs_free_path(path); + if (ret < 0) + goto out; + else if (ret > 0) { + btrfs_release_path(root, path); + ret = 0; goto out; } + ret = -1; + leaf = path->nodes[0]; header = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_header); num_entries = btrfs_free_space_entries(leaf, header); num_bitmaps = btrfs_free_space_bitmaps(leaf, header); generation = btrfs_free_space_generation(leaf, header); - btrfs_free_path(path); + btrfs_release_path(root, path); if (BTRFS_I(inode)->generation != generation) { printk(KERN_ERR "btrfs: free space inode generation (%llu) did" - " not match free space cache generation (%llu) for " - "block group %llu\n", + " not match free space cache generation (%llu)\n", (unsigned long long)BTRFS_I(inode)->generation, - (unsigned long long)generation, - (unsigned long long)block_group->key.objectid); - goto free_cache; + (unsigned long long)generation); + goto out; } if (!num_entries) @@ -307,10 +299,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto out; ret = readahead_cache(inode); - if (ret) { - ret = 0; + if (ret) goto out; - } 
while (1) { struct btrfs_free_space_entry *entry; @@ -329,10 +319,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } page = grab_cache_page(inode->i_mapping, index); - if (!page) { - ret = 0; + if (!page) goto free_cache; - } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); @@ -341,9 +329,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, unlock_page(page); page_cache_release(page); printk(KERN_ERR "btrfs: error reading free " - "space cache: %llu\n", - (unsigned long long) - block_group->key.objectid); + "space cache\n"); goto free_cache; } } @@ -356,13 +342,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, gen = addr + (sizeof(u32) * num_checksums); if (*gen != BTRFS_I(inode)->generation) { printk(KERN_ERR "btrfs: space cache generation" - " (%llu) does not match inode (%llu) " - "for block group %llu\n", + " (%llu) does not match inode (%llu)\n", (unsigned long long)*gen, (unsigned long long) - BTRFS_I(inode)->generation, - (unsigned long long) - block_group->key.objectid); + BTRFS_I(inode)->generation); kunmap(page); unlock_page(page); page_cache_release(page); @@ -378,9 +361,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, PAGE_CACHE_SIZE - start_offset); btrfs_csum_final(cur_crc, (char *)&cur_crc); if (cur_crc != *crc) { - printk(KERN_ERR "btrfs: crc mismatch for page %lu in " - "block group %llu\n", index, - (unsigned long long)block_group->key.objectid); + printk(KERN_ERR "btrfs: crc mismatch for page %lu\n", + index); kunmap(page); unlock_page(page); page_cache_release(page); @@ -426,7 +408,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto free_cache; } spin_lock(&ctl->tree_lock); - ret = link_free_space(ctl, e); + ret2 = link_free_space(ctl, e); ctl->total_bitmaps++; ctl->op->recalc_thresholds(ctl); spin_unlock(&ctl->tree_lock); @@ -469,27 +451,75 @@ next: out: kfree(checksums); kfree(disk_crcs); - iput(inode); return ret; - free_cache: - /* This cache is bogus, make sure it gets cleared */ - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_CLEAR; - spin_unlock(&block_group->lock); - btrfs_remove_free_space_cache(block_group); + __btrfs_remove_free_space_cache(ctl); goto out; } -int btrfs_write_out_cache(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int load_free_space_cache(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_root *root = fs_info->tree_root; + struct inode *inode; + struct btrfs_path *path; + int ret; + + /* + * If we''re unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. + */ + smp_mb(); + if (fs_info->closing) + return 0; + + /* + * If this block group has been marked to be cleared for one reason or + * another then we can''t trust the on disk cache, so just return. 
+ */ + spin_lock(&block_group->lock); + if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) { + btrfs_free_path(path); + return 0; + } + + ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, + path, block_group->key.objectid); + btrfs_free_path(path); + + if (ret < 0) { + /* This cache is bogus, make sure it gets cleared */ + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_CLEAR; + spin_unlock(&block_group->lock); + + printk(KERN_ERR "btrfs: failed to load free space cache " + "for block group %llu\n", block_group->key.objectid); + } + + iput(inode); + return ret; +} + +int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 offset) +{ struct btrfs_free_space_header *header; struct extent_buffer *leaf; - struct inode *inode; struct rb_node *node; struct list_head *pos, *n; struct page *page; @@ -503,33 +533,16 @@ int btrfs_write_out_cache(struct btrfs_root *root, int num_checksums; int entries = 0; int bitmaps = 0; - int ret = 0; - - root = root->fs_info->tree_root; + int ret = -1; INIT_LIST_HEAD(&bitmap_list); - spin_lock(&block_group->lock); - if (block_group->disk_cache_state < BTRFS_DC_SETUP) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) - return 0; - - if (!i_size_read(inode)) { - iput(inode); - return 0; - } - node = rb_first(&ctl->free_space_offset); - if (!node) { - iput(inode); + if (!node) return 0; - } + + if (!i_size_read(inode)) + return -1; last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; filemap_write_and_wait(inode->i_mapping); @@ -539,10 +552,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, /* We need a checksum per page. 
*/ num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); - if (!crc) { - iput(inode); - return 0; - } + if (!crc) + return -1; /* Since the first page has all of our checksums and our generation we * need to calculate the offset into the page that we can start writing @@ -729,12 +740,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(trans, root, &key, path, 1, 1); if (ret < 0) { - ret = 0; + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); @@ -747,8 +758,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || - found_key.offset != block_group->key.objectid) { - ret = 0; + found_key.offset != offset) { + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, @@ -768,15 +779,49 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = 1; out_free: - if (ret == 0) { + if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_ERROR; - spin_unlock(&block_group->lock); BTRFS_I(inode)->generation = 0; } kfree(checksums); btrfs_update_inode(trans, root, inode); + return ret; +} + +int btrfs_write_out_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct inode *inode; + int ret = 0; + + root = root->fs_info->tree_root; + + spin_lock(&block_group->lock); + if (block_group->disk_cache_state < BTRFS_DC_SETUP) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root->fs_info->tree_root, inode, ctl, + trans, path, block_group->key.objectid); + + if (ret < 0) { + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_ERROR; + spin_unlock(&block_group->lock); + + printk(KERN_ERR "btrfs: failed to write free space cache " + "for block group %llu\n", block_group->key.objectid); + } + iput(inode); return ret; } -- 1.7.3.1 -- To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
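The refactoring pattern here recurs throughout the series, so it is worth spelling out: the public entry points keep all the block-group policy (disk_cache_state checks, inode lookup, marking BTRFS_DC_CLEAR / BTRFS_DC_ERROR and logging), while the new double-underscore cores take an inode, a free-space ctl and a key offset, so the free-ino cache can drive the same I/O code with its own inode. Below is a user-space sketch of the wrapper/core split and of the return convention the write path now uses (1 = cache written, 0 = nothing to do, -1 = failure); the names cache_ctx, __write_cache and write_group_cache are invented stand-ins, not btrfs functions.

/* wrapper_core.c - build with: gcc -o wrapper wrapper_core.c */
#include <stdio.h>

struct cache_ctx {
	int entries;	/* what the core routine would serialize */
	int io_error;	/* simulate a failed write */
};

/*
 * Core routine, caller-agnostic: any cache that can hand us a context
 * gets identical behaviour. 1 = written, 0 = nothing to do, -1 = error.
 */
static int __write_cache(struct cache_ctx *ctx)
{
	if (ctx->entries == 0)
		return 0;
	if (ctx->io_error)
		return -1;
	return 1;	/* pretend the pages hit the disk */
}

/*
 * Block-group wrapper: owns the caller-specific policy. In the patch
 * this is where disk_cache_state is flipped to BTRFS_DC_ERROR; here it
 * is reduced to the error report.
 */
static int write_group_cache(struct cache_ctx *ctx, unsigned long long group)
{
	int ret = __write_cache(ctx);

	if (ret < 0)
		fprintf(stderr,
			"failed to write cache for block group %llu\n", group);
	return ret;
}

int main(void)
{
	struct cache_ctx ok = { .entries = 8, .io_error = 0 };
	struct cache_ctx bad = { .entries = 8, .io_error = 1 };

	printf("ok: %d bad: %d\n",
	       write_group_cache(&ok, 12288),
	       write_group_cache(&bad, 12288));
	return 0;
}

Moving the tri-state into the core is what makes the reuse work: the wrapper alone decides what a failure means for its cache, so a second caller does not inherit block-group recovery behaviour it cannot apply.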
There's a potential problem on 32-bit systems when we exhaust the 32-bit inode numbers and start to allocate big inode numbers, because btrfs uses inode->i_ino in many places. So here we always use BTRFS_I(inode)->location.objectid, which is a u64 variable. There are two exceptions where BTRFS_I(inode)->location.objectid != inode->i_ino: the btree inode (0 vs 1) and empty subvol dirs (256 vs 2); inode->i_ino will be used in those cases.

Another reason to make this change is that I'm going to use a special inode to save the free ino cache, and its inode number must be > (u64)-256.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
---
 fs/btrfs/btrfs_inode.h |    9 ++
 fs/btrfs/compression.c |    5 +-
 fs/btrfs/export.c      |   25 ++++---
 fs/btrfs/extent-tree.c |   10 +-
 fs/btrfs/extent_io.c   |    4 +-
 fs/btrfs/file-item.c   |    5 +-
 fs/btrfs/file.c        |   27 ++++---
 fs/btrfs/inode.c       |  195 +++++++++++++++++++++++++-----------------------
 fs/btrfs/ioctl.c       |   18 ++--
 fs/btrfs/relocation.c  |   24 +++---
 fs/btrfs/transaction.c |    4 +-
 fs/btrfs/tree-log.c    |   58 +++++++-------
 fs/btrfs/xattr.c       |    8 +-
 13 files changed, 209 insertions(+), 183 deletions(-)

diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ccc991c..a086b44 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -167,6 +167,15 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
 	return container_of(inode, struct btrfs_inode, vfs_inode);
 }
 
+static inline u64 btrfs_ino(struct inode *inode)
+{
+	u64 ino = BTRFS_I(inode)->location.objectid;
+
+	if (ino <= BTRFS_FIRST_FREE_OBJECTID)
+		ino = inode->i_ino;
+	return ino;
+}
+
 static inline void btrfs_i_size_write(struct inode *inode, u64 size)
 {
 	i_size_write(inode, size);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 4d2110e..a164c0c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
 	kunmap_atomic(kaddr, KM_USER0);
 
 	if (csum != *cb_sum) {
-		printk(KERN_INFO "btrfs csum failed ino %lu "
+		printk(KERN_INFO "btrfs csum failed ino %llu "
 		       "extent %llu csum %u "
-		       "wanted %u mirror %d\n", inode->i_ino,
+		       "wanted %u mirror %d\n",
+		       (unsigned long long)btrfs_ino(inode),
 		       (unsigned long long)disk_start,
 		       csum, *cb_sum, cb->mirror_num);
 		ret = -EIO;
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 3220ad1..56c96dc 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -28,7 +28,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
 		len = BTRFS_FID_SIZE_NON_CONNECTABLE;
 		type = FILEID_BTRFS_WITHOUT_PARENT;
 
-	fid->objectid = inode->i_ino;
+	fid->objectid = btrfs_ino(inode);
 	fid->root_objectid = BTRFS_I(inode)->root->objectid;
 	fid->gen = inode->i_generation;
@@ -179,13 +179,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 	if (!path)
 		return ERR_PTR(-ENOMEM);
 
-	if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+	if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
 		key.objectid = root->root_key.objectid;
 		key.type = BTRFS_ROOT_BACKREF_KEY;
 		key.offset = (u64)-1;
 		root = root->fs_info->tree_root;
 	} else {
-		key.objectid = dir->i_ino;
+		key.objectid = btrfs_ino(dir);
 		key.type = BTRFS_INODE_REF_KEY;
 		key.offset = (u64)-1;
 	}
@@ -248,6 +248,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
 	struct btrfs_key key;
 	int name_len;
 	int ret;
+	u64 ino;
 
 	if (!dir || !inode)
 		return -EINVAL;
@@ -255,19 +256,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
 	if (!S_ISDIR(dir->i_mode))
 		return -EINVAL;
 
+	ino = btrfs_ino(inode);
+
 	path = btrfs_alloc_path();
 	if
(!path) return -ENOMEM; path->leave_spinning = 1; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = BTRFS_I(inode)->root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = inode->i_ino; - key.offset = dir->i_ino; + key.objectid = ino; + key.offset = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; } @@ -276,7 +279,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, btrfs_free_path(path); return ret; } else if (ret > 0) { - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { path->slots[0]--; } else { btrfs_free_path(path); @@ -285,11 +288,11 @@ static int btrfs_get_name(struct dentry *parent, char *name, } leaf = path->nodes[0]; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { - rref = btrfs_item_ptr(leaf, path->slots[0], + if (ino == BTRFS_FIRST_FREE_OBJECTID) { + rref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - name_ptr = (unsigned long)(rref + 1); - name_len = btrfs_root_ref_name_len(leaf, rref); + name_ptr = (unsigned long)(rref + 1); + name_len = btrfs_root_ref_name_len(leaf, rref); } else { iref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_ref); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 49eb826..3ad4621 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6905,8 +6905,8 @@ static noinline int get_new_locations(struct inode *reloc_inode, cur_pos = extent_key->objectid - offset; last_byte = extent_key->objectid + extent_key->offset; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, - cur_pos, 0); + ret = btrfs_lookup_file_extent(NULL, root, path, + btrfs_ino(reloc_inode), cur_pos, 0); if (ret < 0) goto out; if (ret > 0) { @@ -6929,7 +6929,7 @@ static noinline int get_new_locations(struct inode *reloc_inode, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.offset != cur_pos || found_key.type != BTRFS_EXTENT_DATA_KEY || - found_key.objectid != reloc_inode->i_ino) + found_key.objectid != btrfs_ino(reloc_inode)) break; fi = btrfs_item_ptr(leaf, path->slots[0], @@ -7071,7 +7071,7 @@ next: break; } - if (inode && key.objectid != inode->i_ino) { + if (inode && key.objectid != btrfs_ino(inode)) { BUG_ON(extent_locked); btrfs_release_path(root, path); mutex_unlock(&inode->i_mutex); @@ -7380,7 +7380,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, continue; if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) continue; - if (!inode || inode->i_ino != key.objectid) { + if (!inode || btrfs_ino(inode) != key.objectid) { iput(inode); inode = btrfs_ilookup(target_root->fs_info->sb, key.objectid, target_root, 1); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9fcb5ed..bd34a82 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2994,7 +2994,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * because there might be preallocation past i_size */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, - path, inode->i_ino, -1, 0); + path, btrfs_ino(inode), -1, 0); if (ret < 0) { btrfs_free_path(path); return ret; @@ -3007,7 +3007,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, found_type = btrfs_key_type(&found_key); /* No extents, but there might be delalloc bits */ - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || found_type != 
BTRFS_EXTENT_DATA_KEY) { /* have to trust i_size as the end */ last = (u64)-1; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 4f19a3e..1e01234 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -205,8 +205,9 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, EXTENT_NODATASUM, GFP_NOFS); } else { printk(KERN_INFO "btrfs no csum found " - "for inode %lu start %llu\n", - inode->i_ino, + "for inode %llu start %llu\n", + (unsigned long long) + btrfs_ino(inode), (unsigned long long)offset); } item = NULL; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ab22ca4..518beff 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -302,6 +302,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *path; struct btrfs_key key; struct btrfs_key new_key; + u64 ino = btrfs_ino(inode); u64 search_start = start; u64 disk_bytenr = 0; u64 num_bytes = 0; @@ -322,14 +323,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, while (1) { recow = 0; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, search_start, -1); if (ret < 0) break; if (ret > 0 && path->slots[0] > 0 && search_start == start) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); - if (key.objectid == inode->i_ino && + if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -350,7 +351,7 @@ next_slot: } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - if (key.objectid > inode->i_ino || + if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) break; @@ -596,6 +597,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_slot = 0; int recow; int ret; + u64 ino = btrfs_ino(inode); btrfs_drop_extent_cache(inode, start, end - 1, 0); @@ -604,7 +606,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, again: recow = 0; split = start; - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = split; @@ -614,8 +616,7 @@ again: leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - BUG_ON(key.objectid != inode->i_ino || - key.type != BTRFS_EXTENT_DATA_KEY); + BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); BUG_ON(btrfs_file_extent_type(leaf, fi) !@@ -632,7 +633,7 @@ again: other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { new_key.offset = end; btrfs_set_item_key_safe(trans, root, path, &new_key); @@ -655,7 +656,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); @@ -704,7 +705,7 @@ again: ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); if (split == start) { @@ -720,7 +721,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(root, path); @@ -731,13 +732,13 @@ again: del_nr++; ret = btrfs_free_extent(trans, 
root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(root, path); @@ -748,7 +749,7 @@ again: del_nr++; ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } if (del_nr == 0) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5e48a6d..d96d858 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -137,7 +137,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, path->leave_spinning = 1; btrfs_set_trans_block_group(trans, inode); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = start; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(cur_size); @@ -1044,6 +1044,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, int nocow; int check_prev = 1; bool nolock = false; + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); @@ -1058,14 +1059,14 @@ static noinline int run_delalloc_nocow(struct inode *inode, cow_start = (u64)-1; cur_offset = start; while (1) { - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, cur_offset, 0); BUG_ON(ret < 0); if (ret > 0 && path->slots[0] > 0 && check_prev) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); - if (found_key.objectid == inode->i_ino && + if (found_key.objectid == ino && found_key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -1086,7 +1087,7 @@ next_slot: num_bytes = 0; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid > inode->i_ino || + if (found_key.objectid > ino || found_key.type > BTRFS_EXTENT_DATA_KEY || found_key.offset > end) break; @@ -1121,7 +1122,7 @@ next_slot: goto out_check; if (btrfs_extent_readonly(root, disk_bytenr)) goto out_check; - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, ino, found_key.offset - extent_offset, disk_bytenr)) goto out_check; @@ -1634,7 +1635,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, &hint, 0); BUG_ON(ret); - ins.objectid = inode->i_ino; + ins.objectid = btrfs_ino(inode); ins.offset = file_pos; ins.type = BTRFS_EXTENT_DATA_KEY; ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); @@ -1665,7 +1666,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, ins.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_alloc_reserved_file_extent(trans, root, root->root_key.objectid, - inode->i_ino, file_pos, &ins); + btrfs_ino(inode), file_pos, &ins); BUG_ON(ret); btrfs_free_path(path); @@ -1990,8 +1991,9 @@ good: zeroit: if (printk_ratelimit()) { - printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " - "private %llu\n", page->mapping->host->i_ino, + printk(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " + "private %llu\n", + (unsigned long long)btrfs_ino(page->mapping->host), (unsigned long long)start, csum, (unsigned long long)private); } @@ -2231,7 +2233,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) /* insert an orphan item to track this unlinked/truncated file */ if (insert >= 1) { - ret = 
btrfs_insert_orphan_item(trans, root, inode->i_ino); + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2268,7 +2270,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) spin_unlock(&root->orphan_lock); if (trans && delete_item) { - ret = btrfs_del_orphan_item(trans, root, inode->i_ino); + ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2512,7 +2514,8 @@ static void btrfs_read_locked_inode(struct inode *inode) * try to precache a NULL acl entry for files that don''t have * any xattrs or acls */ - maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); + maybe_acls = acls_after_inode_item(leaf, path->slots[0], + btrfs_ino(inode)); if (!maybe_acls) cache_no_acl(inode); @@ -2646,6 +2649,8 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_dir_item *di; struct btrfs_key key; u64 index; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) { @@ -2654,7 +2659,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, } path->leave_spinning = 1; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2671,17 +2676,16 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; btrfs_release_path(root, path); - ret = btrfs_del_inode_ref(trans, root, name, name_len, - inode->i_ino, - dir->i_ino, &index); + ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, + dir_ino, &index); if (ret) { printk(KERN_INFO "btrfs failed to delete reference to %.*s, " - "inode %lu parent %lu\n", name_len, name, - inode->i_ino, dir->i_ino); + "inode %llu parent %llu\n", name_len, name, + (unsigned long long)ino, (unsigned long long)dir_ino); goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2695,7 +2699,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, - inode, dir->i_ino); + inode, dir_ino); BUG_ON(ret != 0 && ret != -ENOENT); ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, @@ -2760,12 +2764,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, int check_link = 1; int err = -ENOSPC; int ret; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); trans = btrfs_start_transaction(root, 10); if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) return trans; - if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return ERR_PTR(-ENOSPC); /* check if there is someone else holds reference */ @@ -2824,7 +2830,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, if (ret == 0 && S_ISREG(inode->i_mode)) { ret = btrfs_lookup_file_extent(trans, root, path, - inode->i_ino, (u64)-1, 0); + ino, (u64)-1, 0); if (ret < 0) { err = ret; goto out; @@ -2840,7 +2846,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, goto out; } - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2857,7 +2863,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, ref = 
btrfs_lookup_inode_ref(trans, root, path, dentry->d_name.name, dentry->d_name.len, - inode->i_ino, dir->i_ino, 0); + ino, dir_ino, 0); if (IS_ERR(ref)) { err = PTR_ERR(ref); goto out; @@ -2868,7 +2874,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], ref); btrfs_release_path(root, path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2943,12 +2949,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_key key; u64 index; int ret; + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); BUG_ON(!di || IS_ERR(di)); @@ -2961,10 +2968,10 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, &index, name, name_len); + dir_ino, &index, name, name_len); if (ret < 0) { BUG_ON(ret != -ENOENT); - di = btrfs_search_dir_index_item(root, path, dir->i_ino, + di = btrfs_search_dir_index_item(root, path, dir_ino, name, name_len); BUG_ON(!di || IS_ERR(di)); @@ -2974,7 +2981,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, index = key.offset; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, name, name_len, -1); BUG_ON(!di || IS_ERR(di)); @@ -3003,7 +3010,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) unsigned long nr = 0; if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || - inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) return -ENOTEMPTY; trans = __unlink_start_trans(dir, dentry); @@ -3012,7 +3019,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) btrfs_set_trans_block_group(trans, dir); - if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, dentry->d_name.name, @@ -3244,6 +3251,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int encoding; int ret; int err = 0; + u64 ino = btrfs_ino(inode); BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); @@ -3254,7 +3262,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, BUG_ON(!path); path->reada = -1; - key.objectid = inode->i_ino; + key.objectid = ino; key.offset = (u64)-1; key.type = (u8)-1; @@ -3282,7 +3290,7 @@ search_again: found_type = btrfs_key_type(&found_key); encoding = 0; - if (found_key.objectid != inode->i_ino) + if (found_key.objectid != ino) break; if (found_type < min_type) @@ -3401,7 +3409,7 @@ delete: ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, 0, btrfs_header_owner(leaf), - inode->i_ino, extent_offset); + ino, extent_offset); BUG_ON(ret); } @@ -3593,7 +3601,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) BUG_ON(err); err = btrfs_insert_file_extent(trans, root, - inode->i_ino, cur_offset, 0, + btrfs_ino(inode), cur_offset, 0, 0, hole_size, 0, hole_size, 0, 0, 0); BUG_ON(err); @@ -3814,7 +3822,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, path = 
btrfs_alloc_path(); BUG_ON(!path); - di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, + di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); @@ -3867,7 +3875,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || + if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) goto out; @@ -3906,6 +3914,7 @@ static void inode_tree_add(struct inode *inode) struct btrfs_inode *entry; struct rb_node **p; struct rb_node *parent; + u64 ino = btrfs_ino(inode); again: p = &root->inode_tree.rb_node; parent = NULL; @@ -3918,9 +3927,9 @@ again: parent = *p; entry = rb_entry(parent, struct btrfs_inode, rb_node); - if (inode->i_ino < entry->vfs_inode.i_ino) + if (ino < btrfs_ino(&entry->vfs_inode)) p = &parent->rb_left; - else if (inode->i_ino > entry->vfs_inode.i_ino) + else if (ino > btrfs_ino(&entry->vfs_inode)) p = &parent->rb_right; else { WARN_ON(!(entry->vfs_inode.i_state & @@ -3984,9 +3993,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -3994,7 +4003,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -4003,7 +4012,7 @@ again: } while (node) { entry = rb_entry(node, struct btrfs_inode, rb_node); - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; inode = igrab(&entry->vfs_inode); if (inode) { spin_unlock(&root->inode_lock); @@ -4041,7 +4050,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return args->ino == inode->i_ino && + return args->ino == btrfs_ino(inode) && args->root == BTRFS_I(inode)->root; } @@ -4222,9 +4231,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, /* special case for "." 
*/ if (filp->f_pos == 0) { - over = filldir(dirent, ".", 1, - 1, inode->i_ino, - DT_DIR); + over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); if (over) return 0; filp->f_pos = 1; @@ -4243,7 +4250,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) @@ -4401,8 +4408,9 @@ void btrfs_dirty_inode(struct inode *inode) if (IS_ERR(trans)) { if (printk_ratelimit()) { printk(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %ld\n", - inode->i_ino, PTR_ERR(trans)); + "dirty inode %llu error %ld\n", + (unsigned long long)btrfs_ino(inode), + PTR_ERR(trans)); } return; } @@ -4412,8 +4420,9 @@ void btrfs_dirty_inode(struct inode *inode) if (ret) { if (printk_ratelimit()) { printk(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %d\n", - inode->i_ino, ret); + "dirty inode %llu error %d\n", + (unsigned long long)btrfs_ino(inode), + ret); } } } @@ -4433,7 +4442,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) struct extent_buffer *leaf; int ret; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = (u64)-1; @@ -4465,7 +4474,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { BTRFS_I(inode)->index_cnt = 2; goto out; @@ -4629,29 +4638,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, int ret = 0; struct btrfs_key key; struct btrfs_root *root = BTRFS_I(parent_inode)->root; + u64 ino = btrfs_ino(inode); + u64 parent_ino = btrfs_ino(parent_inode); - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); } else { - key.objectid = inode->i_ino; + key.objectid = ino; btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; } - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, key.objectid, root->root_key.objectid, - parent_inode->i_ino, - index, name, name_len); + parent_ino, index, name, name_len); } else if (add_backref) { - ret = btrfs_insert_inode_ref(trans, root, - name, name_len, inode->i_ino, - parent_inode->i_ino, index); + ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, + parent_ino, index); } if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_ino, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -4710,7 +4719,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); err = PTR_ERR(inode); if (IS_ERR(inode)) @@ -4772,7 +4781,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); err = 
PTR_ERR(inode);
 	if (IS_ERR(inode))
@@ -4897,7 +4906,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 		goto out_fail;
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-				dentry->d_name.len, dir->i_ino, objectid,
+				dentry->d_name.len, btrfs_ino(dir), objectid,
 				BTRFS_I(dir)->block_group, S_IFDIR | mode,
 				&index);
 	if (IS_ERR(inode)) {
@@ -5018,7 +5027,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 	u64 bytenr;
 	u64 extent_start = 0;
 	u64 extent_end = 0;
-	u64 objectid = inode->i_ino;
+	u64 objectid = btrfs_ino(inode);
 	u32 found_type;
 	struct btrfs_path *path = NULL;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5502,7 +5511,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
 				       offset, 0);
 	if (ret < 0)
 		goto out;
@@ -5519,7 +5528,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
 	ret = 0;
 	leaf = path->nodes[0];
 	btrfs_item_key_to_cpu(leaf, &key, slot);
-	if (key.objectid != inode->i_ino ||
+	if (key.objectid != btrfs_ino(inode) ||
 	    key.type != BTRFS_EXTENT_DATA_KEY) {
 		/* not our file or wrong item type, must cow */
 		goto out;
 	}
@@ -5553,7 +5562,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
 	 * look for other files referencing this extent, if we
 	 * find any we must cow
 	 */
-	if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
 				  key.offset - backref_offset, disk_bytenr))
 		goto out;
@@ -5744,9 +5753,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 		flush_dcache_page(bvec->bv_page);
 		if (csum != *private) {
-			printk(KERN_ERR "btrfs csum failed ino %lu off"
+			printk(KERN_ERR "btrfs csum failed ino %llu off"
 			      " %llu csum %u private %u\n",
-			      inode->i_ino, (unsigned long long)start,
+			      (unsigned long long)btrfs_ino(inode),
+			      (unsigned long long)start,
 			      csum, *private);
 			err = -EIO;
 		}
@@ -5883,9 +5893,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
 	struct btrfs_dio_private *dip = bio->bi_private;
 
 	if (err) {
-		printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
 		      "sector %#Lx len %u err no %d\n",
-		      dip->inode->i_ino, bio->bi_rw,
+		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
 		dip->errors = 1;
@@ -6714,8 +6724,8 @@ void btrfs_destroy_inode(struct inode *inode)
 	spin_lock(&root->orphan_lock);
 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
-		       inode->i_ino);
+		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
+		       (unsigned long long)btrfs_ino(inode));
 		list_del_init(&BTRFS_I(inode)->i_orphan);
 	}
 	spin_unlock(&root->orphan_lock);
@@ -6826,16 +6836,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	u64 index = 0;
 	u64 root_objectid;
 	int ret;
+	u64 old_ino = btrfs_ino(old_inode);
 
-	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
 		return -EPERM;
 
 	/* we only allow rename subvolume link between subvolumes */
-	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
 		return -EXDEV;
 
-	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
-	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
+	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
+	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
 		return -ENOTEMPTY;
 
 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
@@ -6851,7 +6862,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		filemap_flush(old_inode->i_mapping);
 
 	/* close the racy window with snapshot create/destroy ioctl */
-	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 		down_read(&root->fs_info->subvol_sem);
 	/*
 	 * We want to reserve the absolute worst case amount of items. So if
@@ -6874,15 +6885,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (ret)
 		goto out_fail;
 
-	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		/* force full log commit if subvolume involved. */
 		root->fs_info->last_trans_log_full_commit = trans->transid;
 	} else {
 		ret = btrfs_insert_inode_ref(trans, dest,
 					     new_dentry->d_name.name,
 					     new_dentry->d_name.len,
-					     old_inode->i_ino,
-					     new_dir->i_ino, index);
+					     old_ino,
+					     btrfs_ino(new_dir), index);
 		if (ret)
 			goto out_fail;
 		/*
@@ -6898,10 +6909,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * make sure the inode gets flushed if it is replacing
 	 * something.
 	 */
-	if (new_inode && new_inode->i_size &&
-	    old_inode && S_ISREG(old_inode->i_mode)) {
+	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
 		btrfs_add_ordered_operation(trans, root, old_inode);
-	}
 
 	old_dir->i_ctime = old_dir->i_mtime = ctime;
 	new_dir->i_ctime = new_dir->i_mtime = ctime;
@@ -6910,7 +6919,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (old_dentry->d_parent != new_dentry->d_parent)
 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
 
-	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
 					old_dentry->d_name.name,
@@ -6926,7 +6935,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 	if (new_inode) {
 		new_inode->i_ctime = CURRENT_TIME;
-		if (unlikely(new_inode->i_ino ==
+		if (unlikely(btrfs_ino(new_inode) ==
 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
 			root_objectid = BTRFS_I(new_inode)->location.objectid;
 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
@@ -6952,7 +6961,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			     new_dentry->d_name.len, 0, index);
 	BUG_ON(ret);
 
-	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
 		struct dentry *parent = dget_parent(new_dentry);
 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
 		dput(parent);
@@ -6961,7 +6970,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_fail:
 	btrfs_end_transaction_throttle(trans, root);
 
-	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&root->fs_info->subvol_sem);
 
 	return ret;
@@ -7106,7 +7115,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 		goto out_unlock;
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-				dentry->d_name.len, dir->i_ino, objectid,
+				dentry->d_name.len, btrfs_ino(dir), objectid,
 				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
 				&index);
 	err = PTR_ERR(inode);
@@ -7137,7 +7146,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
-	key.objectid = inode->i_ino;
+	key.objectid = btrfs_ino(inode);
 	key.offset = 0;
 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
 	datasize = btrfs_file_extent_calc_inline_size(name_len);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index f679522..d2e2e51 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -333,7 +333,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 	BUG_ON(ret);
 
 	ret = btrfs_insert_dir_item(trans, root,
-				    name, namelen, dir->i_ino, &key,
+				    name, namelen, btrfs_ino(dir), &key,
 				    BTRFS_FT_DIR, index);
 	if (ret)
 		goto fail;
@@ -344,7 +344,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 
 	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, objectid,
 				 root->root_key.objectid,
-				 dir->i_ino, index, name, namelen);
+				 btrfs_ino(dir), index, name, namelen);
 	BUG_ON(ret);
@@ -1038,7 +1038,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
 	int ret = 0;
 	u64 flags = 0;
 
-	if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
 		return -EINVAL;
 
 	down_read(&root->fs_info->subvol_sem);
@@ -1065,7 +1065,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
 	if (root->fs_info->sb->s_flags & MS_RDONLY)
 		return -EROFS;
 
-	if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
 		return -EINVAL;
 
 	if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1548,7 +1548,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 		goto out_dput;
 	}
 
-	if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
 		err = -EINVAL;
 		goto out_dput;
 	}
@@ -1834,7 +1834,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 	}
 
 	/* clone data */
-	key.objectid = src->i_ino;
+	key.objectid = btrfs_ino(src);
 	key.type = BTRFS_EXTENT_DATA_KEY;
 	key.offset = 0;
@@ -1861,7 +1861,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
 		btrfs_item_key_to_cpu(leaf, &key, slot);
 		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
-		    key.objectid != src->i_ino)
+		    key.objectid != btrfs_ino(src))
 			break;
 
 		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
@@ -1904,7 +1904,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 				goto next;
 
 			memcpy(&new_key, &key, sizeof(new_key));
-			new_key.objectid = inode->i_ino;
+			new_key.objectid = btrfs_ino(inode);
 			if (off <= key.offset)
 				new_key.offset = key.offset + destoff - off;
 			else
@@ -1958,7 +1958,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 					ret = btrfs_inc_extent_ref(trans, root,
 							disko, diskl, 0,
 							root->root_key.objectid,
-							inode->i_ino,
+							btrfs_ino(inode),
 							new_key.offset - datao);
 					BUG_ON(ret);
 				}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a40f596..b046747 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1410,9 +1410,9 @@ again:
 		prev = node;
 		entry = rb_entry(node, struct btrfs_inode, rb_node);
 
-		if (objectid < entry->vfs_inode.i_ino)
+		if (objectid < btrfs_ino(&entry->vfs_inode))
 			node = node->rb_left;
-		else if (objectid > entry->vfs_inode.i_ino)
+		else if (objectid > btrfs_ino(&entry->vfs_inode))
 			node = node->rb_right;
 		else
 			break;
@@ -1420,7 +1420,7 @@ again:
 	if (!node) {
 		while (prev) {
 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
-			if (objectid <= entry->vfs_inode.i_ino) {
+			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
 				node = prev;
 				break;
 			}
@@ -1435,7 +1435,7 @@ again:
 			return inode;
 		}
 
-		objectid = entry->vfs_inode.i_ino + 1;
+		objectid = btrfs_ino(&entry->vfs_inode) + 1;
 		if (cond_resched_lock(&root->inode_lock))
 			goto again;
@@ -1471,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
 		return -ENOMEM;
 
 	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
-	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
+	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
 				       bytenr, 0);
 	if (ret < 0)
 		goto out;
@@ -1559,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 			if (first) {
 				inode = find_next_inode(root, key.objectid);
 				first = 0;
-			} else if (inode && inode->i_ino < key.objectid) {
+			} else if (inode && btrfs_ino(inode) < key.objectid) {
 				btrfs_add_delayed_iput(inode);
 				inode = find_next_inode(root, key.objectid);
 			}
-			if (inode && inode->i_ino == key.objectid) {
+			if (inode && btrfs_ino(inode) == key.objectid) {
 				end = key.offset +
 				      btrfs_file_extent_num_bytes(leaf, fi);
 				WARN_ON(!IS_ALIGNED(key.offset,
@@ -1893,6 +1893,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 	struct inode *inode = NULL;
 	u64 objectid;
 	u64 start, end;
+	u64 ino;
 
 	objectid = min_key->objectid;
 	while (1) {
@@ -1905,17 +1906,18 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 		inode = find_next_inode(root, objectid);
 		if (!inode)
 			break;
+		ino = btrfs_ino(inode);
 
-		if (inode->i_ino > max_key->objectid) {
+		if (ino > max_key->objectid) {
 			iput(inode);
 			break;
 		}
 
-		objectid = inode->i_ino + 1;
+		objectid = ino + 1;
 		if (!S_ISREG(inode->i_mode))
 			continue;
 
-		if (unlikely(min_key->objectid == inode->i_ino)) {
+		if (unlikely(min_key->objectid == ino)) {
 			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
 				continue;
 			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
@@ -1928,7 +1930,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 			start = 0;
 		}
 
-		if (unlikely(max_key->objectid == inode->i_ino)) {
+		if (unlikely(max_key->objectid == ino)) {
 			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
 				continue;
 			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2d0caf8f..6f5a704 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -963,7 +963,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	BUG_ON(ret);
 	ret = btrfs_insert_dir_item(trans, parent_root,
 				dentry->d_name.name, dentry->d_name.len,
-				parent_inode->i_ino, &key,
+				btrfs_ino(parent_inode), &key,
 				BTRFS_FT_DIR, index);
 	BUG_ON(ret);
@@ -1004,7 +1004,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	 */
 	ret = btrfs_add_root_ref(trans, tree_root, objectid,
 				 parent_root->root_key.objectid,
-				 parent_inode->i_ino, index,
+				 btrfs_ino(parent_inode), index,
 				 dentry->d_name.name, dentry->d_name.len);
 	BUG_ON(ret);
 	dput(parent);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a4bbb85..e2e325a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -519,7 +519,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 	 * file.  This must be done before the btrfs_drop_extents run
 	 * so we don't try to drop this extent.
 	 */
-	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
 				       start, 0);
 
 	if (ret == 0 &&
@@ -832,7 +832,7 @@ again:
 	read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
 
 	/* if we already have a perfect match, we're done */
-	if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
+	if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
 			 btrfs_inode_ref_index(eb, ref),
 			 name, namelen)) {
 		goto out;
@@ -898,7 +898,7 @@ conflict_again:
 	btrfs_release_path(root, path);
 
 	/* look for a conflicting sequence number */
-	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
+	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
 					 btrfs_inode_ref_index(eb, ref),
 					 name, namelen, 0);
 	if (di && !IS_ERR(di)) {
@@ -909,7 +909,7 @@ conflict_again:
 
 	/* look for a conflicting name */
-	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
 				   name, namelen, 0);
 	if (di && !IS_ERR(di)) {
 		ret = drop_one_dir_item(trans, root, path, dir, di);
@@ -973,8 +973,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 	unsigned long ptr;
 	unsigned long ptr_end;
 	int name_len;
+	u64 ino = btrfs_ino(inode);
 
-	key.objectid = inode->i_ino;
+	key.objectid = ino;
 	key.type = BTRFS_INODE_REF_KEY;
 	key.offset = (u64)-1;
@@ -993,7 +994,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 		}
 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-		if (key.objectid != inode->i_ino ||
+		if (key.objectid != ino ||
 		    key.type != BTRFS_INODE_REF_KEY)
 			break;
 		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
@@ -1024,10 +1025,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 	if (inode->i_nlink == 0) {
 		if (S_ISDIR(inode->i_mode)) {
 			ret = replay_dir_deletes(trans, root, NULL, path,
-						 inode->i_ino, 1);
+						 ino, 1);
 			BUG_ON(ret);
 		}
-		ret = insert_orphan_item(trans, root, inode->i_ino);
+		ret = insert_orphan_item(trans, root, ino);
 		BUG_ON(ret);
 	}
 	btrfs_free_path(path);
@@ -2202,6 +2203,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 	int ret;
 	int err = 0;
 	int bytes_del = 0;
+	u64 dir_ino = btrfs_ino(dir);
 
 	if (BTRFS_I(dir)->logged_trans < trans->transid)
 		return 0;
@@ -2217,7 +2219,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
+	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
 				   name, name_len, -1);
 	if (IS_ERR(di)) {
 		err = PTR_ERR(di);
@@ -2229,7 +2231,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 		BUG_ON(ret);
 	}
 	btrfs_release_path(log, path);
-	di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
+	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
 					 index, name, name_len, -1);
 	if (IS_ERR(di)) {
 		err = PTR_ERR(di);
@@ -2247,7 +2249,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 	if (bytes_del) {
 		struct btrfs_key key;
 
-		key.objectid = dir->i_ino;
+		key.objectid = dir_ino;
 		key.offset = 0;
 		key.type = BTRFS_INODE_ITEM_KEY;
 		btrfs_release_path(log, path);
@@ -2305,7 +2307,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
 	log = root->log_root;
 	mutex_lock(&BTRFS_I(inode)->log_mutex);
 
-	ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
+	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
 				  dirid, &index);
 	mutex_unlock(&BTRFS_I(inode)->log_mutex);
 	if (ret == -ENOSPC) {
@@ -2371,13 +2373,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 	int nritems;
 	u64 first_offset = min_offset;
 	u64 last_offset = (u64)-1;
+	u64 ino = btrfs_ino(inode);
 
 	log = root->log_root;
 
-	max_key.objectid = inode->i_ino;
+	max_key.objectid = ino;
 	max_key.offset = (u64)-1;
 	max_key.type = key_type;
 
-	min_key.objectid = inode->i_ino;
+	min_key.objectid = ino;
 	min_key.type = key_type;
 	min_key.offset = min_offset;
@@ -2390,9 +2393,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 	 * we didn't find anything from this transaction, see if there
 	 * is anything at all
 	 */
-	if (ret != 0 || min_key.objectid != inode->i_ino ||
-	    min_key.type != key_type) {
-		min_key.objectid = inode->i_ino;
+	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
+		min_key.objectid = ino;
 		min_key.type = key_type;
 		min_key.offset = (u64)-1;
 		btrfs_release_path(root, path);
@@ -2401,7 +2403,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 			btrfs_release_path(root, path);
 			return ret;
 		}
-		ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+		ret = btrfs_previous_item(root, path, ino, key_type);
 
 		/* if ret == 0 there are items for this type,
 		 * create a range to tell us the last key of this type.
@@ -2419,7 +2421,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 		}
 
 		/* go backward to find any previous key */
-		ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+		ret = btrfs_previous_item(root, path, ino, key_type);
 		if (ret == 0) {
 			struct btrfs_key tmp;
 			btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -2454,8 +2456,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 	for (i = path->slots[0]; i < nritems; i++) {
 		btrfs_item_key_to_cpu(src, &min_key, i);
 
-		if (min_key.objectid != inode->i_ino ||
-		    min_key.type != key_type)
+		if (min_key.objectid != ino || min_key.type != key_type)
 			goto done;
 		ret = overwrite_item(trans, log, dst_path, src, i, &min_key);
@@ -2476,7 +2477,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 			goto done;
 		}
 		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
-		if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
+		if (tmp.objectid != ino || tmp.type != key_type) {
 			last_offset = (u64)-1;
 			goto done;
 		}
@@ -2502,8 +2503,7 @@ done:
 		 * is valid
 		 */
 		ret = insert_dir_log_key(trans, log, path, key_type,
-					 inode->i_ino, first_offset,
-					 last_offset);
+					 ino, first_offset, last_offset);
 		if (ret)
 			err = ret;
 	}
@@ -2747,6 +2747,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	int nritems;
 	int ins_start_slot = 0;
 	int ins_nr;
+	u64 ino = btrfs_ino(inode);
 
 	log = root->log_root;
@@ -2759,11 +2760,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 	}
 
-	min_key.objectid = inode->i_ino;
+	min_key.objectid = ino;
 	min_key.type = BTRFS_INODE_ITEM_KEY;
 	min_key.offset = 0;
 
-	max_key.objectid = inode->i_ino;
+	max_key.objectid = ino;
 
 	/* today the code can only do partial logging of directories */
 	if (!S_ISDIR(inode->i_mode))
@@ -2786,8 +2787,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 		if (inode_only == LOG_INODE_EXISTS)
 			max_key_type = BTRFS_XATTR_ITEM_KEY;
-		ret = drop_objectid_items(trans, log, path,
-					  inode->i_ino, max_key_type);
+		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
 	} else {
 		ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
 	}
@@ -2805,7 +2805,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 			break;
 again:
 		/* note, ins_nr might be > 0 here, cleanup outside the loop */
-		if (min_key.objectid != inode->i_ino)
+		if (min_key.objectid != ino)
 			break;
 		if (min_key.type > max_key.type)
 			break;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a577653..59a1a1f 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
 		return -ENOMEM;
 
 	/* lookup the xattr by name */
-	di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
+	di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
 				strlen(name), 0);
 	if (!di) {
 		ret = -ENODATA;
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	/* first lets see if we already have this xattr */
-	di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
+	di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
 				strlen(name), -1);
 	if (IS_ERR(di)) {
 		ret = PTR_ERR(di);
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 	}
 
 	/* ok we have to create a completely new xattr */
-	ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+	ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
 				      name, name_len, value, size);
 	BUG_ON(ret);
 out:
@@ -191,7 +191,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 	 * NOTE: we set key.offset = 0; because we want to start with the
 	 * first xattr that we find and walk forward
 	 */
-	key.objectid = inode->i_ino;
+	key.objectid = btrfs_ino(inode);
 	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
 	key.offset = 0;
--
1.7.3.1
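Every hunk in the patch above funnels through the btrfs_ino() helper introduced earlier in the series (fs/btrfs/btrfs_inode.h is the natural home for it). For readers skimming the archive, here is a sketch of the shape the call sites imply; this is an inference, not the patch's verbatim definition: the real 64bit inode number is the objectid of the inode's btrfs location key, with a fallback to the VFS i_ino for the low reserved objectids such as tree roots.

/*
 * Sketch inferred from the call sites above; not quoted from the patch.
 * The 64bit inode number is the objectid of the location key.  Special
 * inodes (tree roots etc.) keep location.objectid at or below
 * BTRFS_FIRST_FREE_OBJECTID, so fall back to the VFS i_ino for them.
 */
static inline u64 btrfs_ino(struct inode *inode)
{
	u64 ino = BTRFS_I(inode)->location.objectid;

	if (ino <= BTRFS_FIRST_FREE_OBJECTID)
		ino = inode->i_ino;
	return ino;
}

Because such a helper returns u64 regardless of the width of the architecture's ino_t, every converted printk switches from %lu to %llu with an (unsigned long long) cast, as seen in the hunks above.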
Li Zefan
2011-Mar-16 08:51 UTC
[PATCH 7/7] Btrfs: Support reading/writing on disk free ino cache
This is similar to block group caching.

We dedicate a special inode in fs tree to save free ino cache.

At the very first time we create/delete a file after mount, the free ino
cache will be loaded from disk into memory. When the fs tree is committed,
the cache will be written back to disk.

To keep compatibility, we check the root generation against the generation
of the special inode when loading the cache, so the load will fail if the
btrfs filesystem was previously mounted with an older kernel.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
---
 fs/btrfs/ctree.h            |    7 +++
 fs/btrfs/disk-io.c          |    1 +
 fs/btrfs/extent-tree.c      |    3 +-
 fs/btrfs/free-space-cache.c |   97 ++++++++++++++++++++++++++++++++++++++++-
 fs/btrfs/free-space-cache.h |   11 +++++
 fs/btrfs/inode-map.c        |   87 ++++++++++++++++++++++++++++++++++++
 fs/btrfs/inode-map.h        |    2 +
 fs/btrfs/inode.c            |   45 ++++++++++++--------
 fs/btrfs/transaction.c      |    2 +
 9 files changed, 236 insertions(+), 19 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 40f38f7..5217c81 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -103,6 +103,12 @@ struct btrfs_ordered_sum;
 /* For storing free space cache */
 #define BTRFS_FREE_SPACE_OBJECTID -11ULL
 
+/*
+ * The inode number assigned to the special inode for storing
+ * free ino cache
+ */
+#define BTRFS_FREE_INO_OBJECTID -12ULL
+
 /* dummy objectid represents multiple objectids */
 #define BTRFS_MULTIPLE_OBJECTIDS -255ULL
@@ -1109,6 +1115,7 @@ struct btrfs_root {
 	wait_queue_head_t cache_wait;
 	struct btrfs_free_space_ctl *free_ino_pinned;
 	u64 cache_progress;
+	struct inode *cache_inode;
 
 	struct mutex log_mutex;
 	wait_queue_head_t log_writer_wait;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5c92066..9e8a449 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2403,6 +2403,7 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 
 static void free_fs_root(struct btrfs_root *root)
 {
+	iput(root->cache_inode);
 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
 	if (root->anon_super.s_dev) {
 		down_write(&root->anon_super.s_umount);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3ad4621..104f2eb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3122,7 +3122,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
 	/* make sure bytes are sectorsize aligned */
 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
-	if (root == root->fs_info->tree_root) {
+	if (root == root->fs_info->tree_root ||
+	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
 		alloc_chunk = 0;
 		committed = 1;
 	}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 327e249..3c9e041 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -206,7 +206,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 		return ret;
 	}
 
-	return btrfs_update_inode(trans, root, inode);
+	ret = btrfs_update_inode(trans, root, inode);
+	return ret;
 }
 
 static int readahead_cache(struct inode *inode)
@@ -504,6 +505,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 		spin_unlock(&block_group->lock);
+		ret = 0;
 
 		printk(KERN_ERR "btrfs: failed to load free space cache "
 		       "for block group %llu\n", block_group->key.objectid);
@@ -817,6 +819,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_ERROR;
 		spin_unlock(&block_group->lock);
+		ret = 0;
 
 		printk(KERN_ERR "btrfs: failed to write free space cache "
 		       "for block group %llu\n", block_group->key.objectid);
@@ -2264,3 +2267,95 @@ out:
 
 	return ino;
 }
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+				    struct btrfs_path *path)
+{
+	struct inode *inode = NULL;
+
+	spin_lock(&root->cache_lock);
+	if (root->cache_inode)
+		inode = igrab(root->cache_inode);
+	spin_unlock(&root->cache_lock);
+	if (inode)
+		return inode;
+
+	inode = __lookup_free_space_inode(root, path, 0);
+	if (IS_ERR(inode))
+		return inode;
+
+	spin_lock(&root->cache_lock);
+	if (!root->fs_info->closing)
+		root->cache_inode = igrab(inode);
+	spin_unlock(&root->cache_lock);
+
+	return inode;
+}
+
+int create_free_ino_inode(struct btrfs_root *root,
+			  struct btrfs_trans_handle *trans,
+			  struct btrfs_path *path)
+{
+	return __create_free_space_inode(root, trans, path,
+					 BTRFS_FREE_INO_OBJECTID, 0);
+}
+
+int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_path *path;
+	struct inode *inode;
+	int ret = 0;
+	u64 root_gen = btrfs_root_generation(&root->root_item);
+
+	/*
+	 * If we're unmounting then just return, since this does a search on
+	 * the normal root and not the commit root and we could deadlock.
+	 */
+	smp_mb();
+	if (fs_info->closing)
+		return 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return 0;
+
+	inode = lookup_free_ino_inode(root, path);
+	if (IS_ERR(inode))
+		goto out;
+
+	if (root_gen != BTRFS_I(inode)->generation)
+		goto out_put;
+
+	ret = __load_free_space_cache(root, inode, ctl, path, 0);
+
+	if (ret < 0)
+		printk(KERN_ERR "btrfs: failed to load free ino cache for "
+		       "root %llu\n", root->root_key.objectid);
+out_put:
+	iput(inode);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+			      struct btrfs_trans_handle *trans,
+			      struct btrfs_path *path)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct inode *inode;
+	int ret;
+
+	inode = lookup_free_ino_inode(root, path);
+	if (IS_ERR(inode))
+		return 0;
+
+	ret = __btrfs_write_out_cache(root, inode, ctl, trans, path, 0);
+	if (ret < 0)
+		printk(KERN_ERR "btrfs: failed to write free ino cache "
+		       "for root %llu\n", root->root_key.objectid);
+
+	iput(inode);
+	return ret;
+}
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 282eeda..c805ebc 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -65,6 +65,17 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path);
 
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+				    struct btrfs_path *path);
+int create_free_ino_inode(struct btrfs_root *root,
+			  struct btrfs_trans_handle *trans,
+			  struct btrfs_path *path);
+int load_free_ino_cache(struct btrfs_fs_info *fs_info,
+			struct btrfs_root *root);
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+			      struct btrfs_trans_handle *trans,
+			      struct btrfs_path *path);
+
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
 			   u64 bytenr, u64 size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index cfa9f5e..e94e5b4 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -137,6 +137,7 @@ out:
 static void start_caching(struct btrfs_root *root)
 {
 	struct task_struct *tsk;
+	int ret;
 
 	spin_lock(&root->cache_lock);
 	if (root->cached != BTRFS_CACHE_NO) {
@@ -147,6 +148,14 @@ static void start_caching(struct btrfs_root *root)
 	root->cached = BTRFS_CACHE_STARTED;
 	spin_unlock(&root->cache_lock);
 
+	ret = load_free_ino_cache(root->fs_info, root);
+	if (ret == 1) {
+		spin_lock(&root->cache_lock);
+		root->cached = BTRFS_CACHE_FINISHED;
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
 	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
 			  root->root_key.objectid);
 	BUG_ON(IS_ERR(tsk));
@@ -352,6 +361,84 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
 	pinned->op = &pinned_free_ino_op;
 }
 
+int btrfs_save_ino_cache(struct btrfs_root *root,
+			 struct btrfs_trans_handle *trans)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_path *path;
+	struct inode *inode;
+	u64 alloc_hint = 0;
+	int ret;
+	int prealloc;
+	bool retry = false;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+again:
+	inode = lookup_free_ino_inode(root, path);
+	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+		ret = PTR_ERR(inode);
+		goto out;
+	}
+
+	if (IS_ERR(inode)) {
+		BUG_ON(retry);
+		retry = true;
+
+		ret = create_free_ino_inode(root, trans, path);
+		if (ret)
+			goto out;
+		goto again;
+	}
+
+	BTRFS_I(inode)->generation = 0;
+	ret = btrfs_update_inode(trans, root, inode);
+	WARN_ON(ret);
+
+	if (i_size_read(inode) > 0) {
+		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+		if (ret)
+			goto out_put;
+	}
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_FINISHED) {
+		ret = -1;
+		spin_unlock(&root->cache_lock);
+		goto out_put;
+	}
+	spin_unlock(&root->cache_lock);
+
+	spin_lock(&ctl->tree_lock);
+	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+	spin_unlock(&ctl->tree_lock);
+
+	/* Just to make sure we have enough space */
+	prealloc += 8 * PAGE_CACHE_SIZE;
+
+	ret = btrfs_check_data_free_space(inode, prealloc);
+	if (ret)
+		goto out_put;
+
+	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+					      prealloc, prealloc, &alloc_hint);
+	if (ret)
+		goto out_put;
+	btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+	iput(inode);
+out:
+	if (ret == 0)
+		ret = btrfs_write_out_ino_cache(root, trans, path);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
 static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	struct btrfs_path *path;
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
index eb91845..ddb347b 100644
--- a/fs/btrfs/inode-map.h
+++ b/fs/btrfs/inode-map.h
@@ -5,6 +5,8 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root);
 void btrfs_unpin_free_ino(struct btrfs_root *root);
 void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
+int btrfs_save_ino_cache(struct btrfs_root *root,
+			 struct btrfs_trans_handle *trans);
 
 int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d96d858..595646a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -740,6 +740,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
 	return alloc_hint;
 }
 
+static inline bool is_free_space_inode(struct btrfs_root *root,
+				       struct inode *inode)
+{
+	if (root == root->fs_info->tree_root ||
+	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+		return true;
+	return false;
+}
+
 /*
  * when extent_io.c finds a delayed allocation range in the file,
  * the call backs end up in this code.  The basic idea is to
@@ -772,7 +781,7 @@ static noinline int cow_file_range(struct inode *inode,
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	int ret = 0;
 
-	BUG_ON(root == root->fs_info->tree_root);
+	BUG_ON(is_free_space_inode(root, inode));
 	trans = btrfs_join_transaction(root, 1);
 	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
@@ -1043,17 +1052,18 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 	int type;
 	int nocow;
 	int check_prev = 1;
-	bool nolock = false;
+	bool nolock;
 	u64 ino = btrfs_ino(inode);
 
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
-	if (root == root->fs_info->tree_root) {
-		nolock = true;
+
+	nolock = is_free_space_inode(root, inode);
+
+	if (nolock)
 		trans = btrfs_join_transaction_nolock(root, 1);
-	} else {
+	else
 		trans = btrfs_join_transaction(root, 1);
-	}
 	BUG_ON(IS_ERR(trans));
 
 	cow_start = (u64)-1;
@@ -1310,8 +1320,7 @@ static int btrfs_set_bit_hook(struct inode *inode,
 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
 		struct btrfs_root *root = BTRFS_I(inode)->root;
 		u64 len = state->end + 1 - state->start;
-		int do_list = (root->root_key.objectid !=
-			       BTRFS_ROOT_TREE_OBJECTID);
+		bool do_list = !is_free_space_inode(root, inode);
 
 		if (*bits & EXTENT_FIRST_DELALLOC)
 			*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1344,8 +1353,7 @@ static int btrfs_clear_bit_hook(struct inode *inode,
 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
 		struct btrfs_root *root = BTRFS_I(inode)->root;
 		u64 len = state->end + 1 - state->start;
-		int do_list = (root->root_key.objectid !=
-			       BTRFS_ROOT_TREE_OBJECTID);
+		bool do_list = !is_free_space_inode(root, inode);
 
 		if (*bits & EXTENT_FIRST_DELALLOC)
 			*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1452,7 +1460,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-	if (root == root->fs_info->tree_root)
+	if (is_free_space_inode(root, inode))
 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
 	else
 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1692,7 +1700,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	struct extent_state *cached_state = NULL;
 	int compress_type = 0;
 	int ret;
-	bool nolock = false;
+	bool nolock;
 
 	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
 					     end - start + 1);
@@ -1700,7 +1708,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 		return 0;
 	BUG_ON(!ordered_extent);
 
-	nolock = (root == root->fs_info->tree_root);
+	nolock = is_free_space_inode(root, inode);
 
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
 		BUG_ON(!list_empty(&ordered_extent->list));
@@ -3418,7 +3426,9 @@ delete:
 		if (path->slots[0] == 0 ||
 		    path->slots[0] != pending_del_slot) {
-			if (root->ref_cows) {
+			if (root->ref_cows &&
+			    BTRFS_I(inode)->location.objectid !=
+						BTRFS_FREE_INO_OBJECTID) {
 				err = -EAGAIN;
 				goto out;
 			}
@@ -3740,7 +3750,7 @@ void btrfs_evict_inode(struct inode *inode)
 
 	truncate_inode_pages(&inode->i_data, 0);
 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
-			       root == root->fs_info->tree_root))
+			       is_free_space_inode(root, inode)))
 		goto no_delete;
 
 	if (is_bad_inode(inode)) {
@@ -4363,7 +4373,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 		return 0;
 
 	smp_mb();
-	nolock = (root->fs_info->closing && root == root->fs_info->tree_root);
+	if (root->fs_info->closing && is_free_space_inode(root, inode))
+		nolock = true;
 
 	if (wbc->sync_mode == WB_SYNC_ALL) {
 		if (nolock)
@@ -6755,7 +6766,7 @@ int btrfs_drop_inode(struct inode *inode)
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 
 	if (btrfs_root_refs(&root->root_item) == 0 &&
-	    root != root->fs_info->tree_root)
+	    !is_free_space_inode(root, inode))
 		return 1;
 	else
 		return generic_drop_inode(inode);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6f5a704..06825c5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -752,6 +752,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
 		btrfs_update_reloc_root(trans, root);
 		btrfs_orphan_commit_root(trans, root);
 
+		btrfs_save_ino_cache(root, trans);
+
 		if (root->commit_root != root->node) {
 			mutex_lock(&root->fs_commit_mutex);
 			switch_commit_root(root);
--
1.7.3.1
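The compatibility rule in the commit message comes down to a single comparison at load time. Here is a condensed restatement of the check already visible in the load_free_ino_cache() hunk above; the helper name is invented for orientation only and is not additional code from the patch:

/*
 * Condensed from load_free_ino_cache() above: the on-disk free ino
 * cache is trusted only when the special inode's generation matches
 * the current generation of the fs root.  An older kernel advances
 * the root generation on every commit without ever touching the
 * special inode, so after such a mount the two diverge and the cache
 * is ignored.
 */
static bool free_ino_cache_usable(struct btrfs_root *root,
				  struct inode *cache_inode)
{
	return btrfs_root_generation(&root->root_item) ==
	       BTRFS_I(cache_inode)->generation;
}

When the check fails, start_caching() simply falls back to the btrfs-ino-cache kthread, so the worst case after a mount with an older kernel is one in-memory rebuild of the cache rather than any on-disk incompatibility.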