An assorted set of casts to get rid of the warnings on 32-bit archs.

diff -r f89e7971692f ctree.c
--- a/ctree.c	Mon Oct 15 16:22:39 2007 -0400
+++ b/ctree.c	Wed Oct 17 10:37:07 2007 +0200
@@ -1389,7 +1389,7 @@ int btrfs_leaf_free_space(struct btrfs_r
 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
 	if (ret < 0) {
 		printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
-		       ret, BTRFS_LEAF_DATA_SIZE(root),
+		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
 		       leaf_space_used(leaf, 0, nritems), nritems);
 	}
 	return ret;
diff -r f89e7971692f extent-tree.c
--- a/extent-tree.c	Mon Oct 15 16:22:39 2007 -0400
+++ b/extent-tree.c	Wed Oct 17 10:37:07 2007 +0200
@@ -149,7 +149,7 @@ struct btrfs_block_group_cache *btrfs_lo
 	if (ret)
 		return NULL;
 
-	block_group = (struct btrfs_block_group_cache *)ptr;
+	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 	if (block_group->key.objectid <= bytenr && bytenr <
 	    block_group->key.objectid + block_group->key.offset)
 		return block_group;

[NOTE(review): the lines following "bytenr <" and the "+279,7" half of the
next hunk header were garbled in transit; reconstructed above from the btrfs
sources of that era — verify against the original list posting.]

@@ -279,7 +279,7 @@ again:
 		if (ret)
 			break;
 
-		cache = (struct btrfs_block_group_cache *)ptr;
+		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 		last = cache->key.objectid + cache->key.offset;
 		used = btrfs_block_group_used(&cache->item);
@@ -537,7 +537,7 @@ int btrfs_write_dirty_block_groups(struc
 		if (ret)
 			break;
 
-		cache = (struct btrfs_block_group_cache *)ptr;
+		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 		err = write_one_cache_group(trans, root,
 					    path, cache);
 		/*
@@ -1541,7 +1541,7 @@ int btrfs_read_block_groups(struct btrfs
 				  found_key.objectid +
 				  found_key.offset - 1,
 				  bit | EXTENT_LOCKED, GFP_NOFS);
 		set_state_private(block_group_cache, found_key.objectid,
-				  (u64)cache);
+				  (unsigned long)cache);
 
 		if (key.objectid >
 		    btrfs_super_total_bytes(&info->super_copy))
diff -r f89e7971692f extent_map.c
--- a/extent_map.c	Mon Oct 15 16:22:39 2007 -0400
+++ b/extent_map.c	Wed Oct 17 10:37:07 2007 +0200
@@ -2654,8 +2679,8 @@ void memcpy_extent_buffer(struct extent_
 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
-		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
-					       dst_off_in_page));
+		cur = min_t(unsigned long, cur,
+			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
 
 		copy_pages(extent_buffer_page(dst, dst_i),
 			   extent_buffer_page(dst, src_i),
@@ -2707,7 +2732,7 @@ void memmove_extent_buffer(struct extent
 		if (dst_i == 0)
 			dst_off_in_page += start_offset;
 
-		cur = min(len, src_off_in_page + 1);
+		cur = min_t(unsigned long, len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
 		move_pages(extent_buffer_page(dst, dst_i),
 			   extent_buffer_page(dst, src_i),
diff -r f89e7971692f file.c
--- a/file.c	Mon Oct 15 16:22:39 2007 -0400
+++ b/file.c	Wed Oct 17 10:37:07 2007 +0200
@@ -188,7 +188,7 @@ insert:
 	while (size > 0) {
 		page = pages[i];
 		kaddr = kmap_atomic(page, KM_USER0);
-		cur_size = min(PAGE_CACHE_SIZE - page_offset, size);
+		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
 		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_offset = 0;
diff -r f89e7971692f inode.c
--- a/inode.c	Mon Oct 15 16:22:39 2007 -0400
+++ b/inode.c	Wed Oct 17 10:37:07 2007 +0200
@@ -1606,7 +1606,7 @@ again:
 			extent_start;
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
 		map = kmap(page);
-		copy_size = min(PAGE_CACHE_SIZE - page_offset,
+		copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
 				size - extent_offset);
 		em->block_start = EXTENT_MAP_INLINE;

-- 
Jens Axboe