Displaying 20 results from an estimated 97 matches for "rb_entry".
2017 Jan 20
1
[PATCH] drm/nouveau/client: use rb_entry()
To make the code clearer, use rb_entry() instead of container_of() to
deal with rbtree.
Signed-off-by: Geliang Tang <geliangtang at gmail.com>
---
drivers/gpu/drm/nouveau/nvkm/core/client.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau...
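For context, rb_entry() is defined in include/linux/rbtree.h as a direct alias for container_of(), so the conversion is purely a readability change. A minimal before/after sketch (struct foo and its rb_node member are illustrative names, since the diff body is truncated above):

/* include/linux/rbtree.h:
 *   #define rb_entry(ptr, type, member) container_of(ptr, type, member)
 * The two calls below are therefore identical in effect; the second
 * simply states the rbtree intent. */
struct foo *f;
f = container_of(node, struct foo, rb_node);   /* before the patch */
f = rb_entry(node, struct foo, rb_node);       /* after the patch  */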
2016 Dec 20
1
[PATCH] drm/nouveau/dma: use rb_entry()
To make the code clearer, use rb_entry() instead of container_of() to
deal with rbtree.
Signed-off-by: Geliang Tang <geliangtang at gmail.com>
---
drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm...
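Both nouveau conversions are the same two-line substitution; more generally, rb_entry() is the access step in the canonical kernel rbtree search walk. A generic sketch of that walk (struct foo and its key field are hypothetical names, not taken from the nouveau code):

/* Canonical rbtree search, per Documentation/rbtree.txt; all names
 * here are illustrative. */
static struct foo *foo_search(struct rb_root *root, u64 key)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct foo *f = rb_entry(n, struct foo, rb_node);

                if (key < f->key)
                        n = n->rb_left;
                else if (key > f->key)
                        n = n->rb_right;
                else
                        return f;
        }
        return NULL;
}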
2012 May 24
2
[PATCH] Btrfs: fix the same inode id problem when doing auto defragment
...no)
+ return 1;
+ else if (defrag1->ino < defrag2->ino)
+ return -1;
+ else
+ return 0;
+}
+
/* pop a record for an inode into the defrag tree. The lock
* must be held already
*
@@ -87,9 +102,9 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
- if (defrag->ino < entry->ino)
+ if (__compare_inode_defrag(defrag, entry) < 0)
p = &parent->rb_left;
- else if (defrag->ino > entry->ino)
+ else if (__compare_inode_defrag(defrag, entry) > 0)
p = &parent->r...
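The hunk swaps the open-coded ->ino comparisons for a single __compare_inode_defrag() helper, whose tail is visible at the top of the snippet. Factoring the comparison out also makes it natural to call the helper once per loop iteration rather than twice, as in this hedged sketch (the function as a whole is illustrative, since the original is truncated; the -EEXIST duplicate handling is an assumption):

/* Comparator-driven rbtree insert; __compare_inode_defrag() returns
 * <0, 0, or >0 like a classic cmp(). Sketch only; names mirror the
 * hunk above. */
static int defrag_tree_insert(struct rb_root *root,
                              struct inode_defrag *defrag)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct inode_defrag *entry;
        int ret;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else
                        return -EEXIST;   /* record already queued */
        }
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, root);
        return 0;
}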
2008 Nov 07
0
[PATCH][cfq-cgroups] Introduce ioprio class for top layer.
...ree_add(struct cfq_data *cfqd, int add_front)
unsigned long rb_key;
int left;
- if (!add_front) {
+ if (cfq_data_class_idle(cfqd)) {
+ rb_key = CFQ_CGROUP_IDLE_DELAY;
+ parent = rb_last(&cfqdd->service_tree.rb);
+ if (parent && parent != &cfqd->rb_node) {
+ __cfqd = rb_entry(parent, struct cfq_data, rb_node);
+ rb_key += __cfqd->rb_key;
+ } else
+ rb_key += jiffies;
+ } else if (!add_front) {
rb_key = cfq_cgroup_slice_offset(cfqd) + jiffies;
rb_key += cfqd->slice_resid;
cfqd->slice_resid = 0;
@@ -400,7 +423,23 @@ static void cfq_cgroup_service_tr...
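Condensed, the new idle branch keys the entry just past the current tail of the service tree, since rb_last() returns the rightmost (largest-key) node. A restatement of that idiom (variable names follow the hunk; the surrounding function is truncated, so the self-skip reading of the parent test is an assumption):

/* rb_last() idiom: queue behind the largest key currently in the
 * tree, or behind 'now' if it is empty. The parent != &cfqd->rb_node
 * test presumably skips the entry being re-added itself. */
rb_key = CFQ_CGROUP_IDLE_DELAY;
parent = rb_last(&cfqdd->service_tree.rb);
if (parent && parent != &cfqd->rb_node)
        rb_key += rb_entry(parent, struct cfq_data, rb_node)->rb_key;
else
        rb_key += jiffies;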
2009 Dec 15
0
[PATCH 1/4] btrfs: fix the bug that __tree_search() returns the wrong result in extent_map.c
...- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -136,20 +136,15 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
if (prev_ret) {
orig_prev = prev;
- while (prev && offset >= extent_map_end(prev_entry)) {
- prev = rb_next(prev);
- prev_entry = rb_entry(prev, struct extent_map, rb_node);
- }
+ if (prev && offset < prev_entry->start)
+ prev = rb_prev(prev);
*prev_ret = prev;
prev = orig_prev;
}
if (next_ret) {
- prev_entry = rb_entry(prev, struct extent_map, rb_node);
- while (prev && offset < prev_entry->...
2011 May 25
0
[PATCH] Btrfs: cache bitmaps when searching for a cluster
...(list_empty(&entry->list))
+ list_add_tail(&entry->list, bitmaps);
node = rb_next(&entry->offset_index);
if (!node)
return -ENOSPC;
@@ -2102,8 +2105,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
return -ENOSPC;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
- if (entry->bitmap)
+ if (entry->bitmap) {
+ if (list_empty(&entry->list))
+ list_add_tail(&entry->list, bitmaps);
continue;
+ }
+
/*
* we haven't filled the empty size and the window is
* very larg...
2013 Apr 03
0
[PATCH] Btrfs-progs: add a free space cache checker to fsck
...t;
+
+ return bitmap_start;
+}
+
+static int tree_insert_offset(struct rb_root *root, u64 offset,
+ struct rb_node *node, int bitmap)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct btrfs_free_space *info;
+
+ while (*p) {
+ parent = *p;
+ info = rb_entry(parent, struct btrfs_free_space, offset_index);
+
+ if (offset < info->offset) {
+ p = &(*p)->rb_left;
+ } else if (offset > info->offset) {
+ p = &(*p)->rb_right;
+ } else {
+ /*
+ * we could have a bitmap entry and an extent entry
+ * share the same offset...
2013 Nov 19
6
[PATCH] Btrfs: fix very slow inode eviction and fs unmount
...ent_tree;
+ struct rb_node *node;
+
+ ASSERT(inode->i_state & I_FREEING);
+ truncate_inode_pages(&inode->i_data, 0);
+
+ write_lock(&map_tree->lock);
+ while (!RB_EMPTY_ROOT(&map_tree->map)) {
+ struct extent_map *em;
+
+ node = rb_first(&map_tree->map);
+ em = rb_entry(node, struct extent_map, rb_node);
+ remove_extent_mapping(map_tree, em);
+ free_extent_map(em);
+ }
+ write_unlock(&map_tree->lock);
+
+ spin_lock(&io_tree->lock);
+ while (!RB_EMPTY_ROOT(&io_tree->state)) {
+ struct extent_state *state;
+ struct extent_state *cached_state...
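The eviction path above drains the extent map tree by always detaching the current leftmost node. The generic shape of that drain pattern, with illustrative names (struct item is hypothetical, not a btrfs type):

/* Drain an rbtree: take rb_first() repeatedly, unlink the node,
 * then free it. Unlinking before freeing keeps the tree valid at
 * every step; the snippet above uses remove_extent_mapping() for
 * the unlink. */
while (!RB_EMPTY_ROOT(root)) {
        struct rb_node *node = rb_first(root);
        struct item *it = rb_entry(node, struct item, rb_node);

        rb_erase(node, root);
        kfree(it);
}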
2008 Sep 25
0
[PATCH 2/4] Add shared reference cache
...btrfs_add_leaf_ref(root, ref);
+ ret = btrfs_add_leaf_ref(root, ref, shared);
WARN_ON(ret);
btrfs_free_leaf_ref(root, ref);
}
diff -r 47aa0c51998a ref-cache.c
--- a/ref-cache.c Thu Sep 25 16:00:36 2008 +0800
+++ b/ref-cache.c Thu Sep 25 16:02:11 2008 +0800
@@ -78,7 +78,6 @@
}
entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
- entry->in_tree = 1;
rb_link_node(node, parent, p);
rb_insert_color(node, root);
return NULL;
@@ -103,23 +102,29 @@
return NULL;
}
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen)
+int btrfs_remove_leaf_refs(struct btrfs_ro...
2010 Oct 30
0
[PATCH] Use ERR_CAST inlined function instead of ERR_PTR(PTR_ERR(...)) - generated by Coccinelle
...454ca52..23cb8da 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -335,7 +335,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
goto out;
}
if (IS_ERR(rb_node)) {
- em = ERR_PTR(PTR_ERR(rb_node));
+ em = ERR_CAST(rb_node);
goto out;
}
em = rb_entry(rb_node, struct extent_map, rb_node);
@@ -384,7 +384,7 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
goto out;
}
if (IS_ERR(rb_node)) {
- em = ERR_PTR(PTR_ERR(rb_node));
+ em = ERR_CAST(rb_node);
goto out;
}
em = rb_entry(rb_node, struct extent_map, rb_no...
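For reference, ERR_CAST() lives in include/linux/err.h and simply forwards the encoded error pointer, so it is equivalent to ERR_PTR(PTR_ERR(...)) without the decode/re-encode round trip through a long (definition paraphrased from the header):

static inline void * __must_check ERR_CAST(__force const void *ptr)
{
        /* cast away the const */
        return (void *) ptr;
}

/* The two forms below are therefore equivalent; the first is direct: */
em = ERR_CAST(rb_node);
em = ERR_PTR(PTR_ERR(rb_node));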
2013 Jan 31
1
[PATCH] Btrfs: fix freeing delayed ref head while still holding its mutex V2
...ff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 12ef591..42f83aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3615,11 +3615,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
}
while ((node = rb_first(&delayed_refs->root)) != NULL) {
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ struct btrfs_delayed_ref_head *head = NULL;
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
atomic_set(&ref->refs, 1);
if (btrfs_delayed_ref_is_head(ref)) {
- struct btrfs_delayed_ref_head *head;
head = btrfs_...
2011 Jan 06
3
Offline Deduplication for Btrfs V2
Just a quick update, I've dropped the hashing stuff in favor of doing a memcmp
in the kernel to make sure the data is still the same. The thing that takes a
while is reading the data up from disk, so doing a memcmp of the entire buffer
isn't that big of a deal, not to mention there's a possibility for malicious
users if there is a problem with the hashing algorithms we
2009 Jun 03
0
[PATCH] Make sure all dirty blocks are written at commit time
...s_block_group_cache *
+next_block_group(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ struct rb_node *node;
+ spin_lock(&root->fs_info->block_group_cache_lock);
+ node = rb_next(&cache->cache_node);
+ btrfs_put_block_group(cache);
+ if (node) {
+ cache = rb_entry(node, struct btrfs_block_group_cache,
+ cache_node);
+ atomic_inc(&cache->count);
+ } else
+ cache = NULL;
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ return cache;
+}
+
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_ro...
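next_block_group() is written as a step function: under the cache lock it advances via rb_next(), drops the caller's reference on the old group, and takes one on the next. That makes the intended caller a simple walk; a hedged sketch (btrfs_lookup_first_block_group() is assumed here to return the first group with a reference held):

/* Walk every block group; each step releases the previous group's
 * reference, so no manual put is needed inside the loop body. */
cache = btrfs_lookup_first_block_group(root->fs_info, 0);
while (cache) {
        /* ... inspect or dirty 'cache' here ... */
        cache = next_block_group(root, cache);
}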
2023 Apr 06
3
[PATCH] ocfs2: Fix wrong search logic in __ocfs2_resv_find_window
...cstart;
+ create_new = 1;
if (best_len == wanted)
- goto out_insert;
+ goto out_create;
}
prev_resv = next_resv;
@@ -557,13 +559,9 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
while (1) {
next = rb_next(prev);
if (next) {
- next_resv = rb_entry(next,
- struct ocfs2_alloc_reservation,
- r_node);
-
- gap_start = ocfs2_resv_end(prev_resv) + 1;
- gap_end = next_resv->r_start - 1;
- gap_len = gap_end - gap_start + 1;
+ gap_start = prev_resv->r_start;
+ gap_end = prev_resv->r_start + prev_resv->r_len - 1;...
2008 Nov 12
15
[PATCH][RFC][12+2][v3] An expanded CFQ scheduler for cgroups
This patchset expands the traditional CFQ scheduler in order to support
cgroups, and improves the old version.
The improvements are as follows.
* Modularizing our new CFQ scheduler.
The expanded CFQ scheduler is registered/unregistered as a new I/O
elevator scheduler called "cfq-cgroups". By this, the traditional CFQ
scheduler, which does not handle cgroups, and our new CFQ
2023 Apr 29
1
[PATCH] ocfs2: Fix wrong search logic in __ocfs2_resv_find_window
...+ goto out_create;
>> }
>>
>> prev_resv = next_resv;
>> @@ -557,13 +559,9 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
>> while (1) {
>> next = rb_next(prev);
>> if (next) {
>> - next_resv = rb_entry(next,
>> - struct ocfs2_alloc_reservation,
>> - r_node);
>> -
>> - gap_start = ocfs2_resv_end(prev_resv) + 1;
>> - gap_end = next_resv->r_start - 1;
>> - gap_len = gap_end - gap_start + 1;
>> + gap_start = prev_resv->r_start;...
2009 Jul 31
1
[PATCH] Btrfs: make sure we find a bitmap entry
...(bitmap_info->bitmap);
@@ -438,16 +448,22 @@ again:
recalculate_thresholds(block_group);
}
- bitmap_info = tree_search_offset(block_group,
- offset_to_bitmap(block_group,
- *offset),
- 1, 0);
- if (!bitmap_info)
+ if (!next)
return -EINVAL;
+ bitmap_info = rb_entry(next, struct btrfs_free_space,
+ offset_index);
if (!bitmap_info->bitmap)
return -EAGAIN;
+ search_start = *offset;
+ search_bytes = *bytes;
+
+ ret = search_bitmap(block_group, bitmap_info, &search_start,
+ &search_bytes);
+ if (ret < 0 || search_start !...
2012 Oct 04
3
[PATCH] btrfs ulist use rbtree instead
...odes[ulist->nnodes].aux = aux;
+
+ node->aux = aux;
++ulist->nnodes;
-
return 1;
}
EXPORT_SYMBOL(ulist_add);
+
+struct ulist_node *__ulist_rbtree_search(struct ulist *ulist, u64 val)
+{
+ struct rb_node *node = ulist->root.rb_node;
+ struct ulist_node *v;
+ while (node) {
+ v = rb_entry(node, struct ulist_node, node);
+ if (v->val < val)
+ node = node->rb_left;
+ else if (v->val > val)
+ node = node->rb_right;
+ else
+ return v;
+ }
+ return NULL;
+}
+
+
+int __ulist_rbtree_add_node(struct ulist *ulist, struct ulist_node *node)
+{
+ struct rb_node **new...