search for: rb_left

Displaying 20 results from an estimated 69 matches for "rb_left".

2017 Jan 20
1
[PATCH] drm/nouveau/client: use rb_entry()
...vkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
 	while (*ptr) {
 		struct nvkm_object *this =
-			container_of(*ptr, typeof(*this), node);
+			rb_entry(*ptr, typeof(*this), node);
 		parent = *ptr;
 		if (object->object < this->object)
 			ptr = &parent->rb_left;
@@ -243,7 +243,7 @@ nvkm_client_search(struct nvkm_client *client, u64 handle)
 	struct rb_node *node = client->objroot.rb_node;
 	while (node) {
 		struct nvkm_object *object =
-			container_of(node, typeof(*object), node);
+			rb_entry(node, typeof(*object), node);
 		if (handle < object->...
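For context: rb_entry() is simply container_of() specialized for embedded struct rb_node members, so conversions like this one change no behavior but make the intent explicit. A minimal sketch of the lookup pattern these hunks touch; the struct and field names below are illustrative, not the actual nouveau types:

#include <linux/types.h>
#include <linux/rbtree.h>

struct item {
    u64 key;
    struct rb_node node;    /* rb_node embedded in the payload */
};

static struct item *item_search(struct rb_root *root, u64 key)
{
    struct rb_node *n = root->rb_node;

    while (n) {
        /* recover the containing struct from the embedded rb_node */
        struct item *it = rb_entry(n, struct item, node);

        if (key < it->key)
            n = n->rb_left;
        else if (key > it->key)
            n = n->rb_right;
        else
            return it;
    }
    return NULL;
}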
2016 Dec 20
1
[PATCH] drm/nouveau/dma: use rb_entry()
...vkm_dma *dma, struct nvkm_client *client, u64 object)
 	struct rb_node *node = client->dmaroot.rb_node;
 	while (node) {
 		struct nvkm_dmaobj *dmaobj =
-			container_of(node, typeof(*dmaobj), rb);
+			rb_entry(node, typeof(*dmaobj), rb);
 		if (object < dmaobj->handle)
 			node = node->rb_left;
 		else
@@ -67,7 +67,7 @@ nvkm_dma_oclass_new(struct nvkm_device *device,
 	dmaobj->handle = oclass->object;
 	while (*ptr) {
-		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
+		struct nvkm_dmaobj *obj = rb_entry(*ptr, typeof(*obj), rb);
 		parent = *ptr;
 		if (dmaobj->...
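The second hunk is the matching insertion walk. A sketch of that idiom, reusing the illustrative struct item from above; rb_link_node() and rb_insert_color() are the standard Linux rbtree linking and rebalancing calls:

/* Insert, keeping the tree ordered by ->key; returns false on duplicate. */
static bool item_insert(struct rb_root *root, struct item *new)
{
    struct rb_node **p = &root->rb_node, *parent = NULL;

    while (*p) {
        struct item *this = rb_entry(*p, struct item, node);

        parent = *p;
        if (new->key < this->key)
            p = &parent->rb_left;
        else if (new->key > this->key)
            p = &parent->rb_right;
        else
            return false;    /* key already present */
    }

    rb_link_node(&new->node, parent, p);  /* splice in at the leaf */
    rb_insert_color(&new->node, root);    /* rebalance/recolor */
    return true;
}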
2012 May 24
2
[PATCH] Btrfs: fix the same inode id problem when doing auto defragment
... * must be held already
 *
@@ -87,9 +102,9 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
 		parent = *p;
 		entry = rb_entry(parent, struct inode_defrag, rb_node);
-		if (defrag->ino < entry->ino)
+		if (__compare_inode_defrag(defrag, entry) < 0)
 			p = &parent->rb_left;
-		else if (defrag->ino > entry->ino)
+		else if (__compare_inode_defrag(defrag, entry) > 0)
 			p = &parent->rb_right;
 		else {
 			/* if we're reinserting an entry for
@@ -159,28 +174,33 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 /*
  * must be...
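The fix replaces raw ->ino comparisons with a comparator, so two inodes that share an id but live in different subvolume roots no longer collide in the tree. The excerpt elides __compare_inode_defrag() itself; a hypothetical sketch of such a two-field comparator, assuming it is keyed on (root, ino):

/* Hypothetical comparator: order by root first, then by inode number.
 * The actual fields of struct inode_defrag are elided by the excerpt. */
static int compare_inode_defrag(const struct inode_defrag *a,
                                const struct inode_defrag *b)
{
    if (a->root < b->root)
        return -1;
    if (a->root > b->root)
        return 1;
    if (a->ino < b->ino)
        return -1;
    if (a->ino > b->ino)
        return 1;
    return 0;    /* same (root, ino): the entry already exists */
}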
2009 Aug 21
0
[GIT PULL] btrfs rb corruption fix
...ode_tree.rb_node;
+	parent = NULL;
 	spin_lock(&root->inode_lock);
 	while (*p) {
@@ -3108,13 +3112,16 @@ static void inode_tree_add(struct inode *inode)
 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
 		if (inode->i_ino < entry->vfs_inode.i_ino)
-			p = &(*p)->rb_left;
+			p = &parent->rb_left;
 		else if (inode->i_ino > entry->vfs_inode.i_ino)
-			p = &(*p)->rb_right;
+			p = &parent->rb_right;
 		else {
 			WARN_ON(!(entry->vfs_inode.i_state &
 				  (I_WILL_FREE | I_FREEING | I_CLEAR)));
-			break;
+			rb_erase(parent, &...
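The key change is in the else branch: instead of breaking out when a dying inode is found in the tree (and later linking the new node against a stale position), the old entry is erased and the walk is redone. A sketch of that erase-and-retry shape, using the illustrative struct item from earlier and omitting the locking and inode-state checks visible in the hunk:

static void item_replace_insert(struct rb_root *root, struct item *new)
{
    struct rb_node **p, *parent;

again:
    p = &root->rb_node;
    parent = NULL;
    while (*p) {
        struct item *entry = rb_entry(*p, struct item, node);

        parent = *p;
        if (new->key < entry->key)
            p = &parent->rb_left;
        else if (new->key > entry->key)
            p = &parent->rb_right;
        else {
            /* stale duplicate: unlink it, then redo the whole walk
             * rather than linking against a node about to go away */
            rb_erase(parent, root);
            RB_CLEAR_NODE(parent);
            goto again;
        }
    }
    rb_link_node(&new->node, parent, p);
    rb_insert_color(&new->node, root);
}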
2008 Nov 07
0
[PATCH][cfq-cgroups] Introduce ioprio class for top layer.
...-		if (rb_key < __cfqd->rb_key)
+
+	/*
+	 * sort RT cfq_data first, we always want to give
+	 * preference to them. IDLE cfq_data goes to the back.
+	 * after that, sort on the next service time.
+	 */
+	if (cfq_data_class_rt(cfqd) > cfq_data_class_rt(__cfqd))
+		n = &(*p)->rb_left;
+	else if (cfq_data_class_rt(cfqd) < cfq_data_class_rt(__cfqd))
+		n = &(*p)->rb_right;
+	else if (cfq_data_class_idle(cfqd) <
+		 cfq_data_class_idle(__cfqd))
+		n = &(*p)->rb_left;
+	else if (cfq_data_class_idle(cfqd) >
+		 cfq_data_class_idle(__cfqd))
+		n = &...
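The comparison chain implements a multi-key sort: scheduling class first (RT ahead, IDLE behind), then service time. Condensed into a single comparator for clarity; the class helpers are stand-ins for the patch's functions, and the rb_key tie-break is an assumption about what the excerpt truncates:

/* Stand-ins for the patch's helpers; assumed to return nonzero
 * when the cfq_data belongs to the respective scheduling class. */
static bool cfq_data_class_rt(const struct cfq_data *cfqd);
static bool cfq_data_class_idle(const struct cfq_data *cfqd);

/* Condensed ordering rule from the excerpt: RT entities sort leftmost,
 * IDLE entities rightmost, ties broken by the service-time key. */
static int cfqd_cmp(const struct cfq_data *a, const struct cfq_data *b)
{
    if (cfq_data_class_rt(a) != cfq_data_class_rt(b))
        return cfq_data_class_rt(a) ? -1 : 1;    /* RT first */
    if (cfq_data_class_idle(a) != cfq_data_class_idle(b))
        return cfq_data_class_idle(a) ? 1 : -1;  /* IDLE last */
    if (a->rb_key != b->rb_key)
        return a->rb_key < b->rb_key ? -1 : 1;
    return 0;
}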
2006 Aug 02
10
[PATCH 0/6] htb: cleanup
The HTB scheduler code is a mess; this patch set does some basic housecleaning. The first four should cause no code change, but the last two need more testing. -- Stephen Hemminger <shemminger@osdl.org> "And in the Packet there writ down that doome"
2012 Oct 04
3
[PATCH] btrfs ulist use rbtree instead
...rn 1;
 }
 EXPORT_SYMBOL(ulist_add);
+
+struct ulist_node *__ulist_rbtree_search(struct ulist *ulist, u64 val)
+{
+	struct rb_node *node = ulist->root.rb_node;
+	struct ulist_node *v;
+	while (node) {
+		v = rb_entry(node, struct ulist_node, node);
+		if (v->val < val)
+			node = node->rb_left;
+		else if (v->val > val)
+			node = node->rb_right;
+		else
+			return v;
+	}
+	return NULL;
+}
+
+int __ulist_rbtree_add_node(struct ulist *ulist, struct ulist_node *node)
+{
+	struct rb_node **new = &(ulist->root.rb_node), *parent = NULL;
+	struct ulist_node *v;
+	while (*new)...
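One thing worth noting about the search above: it descends left when the stored value is smaller than the target, which is the reverse of the usual convention (left subtree holds smaller keys). That is only consistent if the truncated __ulist_rbtree_add_node() inserts with the same inverted comparison; search and insert must agree on direction. The conventional shape, for reference, using the patch's names:

/* Conventional rbtree lookup: left subtree holds smaller keys, so
 * the search goes right when the stored value is below the target. */
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
    struct rb_node *node = ulist->root.rb_node;

    while (node) {
        struct ulist_node *v = rb_entry(node, struct ulist_node, node);

        if (val < v->val)
            node = node->rb_left;
        else if (val > v->val)
            node = node->rb_right;
        else
            return v;
    }
    return NULL;
}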
2017 Oct 18
2
Null dereference panic in CentOS-6.5
...s = 21530842, nr_switches = 148355748,
  cfs = {
    load = { weight = 2, inv_weight = 0 },
    nr_running = 1,
    h_nr_running = 2,
    exec_clock = 3309310258875,
    min_vruntime = 1181294560093,
    tasks_timeline = { rb_node = 0x0 },
    rb_leftmost = 0x0,
    tasks = { next = 0xffff88013bc568e8, prev = 0xffff88013bc568e8 },
    balance_iterator = 0xffff88013bc568e8,
    curr = 0xffff88204b501e00,
    next = 0x0,
    last = 0x0,
    skip = 0x0,
    nr_spread_over = 5,
    ....
We can see that the valu...
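The dump shows tasks_timeline.rb_node and the cached rb_leftmost both NULL while nr_running is nonzero, i.e. the tree and the accounting disagree; dereferencing the cached leftmost pointer without a NULL check then faults. The guard the pick-next path relies on looks roughly like this, modeled on __pick_first_entity() from kernels of that era (a sketch, not the exact CentOS source):

/* Sketch of the guarded access: bail out before rb_entry() can be
 * applied to a NULL cached-leftmost pointer. */
static struct sched_entity *pick_first_entity(struct cfs_rq *cfs_rq)
{
    struct rb_node *left = cfs_rq->rb_leftmost;

    if (!left)    /* empty timeline: nothing to pick */
        return NULL;

    return rb_entry(left, struct sched_entity, run_node);
}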
2013 Apr 03
0
[PATCH] Btrfs-progs: add a free space cache checker to fsck
...ode *node, int bitmap)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct btrfs_free_space *info;
+
+	while (*p) {
+		parent = *p;
+		info = rb_entry(parent, struct btrfs_free_space, offset_index);
+
+		if (offset < info->offset) {
+			p = &(*p)->rb_left;
+		} else if (offset > info->offset) {
+			p = &(*p)->rb_right;
+		} else {
+			/*
+			 * we could have a bitmap entry and an extent entry
+			 * share the same offset. If this is the case, we want
+			 * the extent entry to always be found first if we do a
+			 * linear search throu...
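The elided else branch handles the one legal collision: a bitmap entry and an extent entry may share an offset, and the extent entry must sort first. That is a classic secondary-key tie-break; a hedged sketch of how such a comparator could resolve it (the parameter names follow the excerpt, the tie-break itself is an assumption about the elided code):

/* Order by offset; on equal offsets, extent entries (bitmap == 0)
 * sort before bitmap entries so a linear search finds them first. */
static int free_space_cmp(u64 a_offset, int a_bitmap,
                          u64 b_offset, int b_bitmap)
{
    if (a_offset < b_offset)
        return -1;
    if (a_offset > b_offset)
        return 1;
    /* equal offsets: extent entry (bitmap == 0) comes first */
    return a_bitmap - b_bitmap;
}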
2011 Jan 06
3
Offline Deduplication for Btrfs V2
Just a quick update: I've dropped the hashing stuff in favor of doing a memcmp in the kernel to make sure the data is still the same. The thing that takes a while is reading the data up from disk, so doing a memcmp of the entire buffer isn't that big of a deal, not to mention there's a possibility for malicious users if there is a problem with the hashing algorithms we
2008 Nov 12
15
[PATCH][RFC][12+2][v3] A expanded CFQ scheduler for cgroups
This patchset expands the traditional CFQ scheduler to support cgroups and improves on the old version. The improvements are as follows.
* Modularizing our new CFQ scheduler. The expanded CFQ scheduler is registered/unregistered as a new I/O elevator called "cfq-cgroups". By this, the traditional CFQ scheduler, which does not handle cgroups, and our new CFQ
2013 Oct 17
42
[PATCH v8 0/19] enable swiotlb-xen on arm and arm64
Hi all, this patch series enables xen-swiotlb on arm and arm64. It has been heavily reworked compared to the previous versions in order to achieve better performance and to address review comments. We are not using dma_mark_clean to ensure coherency anymore. We call the platform implementation of map_page and unmap_page. We assume that dom0 has been mapped 1:1 (physical address == machine
2008 Jun 02
1
[PATCH 0/2] dm-ioband: I/O bandwidth controller v1.1.0: Introduction
Hi everyone,
This is the dm-ioband version 1.1.0 release. Dm-ioband is an I/O bandwidth controller implemented as a device-mapper driver, which gives a specified bandwidth to each job running on the same physical device.
- Can be applied to the kernel 2.6.26-rc2-mm1.
- Changes from 1.0.0 (posted on May 19, 2008):
  - Measures against high memory pressure. I/O requests to reclaim pages won't
2008 Jul 04
1
[PATCH 0/2] dm-ioband: I/O bandwidth controller v1.2.0: Introduction
Hi everyone,
This is the dm-ioband version 1.2.0 release. Dm-ioband is an I/O bandwidth controller implemented as a device-mapper driver, which gives a specified bandwidth to each job running on the same physical device.
- Can be applied to the kernel 2.6.26-rc5-mm3.
- Changes from 1.1.0 (posted on June 2, 2008):
  - Dynamic policy switching. A user can change the bandwidth control policy