Displaying 20 results from an estimated 64 matches for "rb_first".
2008 Nov 07
0
[PATCH][cfq-cgroups] Introduce ioprio class for top layer.
...rn -ENOMEM;
-
- cgroup_lock();
- if (cgroup_is_removed(cont)) {
- cgroup_unlock();
- ret = -ENODEV;
- goto out;
- }
-
- cfqc = cgroup_to_cfq_cgroup(cont);
-
- cgroup_unlock();
-
- /* print priority */
- ret = snprintf(page, PAGE_SIZE, "default priority: %d\n", cfqc->ioprio);
-
- p = rb_first(&cfqc->sibling_tree);
- while (p) {
- struct cfq_data *__cfqd;
-
- __cfqd = rb_entry(p, struct cfq_data, group_node);
-
- ret += snprintf(page + ret, PAGE_SIZE - ret, " %s %d\n",
- __cfqd->cfqdd->queue->kobj.parent->name,
- __cfqd->ioprio);
-
-...
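The loop being removed above is the canonical in-order walk of a kernel
rbtree: rb_first() yields the leftmost node, rb_entry() (a container_of()
wrapper) recovers the enclosing structure, and rb_next() advances. A minimal
sketch of the same pattern against <linux/rbtree.h>; struct my_item and
walk_items() are hypothetical names, not part of the patch:

#include <linux/printk.h>
#include <linux/rbtree.h>

/* Hypothetical entry type with an embedded rb_node, as in the patch. */
struct my_item {
        struct rb_node rb;      /* rbtree linkage */
        int prio;
};

static void walk_items(struct rb_root *tree)
{
        struct rb_node *p;

        /* rb_first() returns the leftmost node, or NULL on an empty tree. */
        for (p = rb_first(tree); p; p = rb_next(p)) {
                struct my_item *it = rb_entry(p, struct my_item, rb);

                pr_info("prio %d\n", it->prio);         /* read-only visit */
        }
}

The patch's while (p) loop is this traversal written out by hand; the tree
must not be modified while it is walked this way.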
2013 Nov 19
6
[PATCH] Btrfs: fix very slow inode eviction and fs unmount
...e *map_tree = &BTRFS_I(inode)->extent_tree;
+ struct rb_node *node;
+
+ ASSERT(inode->i_state & I_FREEING);
+ truncate_inode_pages(&inode->i_data, 0);
+
+ write_lock(&map_tree->lock);
+ while (!RB_EMPTY_ROOT(&map_tree->map)) {
+ struct extent_map *em;
+
+ node = rb_first(&map_tree->map);
+ em = rb_entry(node, struct extent_map, rb_node);
+ remove_extent_mapping(map_tree, em);
+ free_extent_map(em);
+ }
+ write_unlock(&map_tree->lock);
+
+ spin_lock(&io_tree->lock);
+ while (!RB_EMPTY_ROOT(&io_tree->state)) {
+ struct extent_state *st...
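This fix drains both trees with the same idiom: test RB_EMPTY_ROOT(), take
rb_first(), unlink, free, repeat. A hedged generic sketch of that shape
(struct my_item and the kfree()-based teardown are assumptions; the patch
itself unlinks with remove_extent_mapping() and frees with free_extent_map()):

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical entry type; stands in for struct extent_map here. */
struct my_item {
        struct rb_node rb;
};

static void drain_tree(struct rb_root *root)
{
        while (!RB_EMPTY_ROOT(root)) {
                struct rb_node *n = rb_first(root);     /* leftmost node */
                struct my_item *it = rb_entry(n, struct my_item, rb);

                rb_erase(n, root);      /* unlink before freeing */
                kfree(it);
        }
}

Other patches in these results spell the same loop as
while ((node = rb_first(root)) != NULL); the two forms are equivalent, and
both look up the new leftmost node each pass because the previous one has
just been removed.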
2011 Jan 06
3
Offline Deduplication for Btrfs V2
Just a quick update: I've dropped the hashing stuff in favor of doing a memcmp
in the kernel to make sure the data is still the same. The part that takes a
while is reading the data from disk, so doing a memcmp of the entire buffer
isn't that big of a deal, not to mention there's a possibility for malicious
users if there is a problem with the hashing algorithms we
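The design point is verify-by-compare: the candidate blocks have to be read
off disk anyway, so a full memcmp() adds little cost and, unlike a hash
check, cannot be defeated by collisions. A minimal kernel-style sketch of
that final check; extents_identical() is a hypothetical helper, not from
the patch:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Verify-by-compare: both buffers were already read from disk, so a
 * byte-for-byte memcmp() is cheap relative to the I/O and is immune to
 * hash collisions, accidental or malicious.
 */
static bool extents_identical(const void *a, const void *b, size_t len)
{
        return memcmp(a, b, len) == 0;
}

A hash can still serve as a fast negative filter; the change here is that
the final dedup decision rests on the bytes themselves.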
2004 Dec 05
1
BUG in fs/ext3/dir.c
...fname &&
- call_filldir(filp, dirent, filldir, info->extra_fname))
- goto finished;
+ if (info->extra_fname) {
+ if(call_filldir(filp, dirent, filldir, info->extra_fname))
+ goto finished;
+ else
+ goto next_entry;
+ }
if (!info->curr_node)
info->curr_node = rb_first(&info->root);
@@ -492,7 +495,7 @@
info->curr_minor_hash = fname->minor_hash;
if (call_filldir(filp, dirent, filldir, fname))
break;
-
+next_entry:
info->curr_node = rb_next(info->curr_node);
if (!info->curr_node) {
if (info->next_hash == ~0) {
Regards,...
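The fix turns a give-up path into a continue path: when delivering the
cached extra_fname fails, the walk now jumps to next_entry instead of
finishing early. The traversal itself is cursor-style: a saved rb_node is
advanced with rb_next() and re-seeded with rb_first() when absent. A sketch
of that resumable-iterator shape (struct my_iter and iter_next() are
hypothetical):

#include <linux/rbtree.h>

/* Hypothetical cursor: remembers where the previous call stopped. */
struct my_iter {
        struct rb_root root;
        struct rb_node *curr;   /* last node returned, NULL = not started */
};

static struct rb_node *iter_next(struct my_iter *it)
{
        if (!it->curr)
                it->curr = rb_first(&it->root); /* (re)start at leftmost */
        else
                it->curr = rb_next(it->curr);   /* resume past last entry */
        return it->curr;                        /* NULL when exhausted */
}

ext3's readdir keeps exactly this state (curr_node, plus the hash fields
needed to rebuild the tree across calls) in its per-file private info.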
2013 Jan 31
1
[PATCH] Btrfs: fix freeing delayed ref head while still holding its mutex V2
...+---
1 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 12ef591..42f83aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3615,11 +3615,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
}
while ((node = rb_first(&delayed_refs->root)) != NULL) {
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ struct btrfs_delayed_ref_head *head = NULL;
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
atomic_set(&ref->refs, 1);
if (btrfs_delayed_ref_is_head(ref)) {
-...
2011 Mar 31
4
[PATCH] Btrfs: fix free space cache when there are pinned extents and clusters
...ace *e;
e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -614,12 +651,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
entry->type = BTRFS_FREE_SPACE_EXTENT;
}
node = rb_next(node);
- if (!node)
- break;
+ if (!node && cluster) {
+ node = rb_first(&cluster->root);
+ cluster = NULL;
+ }
offset += sizeof(struct btrfs_free_space_entry);
if (offset + sizeof(struct btrfs_free_space_entry) >=
PAGE_CACHE_SIZE)
+ next_page = true;
+ entry++;
+ }
+
+ /*
+ * We want to add any pinned extents to our free space ca...
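The interesting move in this hunk is splicing a second tree into the walk:
when rb_next() runs off the end of the free-space tree, iteration restarts
at rb_first() of the cluster's tree, and the pointer is cleared so the
cluster is consumed exactly once. A generic sketch of that chaining
(walk_with_fallback() and the visit step are hypothetical):

#include <linux/rbtree.h>

static void walk_with_fallback(struct rb_root *primary,
                               struct rb_root *fallback)
{
        struct rb_node *node = rb_first(primary);

        while (node) {
                /* ... visit the entry here ... */
                node = rb_next(node);
                if (!node && fallback) {
                        node = rb_first(fallback); /* splice in 2nd tree */
                        fallback = NULL;           /* consume it only once */
                }
        }
}

Clearing the local pointer, rather than testing a separate flag, is what
keeps the loop from cycling back into the cluster tree.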
2008 Jun 02
1
[PATCH 0/2] dm-ioband: I/O bandwidth controller v1.1.0: Introduction
Hi everyone,
This is the dm-ioband version 1.1.0 release.
Dm-ioband is an I/O bandwidth controller implemented as a device-mapper
driver, which gives a specified bandwidth to each job running on the same
physical device.
- Can be applied to the kernel 2.6.26-rc2-mm1.
- Changes from 1.0.0 (posted on May 19, 2008):
- Measures against high memory pressure.
I/O requests to reclaim pages won't
2009 Jun 03
0
[PATCH] Make sure all dirty blocks are written at commit time
...trfs_block_group_cache *cache;
int err = 0;
- int werr = 0;
struct btrfs_path *path;
u64 last = 0;
@@ -2414,39 +2430,35 @@ int btrfs_write_dirty_block_groups(struc
return -ENOMEM;
while (1) {
- cache = NULL;
- spin_lock(&root->fs_info->block_group_cache_lock);
- for (n = rb_first(&root->fs_info->block_group_cache_tree);
- n; n = rb_next(n)) {
- entry = rb_entry(n, struct btrfs_block_group_cache,
- cache_node);
- if (entry->dirty) {
- cache = entry;
- break;
- }
+ if (last == 0) {
+ err = btrfs_run_delayed_refs(trans, root,
+...
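The deleted code shows what the rewrite avoids: every pass re-walked the
whole block-group tree under its lock just to find one dirty entry, an O(n)
scan per write-out. A sketch of that find-first-match shape, with
hypothetical names (struct my_cache, find_first_dirty()):

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical cached entry; stands in for btrfs_block_group_cache. */
struct my_cache {
        struct rb_node rb;
        bool dirty;
};

static struct my_cache *find_first_dirty(struct rb_root *root,
                                         spinlock_t *lock)
{
        struct my_cache *found = NULL;
        struct rb_node *n;

        spin_lock(lock);
        for (n = rb_first(root); n; n = rb_next(n)) {
                struct my_cache *c = rb_entry(n, struct my_cache, rb);

                if (c->dirty) {
                        found = c;      /* first dirty entry wins */
                        break;
                }
        }
        spin_unlock(lock);
        return found;
}

Judging by the u64 last cursor in the context, the replacement resumes each
scan from the previous position instead of starting at rb_first() again.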
2008 Nov 12
15
[PATCH][RFC][12+2][v3] A expanded CFQ scheduler for cgroups
This patchset expands the traditional CFQ scheduler in order to support
cgroups, and improves on the old version.
The improvements are as follows.
* Modularizing our new CFQ scheduler.
The expanded CFQ scheduler is registered/unregistered as a new I/O
elevator called "cfq-cgroups". By this, the traditional CFQ
scheduler, which does not handle cgroups, and our new CFQ
2008 Jul 04
1
[PATCH 0/2] dm-ioband: I/O bandwidth controller v1.2.0: Introduction
Hi everyone,
This is the dm-ioband version 1.2.0 release.
Dm-ioband is an I/O bandwidth controller implemented as a device-mapper
driver, which gives a specified bandwidth to each job running on the same
physical device.
- Can be applied to the kernel 2.6.26-rc5-mm3.
- Changes from 1.1.0 (posted on June 2, 2008):
- Dynamic policy switching
A user can change the bandwidth control policy
2013 Dec 18
2
[PATCH] Btrfs: improve the performance fluctuating of the fsync
...-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b8c2ded..df87ed5 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -433,6 +433,9 @@ void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
spin_lock_irq(&tree->lock);
for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+ if (ordered->csum_bytes_left)
+ continue;
+
spin_lock(&log->log_extents_lock[index]);
if (list_empty(&ordered->log_list)) {
list_add_tail(&ordered->log_li...
2008 Sep 24
1
[PATCH 0/8] I/O bandwidth controller and BIO tracking
Hi everyone,
These patchsets are the new releases of dm-ioband and bio-cgroup, which
are ported to 2.6.27-rc5-mm1.
dm-ioband
Dm-ioband is an I/O bandwidth controller implemented as a
device-mapper driver, which gives a specified bandwidth to each job
running on the same block device. A job is a group of processes
with the same pid or pgrp or uid or a virtual machine such as KVM
or Xen. A