search for: class_idx

Displaying 20 results from an estimated 24 matches for "class_idx".

2016 Mar 11
0
[PATCH v1 05/19] zsmalloc: use first_page rather than page
...eletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2d7c4c11fc63..bb29203ec6b3 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -414,26 +414,28 @@ static int is_last_page(struct page *page)
 	return PagePrivate2(page);
 }
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+static void get_zspage_mapping(struct page *first_page,
+				unsigned int *class_idx,
 				enum fullness_group *fullness)
 {
 	unsigned long m;
-	BUG_ON(!is_first_page(page));
+	BUG_ON(!is_first_page(first_page));
-	m = (unsigned long)page->mapping;
+	m = (unsigned long)first_page->mappin...
2016 Apr 17
1
[PATCH v3 06/16] zsmalloc: squeeze inuse into page->mapping
...e_inuse(struct page *first_page, int val)
> +{
> +	struct zs_meta *m;
> +
> +	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
> +
> +	m = (struct zs_meta *)&first_page->mapping;
..
>  static void get_zspage_mapping(struct page *first_page,
> 				unsigned int *class_idx,
> 				enum fullness_group *fullness)
>  {
> -	unsigned long m;
> +	struct zs_meta *m;
> +
> 	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
> +	m = (struct zs_meta *)&first_page->mapping;
..
>  static void set_zspage_mapping(struct page *first_page,
>...
2016 Mar 30
0
[PATCH v3 06/16] zsmalloc: squeeze inuse into page->mapping
...um fullness_group {
 	ZS_ALMOST_EMPTY,
 	_ZS_NR_FULLNESS_GROUPS,
-	ZS_EMPTY,
+	ZS_EMPTY = _ZS_NR_FULLNESS_GROUPS,
 	ZS_FULL
 };
@@ -263,14 +269,11 @@ struct zs_pool {
 #endif
 };
-/*
- * A zspage's class index and fullness group
- * are encoded in its (first)page->mapping
- */
-#define CLASS_IDX_BITS	28
-#define FULLNESS_BITS	4
-#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
-#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
+struct zs_meta {
+	unsigned long class:CLASS_BITS;
+	unsigned long fullness:FULLNESS_BITS;
+	unsigned long inuse:INUSE_BITS;
+};

 struct mapping_area...
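For context on the diff above: the hand-rolled CLASS_IDX/FULLNESS shift-and-mask macros are replaced by a struct zs_meta bitfield that is overlaid on the pointer-sized page->mapping slot, which also makes room for an inuse counter. Below is a minimal, self-contained userspace sketch of that idea; the bit widths (CLASS_BITS, FULLNESS_BITS, INUSE_BITS) are illustrative assumptions, chosen only so the fields fit in one unsigned long, not the values used by the patch.

#include <assert.h>
#include <stdio.h>

/* Assumed bit widths; together they must fit in one unsigned long. */
#define CLASS_BITS	8
#define FULLNESS_BITS	4
#define INUSE_BITS	11

struct zs_meta {
	unsigned long class:CLASS_BITS;
	unsigned long fullness:FULLNESS_BITS;
	unsigned long inuse:INUSE_BITS;
};

int main(void)
{
	/* Stand-in for the pointer-sized page->mapping slot. */
	unsigned long mapping = 0;
	struct zs_meta *m = (struct zs_meta *)&mapping;

	/* The overlaid struct must not exceed the word it lives in. */
	assert(sizeof(struct zs_meta) <= sizeof(unsigned long));

	m->class = 42;
	m->fullness = 2;
	m->inuse = 7;

	printf("class=%lu fullness=%lu inuse=%lu (raw word=0x%lx)\n",
	       (unsigned long)m->class, (unsigned long)m->fullness,
	       (unsigned long)m->inuse, mapping);
	return 0;
}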
2016 Mar 21
0
[PATCH v2 17/18] zsmalloc: migrate tail pages in zspage
...+++++++++++++++++++++++-------
 1 file changed, 115 insertions(+), 16 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9b4b03d8f993..35bafa0bc3f1 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -551,6 +551,19 @@ static void set_zspage_mapping(struct page *first_page,
 	m->class = class_idx;
 }
+static bool check_isolated_page(struct page *first_page)
+{
+	struct page *cursor;
+
+	for (cursor = first_page; cursor != NULL; cursor =
+					get_next_page(cursor)) {
+		if (PageIsolated(cursor))
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * zsmalloc divides the pool into various siz...
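The check_isolated_page() helper added above is a straightforward chain walk: visit every page belonging to the zspage and report whether any of them is currently isolated. A hedged userspace sketch of the same traversal pattern follows, using a hypothetical node type in place of struct page, PageIsolated() and get_next_page():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the chained pages of one zspage. */
struct fake_page {
	bool isolated;		/* stand-in for PageIsolated() */
	struct fake_page *next;	/* stand-in for get_next_page() */
};

/* Mirrors the shape of check_isolated_page(): true if any page is isolated. */
static bool chain_has_isolated(struct fake_page *first_page)
{
	struct fake_page *cursor;

	for (cursor = first_page; cursor != NULL; cursor = cursor->next) {
		if (cursor->isolated)
			return true;
	}
	return false;
}

int main(void)
{
	struct fake_page p2 = { .isolated = true,  .next = NULL };
	struct fake_page p1 = { .isolated = false, .next = &p2 };

	printf("%s\n", chain_has_isolated(&p1) ? "isolated" : "not isolated");
	return 0;
}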
2016 Mar 21
2
[PATCH v2 17/18] zsmalloc: migrate tail pages in zspage
Hi Minchan,

[auto build test WARNING on next-20160318]
[cannot apply to v4.5-rc7 v4.5-rc6 v4.5-rc5 v4.5]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Minchan-Kim/Support-non-lru-page-migration/20160321-143339

coccinelle warnings: (new ones prefixed by >>)

>>
2016 Mar 11
31
[PATCH v1 00/19] Support non-lru page migration
Recently, I got many reports about performance degradation in embedded systems (Android mobile phones, webOS TVs and so on) and about fork() failing. The problem was fragmentation caused by zram and GPU driver pages. Their pages cannot be migrated, so compaction cannot work well either, and the reclaimer ends up shrinking all of the working-set pages. This made the system very slow and even caused fork() to fail.
2016 Mar 21
22
[PATCH v2 00/18] Support non-lru page migration
Recently, I got many reports about performance degradation in embedded systems (Android mobile phones, webOS TVs and so on) and about fork() failing. The problem was fragmentation caused by zram and GPU driver pages. Their pages cannot be migrated, so compaction cannot work well either, and the reclaimer ends up shrinking all of the working-set pages. This made the system very slow and even caused fork() to fail.
2016 Mar 11
0
[PATCH v1 06/19] zsmalloc: clean up many BUG_ON
...first_page->mapping;
 	*fullness = m & FULLNESS_MASK;
@@ -431,7 +431,7 @@ static void set_zspage_mapping(struct page *first_page,
 				enum fullness_group fullness)
 {
 	unsigned long m;
-	BUG_ON(!is_first_page(first_page));
+	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
 			(fullness & FULLNESS_MASK);
@@ -626,7 +626,8 @@ static enum fullness_group get_fullness_group(struct page *first_page)
 {
 	int inuse, max_objects;
 	enum fullness_group fg;
-	BUG_ON(!is_first_page(first_page));
+
+	VM_BUG_ON_PAGE(!is_first_pa...
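The set_zspage_mapping() hunk above shows how, before the zs_meta rework, class_idx and fullness were packed by hand into one word: the class index is masked and shifted above the fullness bits, and get_zspage_mapping() reverses it. A minimal standalone sketch of that encode/decode scheme, reusing the CLASS_IDX_BITS = 28 / FULLNESS_BITS = 4 widths from the old defines:

#include <assert.h>
#include <stdio.h>

#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1UL << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1UL << FULLNESS_BITS) - 1)

/* Pack class index and fullness group into one word, as set_zspage_mapping() does. */
static unsigned long encode(unsigned int class_idx, unsigned int fullness)
{
	return ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
		(fullness & FULLNESS_MASK);
}

/* Reverse the packing, as get_zspage_mapping() does. */
static void decode(unsigned long m, unsigned int *class_idx, unsigned int *fullness)
{
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

int main(void)
{
	unsigned int class_idx, fullness;
	unsigned long m = encode(123, 2);

	decode(m, &class_idx, &fullness);
	assert(class_idx == 123 && fullness == 2);
	printf("m=0x%lx class_idx=%u fullness=%u\n", m, class_idx, fullness);
	return 0;
}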
2016 Mar 30
0
[PATCH v3 08/16] zsmalloc: squeeze freelist into page->mapping
...atic unsigned long get_freeobj(struct page *first_page)
+{
+	struct zs_meta *m;
+
+	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+
+	m = (struct zs_meta *)&first_page->mapping;
+	return m->freeobj;
+}
+
 static void get_zspage_mapping(struct page *first_page,
 				unsigned int *class_idx,
 				enum fullness_group *fullness)
@@ -837,30 +854,33 @@ static struct page *get_next_page(struct page *page)
 	return next;
 }
-/*
- * Encode <page, obj_idx> as a single handle value.
- * We use the least bit of handle for tagging.
- */
-static void *location_to_obj(struct page *page, un...
2016 Mar 11
0
[PATCH v1 11/19] zsmalloc: squeeze freelist into page->mapping
...atic unsigned long get_freeobj(struct page *first_page)
+{
+	struct zs_meta *m;
+
+	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+
+	m = (struct zs_meta *)&first_page->mapping;
+	return m->freeobj;
+}
+
 static void get_zspage_mapping(struct page *first_page,
 				unsigned int *class_idx,
 				enum fullness_group *fullness)
@@ -832,30 +849,33 @@ static struct page *get_next_page(struct page *page)
 	return next;
 }
-/*
- * Encode <page, obj_idx> as a single handle value.
- * We use the least bit of handle for tagging.
- */
-static void *location_to_obj(struct page *page, un...
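The comment removed in this hunk describes the older handle scheme: a <page, obj_idx> pair encoded in a single value, with the least significant bit of the handle reserved for tagging. The standalone sketch below illustrates that kind of encoding; the field widths and the pfn-style page number are illustrative assumptions, not the exact zsmalloc layout:

#include <assert.h>
#include <stdio.h>

/* Illustrative widths: enough index bits for objects in a zspage, 1 tag bit. */
#define OBJ_INDEX_BITS	11
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)
#define OBJ_TAG_BITS	1

/* Encode a <page number, obj_idx> pair into one handle word; the least
 * significant bit is kept free for tagging, as the removed comment describes. */
static unsigned long location_to_handle(unsigned long pfn, unsigned int obj_idx)
{
	unsigned long handle;

	handle = pfn << OBJ_INDEX_BITS;
	handle |= obj_idx & OBJ_INDEX_MASK;
	return handle << OBJ_TAG_BITS;
}

static void handle_to_location(unsigned long handle, unsigned long *pfn,
			       unsigned int *obj_idx)
{
	handle >>= OBJ_TAG_BITS;
	*obj_idx = handle & OBJ_INDEX_MASK;
	*pfn = handle >> OBJ_INDEX_BITS;
}

int main(void)
{
	unsigned long pfn;
	unsigned int idx;
	unsigned long h = location_to_handle(0x1234, 37);

	assert((h & 1) == 0);		/* low bit stays free for the tag */
	handle_to_location(h, &pfn, &idx);
	assert(pfn == 0x1234 && idx == 37);
	printf("handle=0x%lx pfn=0x%lx obj_idx=%u\n", h, pfn, idx);
	return 0;
}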
2016 Mar 30
33
[PATCH v3 00/16] Support non-lru page migration
Recently, I got many reports about performance degradation in embedded systems (Android mobile phones, webOS TVs and so on) and about fork() failing. The problem was fragmentation caused by zram and GPU driver pages. Their pages cannot be migrated, so compaction cannot work well either, and the reclaimer ends up shrinking all of the working-set pages. This made the system very slow and even caused fork() to fail.
2016 Mar 30
0
[PATCH v3 09/16] zsmalloc: move struct zs_meta from mapping to freelist
...n m->freeobj;
 }
@@ -471,7 +471,7 @@ static void get_zspage_mapping(struct page *first_page,
 	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
-	m = (struct zs_meta *)&first_page->mapping;
+	m = (struct zs_meta *)&first_page->freelist;
 	*fullness = m->fullness;
 	*class_idx = m->class;
 }
@@ -484,7 +484,7 @@ static void set_zspage_mapping(struct page *first_page,
 	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
-	m = (struct zs_meta *)&first_page->mapping;
+	m = (struct zs_meta *)&first_page->freelist;
 	m->fullness = fullness;
 	m->...
2016 Mar 12
1
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...er_zspage / fullness_threshold_frac)
> 		fg = ZS_ALMOST_EMPTY;
> 	else
> 		fg = ZS_ALMOST_FULL;
>
> @@ -723,7 +723,7 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
> 	enum fullness_group currfg, newfg;
>
> 	get_zspage_mapping(first_page, &class_idx, &currfg);
> -	newfg = get_fullness_group(first_page);
> +	newfg = get_fullness_group(class, first_page);
> 	if (newfg == currfg)
> 		goto out;
>
> @@ -1003,9 +1003,6 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
> 	init_zspage(class, firs...
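The quoted hunk touches get_fullness_group(), which classifies a zspage by comparing its in-use object count against the class's capacity; the patch passes the size_class in so the maximum comes from there. A simplified standalone sketch of that classification follows; the 1/4 threshold fraction is an assumption for illustration, since the excerpt shows only the ALMOST_EMPTY/ALMOST_FULL branches:

#include <stdio.h>

enum fullness_group { ZS_ALMOST_FULL, ZS_ALMOST_EMPTY, ZS_EMPTY, ZS_FULL };

/* Assumed threshold: below this fraction of capacity a zspage counts as
 * "almost empty". The real fullness_threshold_frac may differ. */
static const int fullness_threshold_frac = 4;

static enum fullness_group classify(int inuse, int objs_per_zspage)
{
	if (inuse == 0)
		return ZS_EMPTY;
	if (inuse == objs_per_zspage)
		return ZS_FULL;
	if (inuse <= objs_per_zspage / fullness_threshold_frac)
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}

int main(void)
{
	static const char *name[] = {
		"ZS_ALMOST_FULL", "ZS_ALMOST_EMPTY", "ZS_EMPTY", "ZS_FULL",
	};
	int samples[] = { 0, 3, 20, 32 };

	for (int i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++)
		printf("inuse=%2d/32 -> %s\n", samples[i],
		       name[classify(samples[i], 32)]);
	return 0;
}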
2016 Mar 11
0
[PATCH v1 08/19] zsmalloc: remove unused pool param in obj_free
...ol *pool, struct size_class *class,
-				unsigned long obj)
+static void obj_free(struct size_class *class, unsigned long obj)
 {
 	struct link_free *link;
 	struct page *first_page, *f_page;
@@ -1482,7 +1481,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	class = pool->size_class[class_idx];
 	spin_lock(&class->lock);
-	obj_free(pool, class, obj);
+	obj_free(class, obj);
 	fullness = fix_fullness_group(class, first_page);
 	if (fullness == ZS_EMPTY) {
 		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
@@ -1645,7 +1644,7 @@ static int migrate_zspage(struct zs_pool *p...