Displaying 20 results from an estimated 30 matches for "pages_per_zspag".
Did you mean:
pages_per_zspage
2016 Mar 12
1
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...03,6 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
> init_zspage(class, first_page);
>
> first_page->freelist = location_to_obj(first_page, 0);
> - /* Maximum number of objects we can store in this zspage */
> - first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
> -
> error = 0; /* Success */
>
> cleanup:
> @@ -1235,11 +1232,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
> return true;
> }
>
> -static bool zspage_full(struct page *first_page)
> +sta...
2016 Mar 12
1
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...03,6 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
> init_zspage(class, first_page);
>
> first_page->freelist = location_to_obj(first_page, 0);
> - /* Maximum number of objects we can store in this zspage */
> - first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
> -
> error = 0; /* Success */
>
> cleanup:
> @@ -1235,11 +1232,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
> return true;
> }
>
> -static bool zspage_full(struct page *first_page)
> +sta...
2016 Mar 14
0
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...page *alloc_zspage(struct size_class *class, gfp_t flags)
> > init_zspage(class, first_page);
> >
> > first_page->freelist = location_to_obj(first_page, 0);
> >- /* Maximum number of objects we can store in this zspage */
> >- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
> >-
> > error = 0; /* Success */
> >
> > cleanup:
> >@@ -1235,11 +1232,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
> > return true;
> > }
> >
> >-static bool zspage_f...
2016 Mar 30
0
[PATCH v3 05/16] zsmalloc: keep max_object in size_class
...oto out;
@@ -1008,9 +1008,6 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
init_zspage(class, first_page);
first_page->freelist = location_to_obj(first_page, 0);
- /* Maximum number of objects we can store in this zspage */
- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
-
error = 0; /* Success */
cleanup:
@@ -1238,11 +1235,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
-static bool zspage_full(struct page *first_page)
+static bool zspage_full(struct size_class *class, str...
2016 Mar 30
0
[PATCH v3 11/16] zsmalloc: separate free_zspage from putback_zspage
...class,
+ struct page *first_page)
{
struct page *nextp, *tmp, *head_extra;
@@ -972,6 +973,11 @@ static void free_zspage(struct page *first_page)
}
reset_page(head_extra);
__free_page(head_extra);
+
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+ atomic_long_sub(class->pages_per_zspage,
+ &pool->pages_allocated);
}
/* Initialize a newly allocated zspage */
@@ -1559,13 +1565,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
spin_lock(&class->lock);
obj_free(class, obj);
fullness = fix_fullness_...
2016 Mar 11
0
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...oto out;
@@ -1003,9 +1003,6 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
init_zspage(class, first_page);
first_page->freelist = location_to_obj(first_page, 0);
- /* Maximum number of objects we can store in this zspage */
- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
-
error = 0; /* Success */
cleanup:
@@ -1235,11 +1232,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
-static bool zspage_full(struct page *first_page)
+static bool zspage_full(struct size_class *class, str...
2016 Apr 18
2
[PATCH v3 11/16] zsmalloc: separate free_zspage from putback_zspage
...== ZS_EMPTY, dst_page);
well, if we want to VM_BUG_ON_PAGE() at all. there haven't been any
problems with compaction, is there any specific reason these macros
were added?
> + if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
> pool->stats.pages_compacted += class->pages_per_zspage;
> - spin_unlock(&class->lock);
> + spin_unlock(&class->lock);
> + free_zspage(pool, class, src_page);
do we really need to free_zspage() out of class->lock?
wouldn't something like this
if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
pool->sta...
2016 Apr 18
2
[PATCH v3 11/16] zsmalloc: separate free_zspage from putback_zspage
...== ZS_EMPTY, dst_page);
well, if we want to VM_BUG_ON_PAGE() at all. there haven't been any
problems with compaction, is there any specific reason these macros
were added?
> + if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
> pool->stats.pages_compacted += class->pages_per_zspage;
> - spin_unlock(&class->lock);
> + spin_unlock(&class->lock);
> + free_zspage(pool, class, src_page);
do we really need to free_zspage() out of class->lock?
wouldn't something like this
if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
pool->sta...
2016 Mar 11
31
[PATCH v1 00/19] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TV, and so on)
and failures to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. This made the system very slow and even caused
fork to fail easily.
2016 Mar 11
31
[PATCH v1 00/19] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TV, and so on)
and failures to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. This made the system very slow and even caused
fork to fail easily.
2016 Mar 11
0
[PATCH v1 13/19] zsmalloc: factor page chain functionality out
...gePrivate2(page);
+
+ prev_page = page;
+ }
+}
+
/*
* Allocate a zspage for the given size class
*/
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
- int i, error;
+ int i;
struct page *first_page = NULL, *uninitialized_var(prev_page);
+ struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
/*
* Allocate individual pages and link them together as:
@@ -1041,43 +1074,23 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
* (i.e. no other sub-page has this flag set) and PG_private_2 to
* identify the last page.
*/
- error = -ENOMEM;
for (i = 0;...
2016 Mar 30
0
[PATCH v3 10/16] zsmalloc: factor page chain functionality out
...page;
+ }
+}
+
/*
* Allocate a zspage for the given size class
*/
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
- int i, error;
- struct page *first_page = NULL, *uninitialized_var(prev_page);
+ int i;
+ struct page *first_page = NULL;
+ struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
/*
* Allocate individual pages and link them together as:
@@ -1045,43 +1078,23 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
* (i.e. no other sub-page has this flag set) and PG_private_2 to
* identify the last page.
*/
- error = -ENOMEM;
for (i = 0;...
2016 Mar 12
1
[PATCH v1 13/19] zsmalloc: factor page chain functionality out
...> /*
> * Allocate a zspage for the given size class
> */
> static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
> {
> - int i, error;
> + int i;
> struct page *first_page = NULL, *uninitialized_var(prev_page);
> + struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
>
> /*
> * Allocate individual pages and link them together as:
> @@ -1041,43 +1074,23 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
*uninitialized_var(prev_page) in alloc_zspage is not in use more.
> * (i.e. no other sub-page has this fl...
2016 Mar 12
1
[PATCH v1 13/19] zsmalloc: factor page chain functionality out
...> /*
> * Allocate a zspage for the given size class
> */
> static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
> {
> - int i, error;
> + int i;
> struct page *first_page = NULL, *uninitialized_var(prev_page);
> + struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
>
> /*
> * Allocate individual pages and link them together as:
> @@ -1041,43 +1074,23 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
*uninitialized_var(prev_page) in alloc_zspage is not in use more.
> * (i.e. no other sub-page has this fl...
2016 Mar 21
22
[PATCH v2 00/18] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TV, and so on)
and failures to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. This made the system very slow and even caused
fork to fail easily.
2016 Mar 21
22
[PATCH v2 00/18] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TV, and so on)
and failures to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. This made the system very slow and even caused
fork to fail easily.
2016 Apr 19
0
[PATCH v3 11/16] zsmalloc: separate free_zspage from putback_zspage
...we want to VM_BUG_ON_PAGE() at all. there haven't been any
> problems with compaction, is there any specific reason these macros
> were added?
>
>
>
> > + if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
> > pool->stats.pages_compacted += class->pages_per_zspage;
> > - spin_unlock(&class->lock);
> > + spin_unlock(&class->lock);
> > + free_zspage(pool, class, src_page);
>
> do we really need to free_zspage() out of class->lock?
> wouldn't something like this
>
> if (putback_zspage(pool, class,...
2016 Mar 30
33
[PATCH v3 00/16] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TV, and so on)
and failures to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. This made the system very slow and even caused
fork to fail easily.
2016 Mar 30
33
[PATCH v3 00/16] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TV, and so on)
and failures to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. This made the system very slow and even caused
fork to fail easily.
2016 Jun 16
2
[PATCH v7 00/12] Support non-lru page migration
...e8 00 00 00 00 callq 6401 <zs_compact+0x2b6>
> 6401: 48 8b 85 b0 fe ff ff mov -0x150(%rbp),%rax
RAX: 2065676162726166 so rax is totally garbage, I think.
It means obj_to_head returns garbage because get_first_obj_offset is
utter crab because (page_idx / class->pages_per_zspage) was totally
wrong.
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 6408: f0 0f ba 28 00 lock btsl $0x0,(%rax)
<snip>
> > Could you test with [zsmalloc: keep first object offset in struct page]
> > in mmotm?
>
> sure, I can. will it help, tho? we...