Displaying 17 results from an estimated 17 matches for "zs_align".
2016 Mar 15
1
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...rder * PAGE_SIZE / class->size)
> * page->lru: links together first pages of various zspages.
> * Basically forming list of zspages in a fullness group.
> * page->mapping: class index and fullness group of the zspage
> @@ -211,6 +209,7 @@ struct size_class {
> * of ZS_ALIGN.
> */
> int size;
> + int objs_per_zspage;
> unsigned int index;
struct page->objects "comes for free"; now we don't use it, and instead
every size_class grows by 4 bytes? Is there any reason for this?
-ss
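For context, the patch under discussion caches the per-zspage object count in each size_class rather than re-deriving it from the page size and object size on every fullness check. A minimal sketch of the idea, with a simplified struct layout and a hard-coded 4K page size standing in for the real zsmalloc code:

/* Simplified stand-in for zsmalloc's size_class; not the kernel layout. */
struct size_class {
	int size;		/* object size this class serves */
	int pages_per_zspage;	/* number of 0-order pages per zspage */
	int objs_per_zspage;	/* cached object count per zspage */
};

#define PAGE_SIZE 4096		/* assumed 4K pages, as in the excerpts */

static void init_size_class(struct size_class *class, int size,
			    int pages_per_zspage)
{
	class->size = size;
	class->pages_per_zspage = pages_per_zspage;
	/* One division at class-creation time replaces one per lookup. */
	class->objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
}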
2016 Mar 12
1
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...* PAGE_SIZE / class->size)
> * page->lru: links together first pages of various zspages.
> * Basically forming list of zspages in a fullness group.
> * page->mapping: class index and fullness group of the zspage
> @@ -211,6 +209,7 @@ struct size_class {
> * of ZS_ALIGN.
> */
> int size;
> + int objs_per_zspage;
> unsigned int index;
>
> struct zs_size_stat stats;
> @@ -622,21 +621,22 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
> * the pool (not yet implemented). This function returns fullness
> *...
2016 Mar 15
0
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...>size)
> > * page->lru: links together first pages of various zspages.
> > * Basically forming list of zspages in a fullness group.
> > * page->mapping: class index and fullness group of the zspage
> > @@ -211,6 +209,7 @@ struct size_class {
> > * of ZS_ALIGN.
> > */
> > int size;
> > + int objs_per_zspage;
> > unsigned int index;
>
> struct page->objects "comes for free"; now we don't use it, and instead
> every size_class grows by 4 bytes? Is there any reason for this?
It is in a union with _mapcount...
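The reply refers to the struct page layout of that era, where the SLUB "objects" field is one arm of a union shared with _mapcount, so it is only "free" for code that never needs the other union members. An illustrative fragment, with field widths simplified and atomic_t replaced by a stand-in so the snippet is self-contained:

typedef struct { int counter; } atomic_t;	/* stand-in for the kernel type */

/*
 * Illustrative fragment modeled on the 2016-era struct page:
 * "objects" is not a spare field but overlays _mapcount, so code
 * that also relies on _mapcount semantics cannot use it freely.
 */
struct page_like {
	union {
		atomic_t _mapcount;	/* used by the rmap code */
		struct {		/* used by SLUB */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
};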
2016 Mar 11
0
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...spage (class->zspage_order * PAGE_SIZE / class->size)
* page->lru: links together first pages of various zspages.
* Basically forming list of zspages in a fullness group.
* page->mapping: class index and fullness group of the zspage
@@ -211,6 +209,7 @@ struct size_class {
* of ZS_ALIGN.
*/
int size;
+ int objs_per_zspage;
unsigned int index;
struct zs_size_stat stats;
@@ -622,21 +621,22 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
* the pool (not yet implemented). This function returns fullness
* status of the given page.
*/
-static enum fulln...
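The hunk is truncated where the old fullness helper is removed; from the surrounding context, the reworked check reads the cached class->objs_per_zspage instead of a per-page object count. A hedged sketch of that shape, reusing the size_class sketch above (the enum values and the 3/4 threshold are illustrative, not taken from the excerpt):

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

/*
 * Sketch: classify a zspage by comparing its in-use count against the
 * cached per-class capacity rather than a per-page "objects" field.
 */
static enum fullness_group get_fullness_group(const struct size_class *class,
					      int inuse)
{
	int objs = class->objs_per_zspage;	/* cached at class init */

	if (inuse == 0)
		return ZS_EMPTY;
	if (inuse == objs)
		return ZS_FULL;
	if (inuse <= 3 * objs / 4)		/* illustrative threshold */
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}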
2016 Mar 30
0
[PATCH v3 05/16] zsmalloc: keep max_object in size_class
...spage (class->zspage_order * PAGE_SIZE / class->size)
* page->lru: links together first pages of various zspages.
* Basically forming list of zspages in a fullness group.
* page->mapping: class index and fullness group of the zspage
@@ -211,6 +209,7 @@ struct size_class {
* of ZS_ALIGN.
*/
int size;
+ int objs_per_zspage;
unsigned int index;
struct zs_size_stat stats;
@@ -627,21 +626,22 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
* the pool (not yet implemented). This function returns fullness
* status of the given page.
*/
-static enum fulln...
2016 Mar 14
0
[PATCH v1 09/19] zsmalloc: keep max_object in size_class
...->size)
> > * page->lru: links together first pages of various zspages.
> > * Basically forming list of zspages in a fullness group.
> > * page->mapping: class index and fullness group of the zspage
> >@@ -211,6 +209,7 @@ struct size_class {
> > * of ZS_ALIGN.
> > */
> > int size;
> >+ int objs_per_zspage;
> > unsigned int index;
> >
> > struct zs_size_stat stats;
> >@@ -622,21 +621,22 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
> > * the pool (not yet implemented). This fun...
2016 Mar 30
0
[PATCH v3 06/16] zsmalloc: squeeze inuse into page->mapping
..._MASK ((1 << FULLNESS_BITS) - 1)
+#define INUSE_BITS 11
+#define INUSE_MASK ((1 << INUSE_BITS) - 1)
+
/*
* On systems with 4K page size, this gives 255 size classes! There is a
* trade-off here:
@@ -145,7 +151,7 @@
* ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
* (reason above)
*/
-#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
/*
* We do not maintain any list for completely empty or full pages
@@ -155,7 +161,7 @@ enum fullness_group {
ZS_ALMOST_EMPTY,
_ZS_NR_FULLNESS_GROUPS,...
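This patch packs the class index, fullness group, and now the in-use object count into the single unsigned long that used to be page->mapping. A minimal sketch of that style of encoding; INUSE_BITS 11 comes from the excerpt, while CLASS_BITS 8 and FULLNESS_BITS 2 are assumed values:

#define CLASS_BITS	8		/* assumed */
#define FULLNESS_BITS	2		/* assumed */
#define INUSE_BITS	11		/* from the excerpt */

#define FULLNESS_MASK	((1UL << FULLNESS_BITS) - 1)
#define INUSE_MASK	((1UL << INUSE_BITS) - 1)

/* Pack <class_idx, fullness, inuse> into one word, low bits = inuse. */
static unsigned long pack_meta(unsigned long class_idx,
			       unsigned long fullness,
			       unsigned long inuse)
{
	return (((class_idx << FULLNESS_BITS) | fullness) << INUSE_BITS) | inuse;
}

static unsigned long meta_inuse(unsigned long meta)
{
	return meta & INUSE_MASK;
}

static unsigned long meta_fullness(unsigned long meta)
{
	return (meta >> INUSE_BITS) & FULLNESS_MASK;
}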
2016 Mar 30
0
[PATCH v3 08/16] zsmalloc: squeeze freelist into page->mapping
...17 +224,17 @@ struct size_class {
/*
* Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
+ * For every zspage, first_page->freeobj gives head of this list.
*
* This must be power of 2 and less than or equal to ZS_ALIGN
*/
struct link_free {
union {
/*
- * Position of next free chunk (encodes <PFN, obj_idx>)
+ * free object list
* It's valid for non-allocated object
*/
- void *next;
+ unsigned long next;
/*
* Handle of allocated object.
*/
@@ -270,6 +266,7 @@ struct zs_po...
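For reference, the pre-patch comment already describes the free-list link as an encoded <PFN, obj_idx> pair; keeping it as an unsigned long (with the head squeezed into page->mapping) only needs a pack/unpack pair along these lines. The function names follow zsmalloc's conventions, but OBJ_INDEX_BITS 11 is an assumed width, not taken from the excerpt:

/* Assumed width of the in-zspage object index; illustrative only. */
#define OBJ_INDEX_BITS	11
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)

/* Encode a free object's location as <PFN, obj_idx> in one word. */
static unsigned long location_to_obj(unsigned long pfn, unsigned long obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

static void obj_to_location(unsigned long obj, unsigned long *pfn,
			    unsigned long *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}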
2016 Mar 11
0
[PATCH v1 11/19] zsmalloc: squeeze freelist into page->mapping
...17 +224,17 @@ struct size_class {
/*
* Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
+ * For every zspage, first_page->freeobj gives head of this list.
*
* This must be power of 2 and less than or equal to ZS_ALIGN
*/
struct link_free {
union {
/*
- * Position of next free chunk (encodes <PFN, obj_idx>)
+ * free object list
* It's valid for non-allocated object
*/
- void *next;
+ unsigned long next;
/*
* Handle of allocated object.
*/
@@ -270,6 +266,7 @@ struct zs_po...
2016 Mar 11
31
[PATCH v1 00/19] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TVs, and so on),
and about fork failing easily.
The problem was fragmentation caused by zram and GPU driver
pages. Those pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. That made the system very slow and even made
fork fail easily.
2016 Mar 21
22
[PATCH v2 00/18] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TVs, and so on),
and about fork failing easily.
The problem was fragmentation caused by zram and GPU driver
pages. Those pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. That made the system very slow and even made
fork fail easily.
2016 Mar 30
33
[PATCH v3 00/16] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TVs, and so on),
and about fork failing easily.
The problem was fragmentation caused by zram and GPU driver
pages. Those pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working-set pages. That made the system very slow and even made
fork fail easily.