Displaying results from an estimated 27 matches for "migrate_async".
2016 Jun 13
2
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...@ -791,6 +921,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> int rc = -EAGAIN;
> int page_was_mapped = 0;
> struct anon_vma *anon_vma = NULL;
> + bool is_lru = !__PageMovable(page);
>
> if (!trylock_page(page)) {
> if (!force || mode == MIGRATE_ASYNC)
> @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> goto out_unlock_both;
> }
>
> + if (unlikely(!is_lru)) {
> + rc = move_to_new_page(newpage, page, mode);
> + goto out_unlock_both;
> + }
> +
Hello Minchan,
I might b...
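The hunk quoted above is the core shortcut of the series: a page flagged as non-LRU movable (tested with __PageMovable()) skips the rmap-based unmap step and is handed straight to move_to_new_page(). For context, a driver opts its pages into this path by supplying migration callbacks in its address_space_operations. A rough sketch of that driver side follows; the callback names are the ones this series adds, while the demo_* bodies and the choice of includes are assumptions for illustration only.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/migrate.h>

/* Sketch: a driver exposing movable (non-LRU) pages to compaction. */
static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
        /* Pin driver-private state so the page cannot go away mid-migration. */
        return true;
}

static int demo_migratepage(struct address_space *mapping,
                            struct page *newpage, struct page *page,
                            enum migrate_mode mode)
{
        /* Copy contents and driver metadata from page to newpage. */
        return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
        /* Migration failed or was aborted; undo demo_isolate_page(). */
}

static const struct address_space_operations demo_movable_aops = {
        .isolate_page = demo_isolate_page,
        .migratepage  = demo_migratepage,
        .putback_page = demo_putback_page,
};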
2016 Jun 15
2
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...AIN;
>>> > > int page_was_mapped = 0;
>>> > > struct anon_vma *anon_vma = NULL;
>>> > > + bool is_lru = !__PageMovable(page);
>>> > >
>>> > > if (!trylock_page(page)) {
>>> > > if (!force || mode == MIGRATE_ASYNC)
>>> > > @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
>>> > > goto out_unlock_both;
>>> > > }
>>> > >
>>> > > + if (unlikely(!is_lru)) {
>>> > > + rc = mov...
2016 Jun 16
2
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...int page_was_mapped = 0;
>>>>>>> struct anon_vma *anon_vma = NULL;
>>>>>>> + bool is_lru = !__PageMovable(page);
>>>>>>>
>>>>>>> if (!trylock_page(page)) {
>>>>>>> if (!force || mode == MIGRATE_ASYNC)
>>>>>>> @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
>>>>>>> goto out_unlock_both;
>>>>>>> }
>>>>>>>
>>>>>>> + if (unlikely(!is_lru)) {
>...
2016 Jun 27
2
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...;>>>>> struct anon_vma *anon_vma = NULL;
>>>>>>>>> + bool is_lru = !__PageMovable(page);
>>>>>>>>>
>>>>>>>>> if (!trylock_page(page)) {
>>>>>>>>> if (!force || mode == MIGRATE_ASYNC)
>>>>>>>>> @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
>>>>>>>>> goto out_unlock_both;
>>>>>>>>> }
>>>>>>>>>
>>>>>>>...
2016 Jun 15
0
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...ap_and_move(struct page *page, struct page *newpage,
> > int rc = -EAGAIN;
> > int page_was_mapped = 0;
> > struct anon_vma *anon_vma = NULL;
> > + bool is_lru = !__PageMovable(page);
> >
> > if (!trylock_page(page)) {
> > if (!force || mode == MIGRATE_ASYNC)
> > @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> > goto out_unlock_both;
> > }
> >
> > + if (unlikely(!is_lru)) {
> > + rc = move_to_new_page(newpage, page, mode);
> > + goto out_unlock_both;
> >...
2016 Jun 16
0
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...> >>>>>>> struct anon_vma *anon_vma = NULL;
> >>>>>>> + bool is_lru = !__PageMovable(page);
> >>>>>>>
> >>>>>>> if (!trylock_page(page)) {
> >>>>>>> if (!force || mode == MIGRATE_ASYNC)
> >>>>>>> @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> >>>>>>> goto out_unlock_both;
> >>>>>>> }
> >>>>>>>
> >>>>>>> + if (...
2016 Jun 16
0
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...t; int page_was_mapped = 0;
> >>> > > struct anon_vma *anon_vma = NULL;
> >>> > > + bool is_lru = !__PageMovable(page);
> >>> > >
> >>> > > if (!trylock_page(page)) {
> >>> > > if (!force || mode == MIGRATE_ASYNC)
> >>> > > @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> >>> > > goto out_unlock_both;
> >>> > > }
> >>> > >
> >>> > > + if (unlikely(!is_lru)) {
> >...
2016 Jun 28
0
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...; struct anon_vma *anon_vma = NULL;
> >>>>>>>>> + bool is_lru = !__PageMovable(page);
> >>>>>>>>>
> >>>>>>>>> if (!trylock_page(page)) {
> >>>>>>>>> if (!force || mode == MIGRATE_ASYNC)
> >>>>>>>>> @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> >>>>>>>>> goto out_unlock_both;
> >>>>>>>>> }
> >>>>>>>>>
> >...
2016 Jun 30
1
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...non_vma *anon_vma = NULL;
>>>>>>>>>>> + bool is_lru = !__PageMovable(page);
>>>>>>>>>>>
>>>>>>>>>>> if (!trylock_page(page)) {
>>>>>>>>>>> if (!force || mode == MIGRATE_ASYNC)
>>>>>>>>>>> @@ -871,6 +1002,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
>>>>>>>>>>> goto out_unlock_both;
>>>>>>>>>>> }
>>>>>>>>>>>...
2016 May 30
5
[PATCH v6v2 02/12] mm: migrate: support non-lru movable page migration
...Y);
+
/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
diff --git a/mm/compaction.c b/mm/compaction.c
index 1427366ad673..c7e0cd4dda9d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -81,6 +81,39 @@ static inline bool migrate_async_suitable(int migratetype)
#ifdef CONFIG_COMPACTION
+int PageMovable(struct page *page)
+{
+ struct address_space *mapping;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ if (!__PageMovable(page))
+ return 0;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops &&...
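PageMovable() above reports true only when the page's mapping carries the movable tag and the mapping's a_ops look migration-capable (the excerpt is cut off at that check). The driver side of that contract uses the __SetPageMovable()/__ClearPageMovable() helpers this series introduces; a short sketch, with the page-lock requirement assumed from the quoted patches rather than verified here:

/* Sketch: how a driver tags a page so __PageMovable()/PageMovable()
 * return true. The mapping passed in is the one whose a_ops carry the
 * migration callbacks. */
static void demo_mark_page_movable(struct page *page,
                                   struct address_space *mapping)
{
        lock_page(page);
        /* Store mapping in page->mapping together with a "movable" type
         * tag in its low bits, so compaction can recognise the page. */
        __SetPageMovable(page, mapping);
        unlock_page(page);
}

static void demo_mark_page_unmovable(struct page *page)
{
        lock_page(page);
        /* Drop the tag; the page is no longer offered to compaction. */
        __ClearPageMovable(page);
        unlock_page(page);
}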
2016 May 09
0
[PATCH v5 02/12] mm: migrate: support non-lru movable page migration
...NULL;
}
+out:
return rc;
}
@@ -791,6 +947,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
int rc = -EAGAIN;
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(page);
if (!trylock_page(page)) {
if (!force || mode == MIGRATE_ASYNC)
@@ -871,6 +1028,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
goto out_unlock_both;
}
+ if (unlikely(!is_lru)) {
+ rc = move_to_new_page(newpage, page, mode);
+ goto out_unlock_both;
+ }
+
/*
* Corner case handling:
* 1. When a new swap-cache page is r...
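The hunk above is where the behaviour actually changes: non-LRU movable pages are never mapped into user page tables, so __unmap_and_move() can skip the rmap/unmap handling entirely and call move_to_new_page() right away, which then dispatches to the owning driver. A much-simplified sketch of that dispatch follows (same headers as the earlier sketch); migrate_page() stands in for the several LRU cases the real function distinguishes, and only the ->migratepage() call for movable pages is taken from the quoted series.

/* Simplified sketch of the dispatch implied above: a movable page is
 * handed to its driver's ->migratepage(); everything else takes the
 * usual LRU/filesystem path. */
static int sketch_move_to_new_page(struct page *newpage, struct page *page,
                                   enum migrate_mode mode)
{
        struct address_space *mapping = page_mapping(page);

        if (!__PageMovable(page))
                /* Ordinary LRU page: generic migration (the real function
                 * distinguishes several cases here). */
                return migrate_page(mapping, newpage, page, mode);

        /* Driver-owned movable page: the driver copies the contents and
         * fixes up its own metadata. */
        return mapping->a_ops->migratepage(mapping, newpage, page, mode);
}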
2016 May 20
0
[PATCH v6 02/12] mm: migrate: support non-lru movable page migration
...Y);
+
/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
diff --git a/mm/compaction.c b/mm/compaction.c
index 1427366ad673..2d6862d0df60 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -81,6 +81,41 @@ static inline bool migrate_async_suitable(int migratetype)
#ifdef CONFIG_COMPACTION
+int PageMovable(struct page *page)
+{
+ struct address_space *mapping;
+
+ WARN_ON(!PageLocked(page));
+ if (!__PageMovable(page))
+ goto out;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops && mapping->...
2016 May 31
0
[PATCH v6v3 02/12] mm: migrate: support non-lru movable page migration
...Y);
+
/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
diff --git a/mm/compaction.c b/mm/compaction.c
index 1427366ad673..a680b52e190b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -81,6 +81,44 @@ static inline bool migrate_async_suitable(int migratetype)
#ifdef CONFIG_COMPACTION
+int PageMovable(struct page *page)
+{
+ struct address_space *mapping;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ if (!__PageMovable(page))
+ return 0;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops &&...
2016 May 20
5
[PATCH v6 00/12] Support non-lru page migration
Recently, I got many reports about performance degradation in embedded
systems (Android mobile phones, webOS TVs and so on) and easy fork failures.
The problem was fragmentation, caused mainly by zram and GPU drivers.
Under memory pressure, their pages were spread out all over the pageblocks and
could not be migrated by the current compaction algorithm, which supports
only LRU pages. In the end, compaction cannot