Displaying 15 results from an estimated 15 matches for "pmd_migration_entry_wait".
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...pfns;
unsigned long addr = start, i;
pte_t *ptep;
pmd_t pmd;
-
again:
pmd = READ_ONCE(*pmdp);
if (pmd_none(pmd))
@@ -648,7 +644,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
0, &fault, &write_fault);
if (fault || write_fault) {
hmm_vma_walk->last = addr;
- pmd_migration_entry_wait(vma->vm_mm, pmdp);
+ pmd_migration_entry_wait(walk->mm, pmdp);
return -EBUSY;
}
return 0;
@@ -657,11 +653,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
- * No need to take pmd_lock here, even if some other threads
+ * No...
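
For reference, the hunk above switches hmm_vma_walk_pmd() from vma->vm_mm to
walk->mm, the mm already carried in the generic struct mm_walk, so the walker
no longer needs a vma pointer just to find the mm. A condensed sketch of the
pattern being patched (walk_pmd_sketch is a hypothetical name, not the
upstream function):

#include <linux/pagewalk.h>	/* struct mm_walk */
#include <linux/swapops.h>	/* is_pmd_migration_entry(), pmd_migration_entry_wait() */
#include <linux/huge_mm.h>	/* thp_migration_supported() */

static int walk_pmd_sketch(pmd_t *pmdp, unsigned long addr,
			   struct mm_walk *walk)
{
	pmd_t pmd = READ_ONCE(*pmdp);	/* one racy snapshot; caller retries */

	if (pmd_none(pmd))
		return 0;		/* hole: nothing mapped here */

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		/* Sleep until the concurrent THP migration installs a
		 * present pmd, then ask the caller to restart the walk. */
		pmd_migration_entry_wait(walk->mm, pmdp);
		return -EBUSY;
	}
	return 0;
}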
2022 Jul 08
0
[PATCH v2 07/19] mm/migrate: Convert expected_page_refs() to folio_expected_refs()
...grate.c | 19 ++++++++++++-------
> > 1 file changed, 12 insertions(+), 7 deletions(-)
> >
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index 2975f0c4d7cf..2e2f41572066 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -336,13 +336,18 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
> > }
> > #endif
> >
> > -static int expected_page_refs(struct address_space *mapping, struct page *page)
> > +static int folio_expected_refs(struct address_space *mapping,
> > + struct folio *folio)
> > {
> > -...
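
The new helper's body is cut off by the search excerpt; as merged upstream
the conversion amounts to roughly the following (comments added here as a
guide, not the literal patch text):

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;	/* the reference held by the migration path itself */

	if (!mapping)
		return refs;	/* anonymous folio: nothing else pins it */

	/* The page cache holds one reference per base page of the folio. */
	refs += folio_nr_pages(folio);

	/* Filesystem-private data (e.g. buffer heads) pins one more. */
	if (folio_test_private(folio))
		refs++;

	return refs;
}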
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...ge(entry);
+ return vmf.page->pgmap->ops->migrate_to_ram(&vmf);
+ }
VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(orig_pmd));
- if (is_pmd_migration_entry(orig_pmd))
+ !is_migration_entry(entry));
+ if (is_migration_entry(entry))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}
diff --git a/mm/memremap.c b/mm/memremap.c
index 03e38b7a38f1..4231054188b4 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -132,8 +132,13 @@ void memunmap_pages(struct dev_pagemap *pgmap)
int nid;
dev_pagemap_kill(pgmap);
- for_each_device_pfn(pfn, pgmap)
-...
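
The hunk above sits in the huge-pmd fault path: a non-present pmd either
encodes a device-private THP, which must be handed back to the owning driver,
or a migration entry, which is simply waited out. A self-contained sketch of
that dispatch (the helper name is invented, and pfn_swap_entry_to_page() is
the current-kernel helper standing in for the truncated call in the excerpt):

#include <linux/mm.h>
#include <linux/swapops.h>	/* pmd_to_swp_entry() and friends */
#include <linux/memremap.h>	/* struct dev_pagemap */

static vm_fault_t huge_swap_fault_sketch(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	swp_entry_t entry = pmd_to_swp_entry(orig_pmd);

	if (is_device_private_entry(entry)) {
		vmf->page = pfn_swap_entry_to_page(entry);
		/* Device-private memory is not CPU-addressable: ask the
		 * owning driver to migrate the THP back to system RAM. */
		return vmf->page->pgmap->ops->migrate_to_ram(vmf);
	}

	VM_BUG_ON(thp_migration_supported() && !is_migration_entry(entry));
	if (is_migration_entry(entry))
		pmd_migration_entry_wait(mm, vmf->pmd);	/* fault is retried */
	return 0;
}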
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
Here are seven more patches for things I found to clean up.
This was based on top of Christoph's seven patches:
"hmm_range_fault related fixes and legacy API removal v3".
I assume this will go into Jason's tree since there will likely be
more HMM changes in this cycle.
Changes from v1 to v2:
Added AMD GPU to hmm_update removal.
Added 2 patches from Christoph.
Added 2 patches as
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...;pgmap->ops->migrate_to_ram(&vmf);
> + }
> VM_BUG_ON(thp_migration_supported() &&
> - !is_pmd_migration_entry(orig_pmd));
> - if (is_pmd_migration_entry(orig_pmd))
> + !is_migration_entry(entry));
> + if (is_migration_entry(entry))
> pmd_migration_entry_wait(mm, vmf.pmd);
> return 0;
> }
> diff --git a/mm/memremap.c b/mm/memremap.c
> index 03e38b7a38f1..4231054188b4 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -132,8 +132,13 @@ void memunmap_pages(struct dev_pagemap *pgmap)
> int nid;
>
> dev_pagemap_...
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...ge(entry);
+ return vmf.page->pgmap->ops->migrate_to_ram(&vmf);
+ }
VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(orig_pmd));
- if (is_pmd_migration_entry(orig_pmd))
+ !is_migration_entry(entry));
+ if (is_migration_entry(entry))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}
diff --git a/mm/memremap.c b/mm/memremap.c
index d72ce30da94e..8b4e6f12e58f 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -92,7 +92,7 @@ static unsigned long pfn_next(unsigned long pfn)
{
if (pfn % 1024 == 0)
cond_resched();
- return pfn + 1;
+ return pfn...
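
The pfn_next() change itself is truncated, but the visible part illustrates a
common pattern: when iterating over a potentially huge device pfn range,
yield the CPU every so often so the loop cannot stall a non-preemptible
kernel. A minimal sketch of just that visible part (the patch alters the
return stride in a way the excerpt does not show):

#include <linux/sched.h>	/* cond_resched() */

static unsigned long pfn_next_sketch(unsigned long pfn)
{
	if (pfn % 1024 == 0)	/* yield periodically during long walks */
		cond_resched();
	return pfn + 1;
}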
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...*pmdp,
return hmm_vma_walk_hole(start, end, -1, walk);
if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
- if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
+ if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
hmm_vma_walk->last = addr;
pmd_migration_entry_wait(walk->mm, pmdp);
return -EBUSY;
}
- return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+ return hmm_pfns_fill(start, end, range, 0);
}
if (!pmd_present(pmd)) {
- if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+ if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npage...
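
After this series, a caller fills a plain array of unsigned long pfn/flag
words and hmm_range_fault() returns 0 or -errno instead of a count. A sketch
of the resulting call pattern (fault_range_sketch is an invented driver-side
name, the current mmap_read_lock() helpers are used, and the final
mmu_interval_read_retry() check under the driver lock is elided for brevity):

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

static int fault_range_sketch(struct mmu_interval_notifier *notifier,
			      unsigned long start, unsigned long npages,
			      unsigned long *hmm_pfns)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= start,
		.end		= start + npages * PAGE_SIZE,
		.hmm_pfns	= hmm_pfns,	/* one word per page: pfn + HMM_PFN_* flags */
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;

	do {
		range.notifier_seq = mmu_interval_read_begin(notifier);
		mmap_read_lock(notifier->mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(notifier->mm);
		/* -EBUSY covers transient collisions, e.g. the migration
		 * entry waited on in the hunk above: just try again. */
	} while (ret == -EBUSY);

	return ret;
}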
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...*pmdp,
return hmm_vma_walk_hole(start, end, -1, walk);
if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
- if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
+ if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
hmm_vma_walk->last = addr;
pmd_migration_entry_wait(walk->mm, pmdp);
return -EBUSY;
}
- return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+ return hmm_pfns_fill(start, end, range, 0);
}
if (!pmd_present(pmd)) {
- if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+ if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npage...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...hole(start, end, -1, walk);
>
> if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
> - if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
> + if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
> hmm_vma_walk->last = addr;
> pmd_migration_entry_wait(walk->mm, pmdp);
> return -EBUSY;
> }
> - return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
> + return hmm_pfns_fill(start, end, range, 0);
> }
>
> if (!pmd_present(pmd)) {
> - if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
> + if (hmm_r...
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
selftests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted previously [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts but there may be some merge conflicts depending on
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted previously [1]. This version now
supports splitting a THP midway through the migration process, which
led to a number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan