Displaying 20 results from an estimated 21 matches for "pmd_devmap".
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...k(ptl);
+ ret = split_huge_page(page);
+ unlock_page(page);
+ put_page(page);
+ if (ret)
+ return migrate_vma_collect_skip(start, end, walk);
+ if (pmd_none(*pmdp))
+ return migrate_vma_collect_hole(start, end, -1, walk);
}
- if (unlikely(pmd_bad(*pmdp)))
+ if (unlikely(pmd_bad(pmd) || pmd_devmap(pmd)))
return migrate_vma_collect_skip(start, end, walk);
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
@@ -2310,8 +2352,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
}
- /* FIXME support THP */
- if (!page || !page->...
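Read as a whole, the hunk implements a split-and-retry fallback: a THP the
collector cannot take whole is split so the per-PTE loop below can pick up the
base pages. A hedged annotation of that path (the steps before the quoted
lines, such as taking the page reference and page lock, are assumptions, as is
the surrounding kernel function context):

	get_page(page);				/* assumed setup, not quoted above */
	spin_unlock(ptl);
	if (!trylock_page(page))		/* split_huge_page() needs the page lock */
		return migrate_vma_collect_skip(start, end, walk);
	ret = split_huge_page(page);		/* break the THP into base pages */
	unlock_page(page);
	put_page(page);
	if (ret)				/* split failed: give up on this range */
		return migrate_vma_collect_skip(start, end, walk);
	if (pmd_none(*pmdp))			/* mapping vanished while unlocked */
		return migrate_vma_collect_hole(start, end, -1, walk);
	/* fall through: collect the now-present PTEs one at a time */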
2019 Aug 07
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...> unsigned long pfn, npages, i;
> bool fault, write_fault;
> uint64_t cpu_flags;
> @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> pfn = pmd_pfn(pmd) + pte_index(addr);
> for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
> if (pmd_devmap(pmd)) {
> - hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> - hmm_vma_walk->pgmap);
> - if (unlikely(!hmm_vma_walk->pgmap))
> + pgmap = get_dev_pagemap(pfn, pgmap);
> + if (unlikely(!pgmap))
> return -EBUSY;
Unrelated to this patch, but what is the po...
2019 Aug 06
0
[PATCH 09/15] mm: don't abuse pte_index() in hmm_vma_handle_pmd
...lk,
if (pmd_protnone(pmd) || fault || write_fault)
return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
- pfn = pmd_pfn(pmd) + pte_index(addr);
+ pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
if (pmd_devmap(pmd)) {
pgmap = get_dev_pagemap(pfn, pgmap);
--
2.20.1
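The replaced expression and its open-coded successor compute the same value
whenever PTRS_PER_PTE covers a full PMD; a minimal userspace sketch to see
that, with x86-64 4 KB paging constants assumed (they are not part of the
patch):

	#include <stdio.h>

	/* Assumed x86-64 4 KB paging constants; not taken from the patch. */
	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))
	#define PTRS_PER_PTE	512

	/* What pte_index() expands to on most architectures. */
	static unsigned long pte_index(unsigned long addr)
	{
		return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	}

	int main(void)
	{
		unsigned long addr = 0x7f1234567000UL;

		/* Both yield the page offset within the huge page; only the
		 * open-coded form says so directly. */
		printf("pte_index:  %lu\n", pte_index(addr));
		printf("open-coded: %lu\n", (addr & ~PMD_MASK) >> PAGE_SHIFT);
		return 0;
	}

The point of the subject line is conceptual: pte_index() is about indexing a
page table, while this code wants "offset of addr within the PMD-sized page",
so spelling the mask out removes the misuse.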
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...unlock_page(page);
> + put_page(page);
> + if (ret)
> + return migrate_vma_collect_skip(start, end, walk);
> + if (pmd_none(*pmdp))
> + return migrate_vma_collect_hole(start, end, -1, walk);
> }
>
> - if (unlikely(pmd_bad(*pmdp)))
> + if (unlikely(pmd_bad(pmd) || pmd_devmap(pmd)))
> return migrate_vma_collect_skip(start, end, walk);
>
> ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
> @@ -2310,8 +2352,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
> mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
> }
>
> - /* FIXME sup...
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...+ struct dev_pagemap *pgmap = NULL;
unsigned long pfn, npages, i;
bool fault, write_fault;
uint64_t cpu_flags;
@@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
pfn = pmd_pfn(pmd) + pte_index(addr);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
if (pmd_devmap(pmd)) {
- hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
- hmm_vma_walk->pgmap);
- if (unlikely(!hmm_vma_walk->pgmap))
+ pgmap = get_dev_pagemap(pfn, pgmap);
+ if (unlikely(!pgmap))
return -EBUSY;
}
pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
}
-...
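The shape of hmm_vma_handle_pmd() after this change, as a hedged sketch
(kernel context assumed; the function name suffix and everything outside the
hunk are inferred, not quoted): the dev_pagemap reference now lives in a local
for the duration of one callback, and get_dev_pagemap() reuses the cached
entry when the next pfn still falls inside it, releasing it otherwise.

	static int handle_devmap_pmd_sketch(pmd_t pmd, unsigned long pfn,
					    unsigned long addr, unsigned long end)
	{
		struct dev_pagemap *pgmap = NULL;

		for (; addr < end; addr += PAGE_SIZE, pfn++) {
			if (pmd_devmap(pmd)) {
				/* Reuses pgmap if pfn is still covered by it;
				 * a failed lookup holds no reference, so the
				 * early return does not leak. */
				pgmap = get_dev_pagemap(pfn, pgmap);
				if (unlikely(!pgmap))
					return -EBUSY;
			}
			/* ... record the pfn for this page ... */
		}
		if (pgmap)
			put_dev_pagemap(pgmap);	/* drop it before returning */
		return 0;
	}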
2019 Aug 07
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...l fault, write_fault;
> > uint64_t cpu_flags;
> > @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> > pfn = pmd_pfn(pmd) + pte_index(addr);
> > for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
> > if (pmd_devmap(pmd)) {
> > - hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> > - hmm_vma_walk->pgmap);
> > - if (unlikely(!hmm_vma_walk->pgmap))
> > + pgmap = get_dev_pagemap(pfn, pg...
2019 Sep 11
0
[PATCH 1/4] mm/hmm: make full use of walk_page_range()
...>values[HMM_PFN_ERROR];
+ pfns[i] = range->values[value];
return 0;
}
@@ -584,7 +583,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
}
return 0;
} else if (!pmd_present(pmd))
- return hmm_pfns_bad(start, end, walk);
+ return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
@@ -612,7 +611,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
* recover.
*/
if (pmd_bad(pmd))
- return hmm_pfns_bad(start, end, walk);
+ return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
ptep = pte_offset_map(pmdp, addr);
i = (addr - range->...
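hmm_pfns_fill() itself is outside the excerpt; judging from the call sites
above it is the old hmm_pfns_bad() generalized to take the fill value, roughly
as follows (a reconstruction, not the patch text):

	static int hmm_pfns_fill(unsigned long addr, unsigned long end,
				 struct hmm_range *range,
				 enum hmm_pfn_value_e value)
	{
		uint64_t *pfns = range->pfns;
		unsigned long i = (addr - range->start) >> PAGE_SHIFT;

		/* Stamp every page in [addr, end) with the chosen value,
		 * e.g. range->values[HMM_PFN_ERROR] as in the hunks above. */
		for (; addr < end; addr += PAGE_SIZE, i++)
			pfns[i] = range->values[value];
		return 0;
	}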
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...&fault, &write_fault);
if (fault || write_fault) {
hmm_vma_walk->last = addr;
- pmd_migration_entry_wait(vma->vm_mm, pmdp);
+ pmd_migration_entry_wait(walk->mm, pmdp);
return -EBUSY;
}
return 0;
@@ -657,11 +653,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
- * No need to take pmd_lock here, even if some other threads
+ * No need to take pmd_lock here, even if some other thread
* is splitting the huge pmd we will get that event through
* mmu_notifier callback.
*
- * So just read pmd value and chec...
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+again:
+ if (pmd_trans_huge(*pmdp) || !pmd_present(*pmdp)) {
+ int ret = migrate_vma_handle_pmd(pmdp, start, end, walk);
+
+ if (!ret)
+ return 0;
+ if (ret == -EAGAIN)
+ goto again;
}
- if (unlikely(pmd_bad(*pmdp)))
+ if (unlikely(pmd_bad(*pmdp) || pmd_devmap(*pmdp)))
return migrate_vma_collect_skip(start, end, walk);
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
@@ -2404,8 +2488,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
}
- /* FIXME support THP */
- if (!page || !page-...
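The body of migrate_vma_handle_pmd() is not in the excerpt, but the retry loop
pins down its return-code contract; a hedged summary in comment form:

	/*
	 * Inferred from the caller above, not quoted from the patch:
	 *   0       - the huge PMD was fully collected; nothing left to do
	 *   -EAGAIN - *pmdp changed under us (e.g. the THP was split);
	 *             re-read it via the "again:" label
	 *   other   - not handled as a THP: fall through to the
	 *             pmd_bad()/pmd_devmap() check and the per-PTE walk
	 */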
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
selftests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
7 files changed, 81 insertions(+), 171 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups
Gitweb:
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...f (!pmd_present(pmd)) {
- if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+ if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
return -EFAULT;
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
}
@@ -362,7 +353,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
goto again;
- return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
+ return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
}
/*
@@ -372,16 +363,16 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
* recover.
*/
if (pmd_bad(pmd)) {
- if (...
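With the customizable format gone, a caller passes a plain array of unsigned
long and fixed HMM_PFN_* flags; a minimal caller sketch against the API as it
ended up upstream (locking helper names as in current kernels; the
retry-on-invalidate loop is elided):

	unsigned long pfns[16];			/* one unsigned long per page */
	struct hmm_range range = {
		.notifier	= &notifier,	/* caller's mmu_interval_notifier */
		.start		= start,
		.end		= start + 16 * PAGE_SIZE,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,	/* fault every page in */
	};
	int ret;

	range.notifier_seq = mmu_interval_read_begin(range.notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);	/* now simply 0 or -errno */
	mmap_read_unlock(mm);
	if (!ret) {
		/* flags sit in the high bits, the pfn in the low bits */
		struct page *page = hmm_pfn_to_page(pfns[0]);
		/* ... map the page for the device ... */
	}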
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...f (!pmd_present(pmd)) {
- if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+ if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
return -EFAULT;
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
}
@@ -362,7 +353,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
goto again;
- return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
+ return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
}
/*
@@ -372,16 +363,16 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
* recover.
*/
if (pmd_bad(pmd)) {
- if (...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...(hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
> + if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
> return -EFAULT;
> return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
> }
> @@ -362,7 +353,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
> if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
> goto again;
>
> - return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
> + return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
> }
>
> /*
> @@ -372,16 +363,16 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
> *...
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted as [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts but there may be some merge conflicts depending on
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted as [1]. This version now supports
splitting a THP midway through the migration process, which led to a
number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process pfns in the simplified format.
I would appreciate tested-by's for the two
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
Here are seven more patches for things I found to clean up.
This was based on top of Christoph's seven patches:
"hmm_range_fault related fixes and legacy API removal v3".
I assume this will go into Jason's tree since there will likely be
more HMM changes in this cycle.
Changes from v1 to v2:
Added AMD GPU to hmm_update removal.
Added 2 patches from Christoph.
Added 2 patches as
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process pfns in the simplified format.
I would appreciate tested-by's for the two