Displaying 20 results from an estimated 74 matches for "hmm_vma_walk".
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...---
mm/hmm.c | 62 ++++++++++++++++++++++++--------------------------------
1 file changed, 27 insertions(+), 35 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 9a908902e4cc..d66fa29b42e0 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -278,7 +278,6 @@ EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
struct hmm_range *range;
- struct dev_pagemap *pgmap;
unsigned long last;
unsigned int flags;
};
@@ -475,6 +474,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range =...
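For orientation, a minimal sketch of the pattern this patch moves toward: rather than stashing the pgmap in struct hmm_vma_walk, each handler looks it up and drops it locally. The helper name is hypothetical; get_dev_pagemap()/put_dev_pagemap() are the real interfaces from linux/memremap.h.
#include <linux/memremap.h>
/*
 * Hypothetical stand-in for the real pte/pmd handlers: take the pgmap
 * reference where it is needed and release it before returning, instead
 * of caching it in hmm_vma_walk across calls.
 */
static int demo_lookup_pfn(unsigned long pfn)
{
	struct dev_pagemap *pgmap;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return -EFAULT;
	/* ... record the device pfn for the caller ... */
	put_dev_pagemap(pgmap);
	return 0;
}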
2019 Aug 07
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...+++++++++++++++++++--------------------------------
> 1 file changed, 27 insertions(+), 35 deletions(-)
>
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 9a908902e4cc..d66fa29b42e0 100644
> +++ b/mm/hmm.c
> @@ -278,7 +278,6 @@ EXPORT_SYMBOL(hmm_mirror_unregister);
>
> struct hmm_vma_walk {
> struct hmm_range *range;
> - struct dev_pagemap *pgmap;
> unsigned long last;
> unsigned int flags;
> };
> @@ -475,6 +474,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> struct hmm_vma_walk *hmm_vma_walk = walk-...
2019 Aug 07
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...--------------
> > 1 file changed, 27 insertions(+), 35 deletions(-)
> >
> > diff --git a/mm/hmm.c b/mm/hmm.c
> > index 9a908902e4cc..d66fa29b42e0 100644
> > +++ b/mm/hmm.c
> > @@ -278,7 +278,6 @@ EXPORT_SYMBOL(hmm_mirror_unregister);
> >
> > struct hmm_vma_walk {
> > struct hmm_range *range;
> > - struct dev_pagemap *pgmap;
> > unsigned long last;
> > unsigned int flags;
> > };
> > @@ -475,6 +474,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> > #...
2019 Aug 14
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...some sense to cache it once we find it?
diff --git a/mm/hmm.c b/mm/hmm.c
index 9a908902e4cc38..4e30128c23a505 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -497,10 +497,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
}
pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
}
- if (hmm_vma_walk->pgmap) {
- put_dev_pagemap(hmm_vma_walk->pgmap);
- hmm_vma_walk->pgmap = NULL;
- }
hmm_vma_walk->last = end;
return 0;
#else
@@ -604,10 +600,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
return 0;
fault:
- if (hmm_vma_walk->pgmap) {
- put_...
2019 Aug 14
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
On Tue, Aug 13, 2019 at 06:36:33PM -0700, Dan Williams wrote:
> Section alignment constraints somewhat save us here. The only example
> I can think of where a PMD does not contain a uniform pgmap association
> for each pte is the case when the pgmap overlaps normal dram, i.e. shares
> the same 'struct memory_section' for a given span. Otherwise, distinct
> pgmaps arrange to manage
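To make the invariant under discussion concrete, a hedged sketch of checking that a PMD-sized pfn run is covered by a single pgmap. The helper is hypothetical; the hint-reuse semantics of get_dev_pagemap() are real.
/* Hypothetical: does a PMD-sized run of pfns share one dev_pagemap? */
static bool pmd_pfns_share_pgmap(unsigned long first_pfn, unsigned long npages)
{
	struct dev_pagemap *first, *last;
	first = get_dev_pagemap(first_pfn, NULL);
	if (!first)
		return false;
	/*
	 * Passing @first as the hint: get_dev_pagemap() returns it (without
	 * taking another reference) if the pfn falls inside it; otherwise it
	 * drops the hint and looks up the covering pgmap, if any.
	 */
	last = get_dev_pagemap(first_pfn + npages - 1, first);
	if (last)
		put_dev_pagemap(last);
	return last == first;
}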
2019 Jul 26
0
[PATCH v2 5/7] mm/hmm: make full use of walk_page_range()
...stoph Hellwig <hch at lst.de>
---
mm/hmm.c | 130 ++++++++++++++++++++++++-------------------------------
1 file changed, 57 insertions(+), 73 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 1bc014cddd78..838cd1d50497 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -840,13 +840,44 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
#endif
}
-static void hmm_pfns_clear(struct hmm_range *range,
- uint64_t *pfns,
- unsigned long addr,
- unsigned long end)
+static int hmm_vma_walk_test(unsigned long start,
+ unsigned long end,
+ struct mm_walk *walk)...
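The new hmm_vma_walk_test() slots into the generic page walker as a test_walk callback. A minimal sketch of that wiring, using the mm_walk_ops interface from the current include/linux/pagewalk.h (the callback body here is hypothetical; the real one rejects VMAs that HMM cannot mirror):
#include <linux/pagewalk.h>
/* Hypothetical filter: return 0 to walk the VMA, negative to abort. */
static int demo_walk_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
		return -EFAULT;
	return 0;
}
static const struct mm_walk_ops demo_walk_ops = {
	.test_walk	= demo_walk_test,
	/* .pmd_entry, .pte_hole, etc. filled in as mm/hmm.c does */
};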
2019 Sep 11
0
[PATCH 1/4] mm/hmm: make full use of walk_page_range()
...ct mm_walk *walk, unsigned long addr,
return -EFAULT;
}
-static int hmm_pfns_bad(unsigned long addr,
- unsigned long end,
- struct mm_walk *walk)
+static int hmm_pfns_fill(unsigned long addr,
+ unsigned long end,
+ struct hmm_range *range,
+ enum hmm_pfn_value_e value)
{
- struct hmm_vma_walk *hmm_vma_walk = walk->private;
- struct hmm_range *range = hmm_vma_walk->range;
uint64_t *pfns = range->pfns;
unsigned long i;
i = (addr - range->start) >> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, i++)
- pfns[i] = range->values[HMM_PFN_ERROR];
+ pfns[i]...
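The loop body is cut off above; reassembled from the hunk, the refactored helper as it stood in this era plausibly reads:
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];	/* e.g. HMM_PFN_ERROR */
	return 0;
}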
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
...to hmm_range_fault with a flags
value
mm: merge hmm_range_snapshot into hmm_range_fault
Ralph Campbell (5):
mm/hmm: replace hmm_update with mmu_notifier_range
mm/hmm: a few more C style and comment clean ups
mm/hmm: make full use of walk_page_range()
mm/hmm: remove hugetlbfs check in hmm_vma_walk_pmd
mm/hmm: remove hmm_range vma
Documentation/vm/hmm.rst | 17 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 8 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
drivers/gpu/drm/nouveau/nouveau_svm.c | 13 +-
include/linux/hmm.h | 47 ++--
mm/hmm.c...
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, i++)
- pfns[i] = range->values[value];
-
+ range->hmm_pfns[i] = cpu_flags;
return 0;
}
@@ -96,7 +81,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
}
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
- uint64_t pfns, uint64_t cpu_flags)
+ unsigned long pfn_req_flags,
+ unsigned long cpu_flags)
{
struct hmm_range *range = hmm_vma_walk->range;
@@ -110,27 +96,28 @@ static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, i++)
- pfns[i] = range->values[value];
-
+ range->hmm_pfns[i] = cpu_flags;
return 0;
}
@@ -96,7 +81,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
}
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
- uint64_t pfns, uint64_t cpu_flags)
+ unsigned long pfn_req_flags,
+ unsigned long cpu_flags)
{
struct hmm_range *range = hmm_vma_walk->range;
@@ -110,27 +96,28 @@ static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,...
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
7 files changed, 81 insertions(+), 171 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups
Gitweb:
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...AGE_SIZE, i++)
> - pfns[i] = range->values[value];
> -
> + range->hmm_pfns[i] = cpu_flags;
> return 0;
> }
>
> @@ -96,7 +81,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
> }
>
> static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
> - uint64_t pfns, uint64_t cpu_flags)
> + unsigned long pfn_req_flags,
> + unsigned long cpu_flags)
> {
> struct hmm_range *range = hmm_vma_walk->range;
>
> @@ -110,27 +96,28 @@ static unsigned int hmm_pte_need_fault(const s...
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
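For reference, a hedged driver-side sketch of the calling convention this series arrives at: one unsigned long of flags per page in hmm_pfns, and hmm_range_fault() returning 0 or a negative errno. Function and buffer names are illustrative, and the mmu_interval_read_retry() loop a real driver needs is elided.
#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
static int demo_fault_range(struct mm_struct *mm,
			    struct mmu_interval_notifier *notifier,
			    unsigned long start, unsigned long npages,
			    unsigned long *pfn_buf)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= start,
		.end		= start + npages * PAGE_SIZE,
		.hmm_pfns	= pfn_buf,
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;
	range.notifier_seq = mmu_interval_read_begin(notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);	/* 0 or -errno; -EBUSY means retry */
	mmap_read_unlock(mm);
	if (ret)
		return ret;
	/* Each entry now has HMM_PFN_VALID set; hmm_pfn_to_page() maps it. */
	return 0;
}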
2020 Apr 22
0
[PATCH hmm 2/5] mm/hmm: make hmm_range_fault return 0 or -1
From: Jason Gunthorpe <jgg at mellanox.com>
hmm_vma_walk->last is supposed to be updated after every write to the
pfns, so that it can be returned by hmm_range_fault(). However, this is
not done consistently. Fortunately nothing checks the return code of
hmm_range_fault() for anything other than error.
More importantly last must be set before returni...
2020 May 01
0
[PATCH hmm v2 2/5] mm/hmm: make hmm_range_fault return 0 or -1
From: Jason Gunthorpe <jgg at mellanox.com>
hmm_vma_walk->last is supposed to be updated after every write to the
pfns, so that it can be returned by hmm_range_fault(). However, this is
not done consistently. Fortunately nothing checks the return code of
hmm_range_fault() for anything other than error.
More importantly last must be set before returni...
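A sketch of the invariant both postings describe, with a hypothetical handler: every write to the pfns array is paired with an update of hmm_vma_walk->last before any return path.
/* Hypothetical single-pte handler illustrating the 'last' rule. */
static int demo_handle_pte(struct hmm_vma_walk *hmm_vma_walk,
			   unsigned long addr, uint64_t pfn_value)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
	range->pfns[i] = pfn_value;
	/* Keep 'last' in step with the pfns writes, even on early return,
	 * so hmm_range_fault() can report how far the walk progressed. */
	hmm_vma_walk->last = addr + PAGE_SIZE;
	return 0;
}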
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...cing the existing
+ * Return: an HMM object, either by referencing the existing
* (per-process) object, or by creating a new one.
*
* This is not intended to be used directly by device drivers. If mm already
@@ -325,8 +325,8 @@ static int hmm_pfns_bad(unsigned long addr,
}
/*
- * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
- * @start: range virtual start address (inclusive)
+ * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * @addr: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
* @fault: should we faul...
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
2019 Aug 08
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...> > + * We do put_dev_pagemap() here so that we can leverage
> > + * get_dev_pagemap() optimization which will not re-take a
> > + * reference on a pgmap if we already have one.
> > + */
> > + if (hmm_vma_walk->pgmap)
> > + put_dev_pagemap(hmm_vma_walk->pgmap);
> > +
>
> Seems ok, but only if the caller is guaranteeing that the range does
> not span outside of a single pagemap instance. If that guarantee is
> met why not just have the caller pass in a...
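The optimization referenced in the quoted comment is the hint argument to get_dev_pagemap(); a hedged sketch of the caching idiom (loop and names are illustrative):
/*
 * Illustrative: scan a run of device pfns while caching the current
 * pgmap.  When the hint still covers the pfn, get_dev_pagemap() returns
 * it without taking a new reference, so only pgmap boundaries pay for a
 * fresh lookup.
 */
static int demo_scan_pfns(unsigned long pfn, unsigned long npages)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long i;
	for (i = 0; i < npages; i++) {
		pgmap = get_dev_pagemap(pfn + i, pgmap);
		if (!pgmap)
			return -EBUSY;	/* stale hint was already dropped */
		/* ... use the ZONE_DEVICE page ... */
	}
	put_dev_pagemap(pgmap);
	return 0;
}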
2019 Aug 14
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...* We do put_dev_pagemap() here so that we can leverage
> > > + * get_dev_pagemap() optimization which will not re-take a
> > > + * reference on a pgmap if we already have one.
> > > + */
> > > + if (hmm_vma_walk->pgmap)
> > > + put_dev_pagemap(hmm_vma_walk->pgmap);
> > > +
> >
> > Seems ok, but only if the caller is guaranteeing that the range does
> > not span outside of a single pagemap instance. If that guarantee is
> > met why not ju...