Displaying 19 results from an estimated 19 matches for "pte_to_hmm_pfn_flags".
2020 Jul 01
0
[PATCH v3 2/5] mm/hmm: add hmm_mapping order
...flags;
spinlock_t *ptl;
pte_t entry;
+ unsigned long horder = huge_page_order(hstate_vma(vma));
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
- cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+ cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+ (horder << HMM_PFN_ORDER_SHIFT);
required_fault =
hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
if (required_fault) {
--
2.20.1
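The hunk above folds the hugetlb page order into the upper bits of the value reported for each page. A minimal sketch of how that order can be recovered, assuming the HMM_PFN_ORDER_SHIFT layout this series defines (a 5-bit order field; the series' own accessor for this is hmm_pfn_to_map_order()):

#include <linux/hmm.h>

/* Sketch only: extract the order packed in by the hunk above. */
static unsigned int sketch_map_order(unsigned long hmm_pfn)
{
	/* the order occupies 5 bits starting at HMM_PFN_ORDER_SHIFT */
	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}

A 2 MB hugetlb mapping on x86-64, for example, would report order 9 (512 contiguous 4K pages).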
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ong hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
@@ -222,31 +210,31 @@ static inline bool hmm_is_device_private_entry(struct hmm_range *range,
range->dev_private_owner;
}
-static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
+static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
+ pte_t pte)
{
if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
return 0;
- return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_WR...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ong hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
@@ -222,31 +210,31 @@ static inline bool hmm_is_device_private_entry(struct hmm_range *range,
range->dev_private_owner;
}
-static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
+static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
+ pte_t pte)
{
if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
return 0;
- return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_WR...
2019 Jul 30
0
[PATCH 11/13] mm: cleanup the hmm_vma_handle_pmd stub
...should never reach this code ! */
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
+ unsigned long end, uint64_t *pfns, pmd_t pmd)
+{
return -EINVAL;
-#endif
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
--
2.20.1
2019 Aug 06
0
[PATCH 11/15] mm: cleanup the hmm_vma_handle_pmd stub
...return -EINVAL;
-#endif
}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+/* stub to allow the code below to compile */
+int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
+ unsigned long end, uint64_t *pfns, pmd_t pmd);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
--
2.20.1
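The two versions of this cleanup differ in how the !CONFIG_TRANSPARENT_HUGEPAGE stub is expressed: one keeps a -EINVAL body, the other only declares the function so the compiler can drop the dead call site. A minimal sketch of that declaration-only pattern, with hypothetical names (CONFIG_MY_FEATURE, my_handle_huge):

#ifdef CONFIG_MY_FEATURE
static int my_handle_huge(unsigned long addr, unsigned long end)
{
	return 0;	/* real handling would go here */
}
#else
/*
 * Stub to allow the caller to compile: the only call site sits behind a
 * condition the compiler can prove false without CONFIG_MY_FEATURE, so
 * the call is eliminated and the missing definition never fails to link.
 */
int my_handle_huge(unsigned long addr, unsigned long end);
#endif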
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
.../* CONFIG_TRANSPARENT_HUGEPAGE */
>
> static inline bool hmm_is_device_private_entry(struct hmm_range *range,
> @@ -222,31 +210,31 @@ static inline bool hmm_is_device_private_entry(struct hmm_range *range,
> range->dev_private_owner;
> }
>
> -static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
> +static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
> + pte_t pte)
> {
> if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
> return 0;
> - return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
> ...
2020 Mar 16
0
[PATCH 4/4] mm: check the device private page owner in hmm_range_fault
...G_TRANSPARENT_HUGEPAGE */
+static inline bool hmm_is_device_private_entry(struct hmm_range *range,
+ swp_entry_t entry)
+{
+ return is_device_private_entry(entry) &&
+ device_private_entry_to_page(entry)->pgmap->owner ==
+ range->dev_private_owner;
+}
+
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
@@ -254,7 +262,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
* Never fault in device private pages, but just report
* the PFN even if not present.
*...
2020 Jun 30
0
[PATCH v2 2/5] mm/hmm: add output flags for PMD/PUD page mapping
...ma(vma));
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -475,6 +480,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+ if (hshift >= PUD_SHIFT)
+ cpu_flags |= HMM_PFN_PUD;
+ else if (hshift >= PMD_SHIFT)
+ cpu_flags |= HMM_PFN_PMD;
required_fault =
hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
if (required_fault) {
--
2.20.1
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...m_device_entry_from_pfn(range, pfn) | cpu_flags;
}
- if (hmm_vma_walk->pgmap) {
- put_dev_pagemap(hmm_vma_walk->pgmap);
- hmm_vma_walk->pgmap = NULL;
- }
+ if (pgmap)
+ put_dev_pagemap(pgmap);
hmm_vma_walk->last = end;
return 0;
#else
@@ -520,7 +517,7 @@ static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
unsigned long end, pmd_t *pmdp, pte_t *ptep,
- uint64_t *pfn)
+ uint64_t *pfn, struct dev_pagemap **pgmap)
{
struct hmm_vma_walk *hmm_vma_walk = walk->priva...
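The hunk above drops the pgmap cache from struct hmm_vma_walk and instead threads a local pointer through the walk. A rough sketch of the underlying get_dev_pagemap()/put_dev_pagemap() caching pattern, with hypothetical loop bounds:

#include <linux/memremap.h>

/* Sketch only: reuse one dev_pagemap reference across consecutive pfns. */
static void sketch_walk_pfns(const unsigned long *pfns, unsigned long npfns)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long i;

	for (i = 0; i < npfns; i++) {
		/* passing the old pgmap lets it be reused (or released) */
		pgmap = get_dev_pagemap(pfns[i], pgmap);
		if (!pgmap)
			continue;	/* not a ZONE_DEVICE pfn */
		/* ... per-page work ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}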
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
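The simplified calling convention this series arrives at can be sketched roughly as follows. The field names and request flags (hmm_pfns, default_flags, HMM_PFN_REQ_FAULT/HMM_PFN_REQ_WRITE) are taken from the series itself, while the notifier setup and the surrounding retry loop are assumed driver context:

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

/* Sketch of a caller after the series: one unsigned long per page. */
static int sketch_fault_range(struct mmu_interval_notifier *notifier,
			      unsigned long start, unsigned long npages,
			      unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= start,
		.end		= start + npages * PAGE_SIZE,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
	};

	range.notifier_seq = mmu_interval_read_begin(notifier);
	/* caller holds mmap_lock for read; -EBUSY means retry */
	return hmm_range_fault(&range);
}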
2019 Jul 30
0
[PATCH 07/13] mm: remove the page_shift member from struct hmm_range
...= huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
- i = (start - range->start) >> range->page_shift;
+ i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -812,8 +799,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
goto unlock;
}
- pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
- for (; addr < end; addr += size, i++, pfn += pfn_inc)
+ pfn = pte_pfn(entry) + ((start &...
2019 Aug 06
0
[PATCH 07/15] mm: remove the page_shift member from struct hmm_range
...= huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
- i = (start - range->start) >> range->page_shift;
+ i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -812,8 +799,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
goto unlock;
}
- pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
- for (; addr < end; addr += size, i++, pfn += pfn_inc)
+ pfn = pte_pfn(entry) + ((start &...
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
7 files changed, 81 insertions(+), 171 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups
Gitweb:
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
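The output side of the fixed format can be sketched as follows, assuming the HMM_PFN_VALID/HMM_PFN_WRITE/HMM_PFN_ERROR bits and the hmm_pfn_to_page() helper this series introduces; the wrapper itself is illustrative only:

#include <linux/hmm.h>

/* Sketch only: turn one returned entry into a usable struct page. */
static struct page *sketch_result_to_page(unsigned long hmm_pfn, bool need_write)
{
	if (!(hmm_pfn & HMM_PFN_VALID) || (hmm_pfn & HMM_PFN_ERROR))
		return NULL;
	if (need_write && !(hmm_pfn & HMM_PFN_WRITE))
		return NULL;
	return hmm_pfn_to_page(hmm_pfn);
}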
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order()
function. This allows a device driver to know that a given 4K PFN is
actually mapped by the CPU using a larger sized CPU page table entry and
therefore the device driver can safely map system memory using larger
device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's
hmm tree. These were
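A minimal sketch of the driver-side use described above, assuming the hmm_pfn_to_map_order() helper this series introduces; the size computation is illustrative only:

#include <linux/hmm.h>

/* Sketch only: how large a device PTE could safely cover this pfn. */
static unsigned long sketch_device_pte_size(unsigned long hmm_pfn)
{
	unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

	/* order 9 on x86-64 means the CPU uses a 2 MB (PMD) mapping */
	return PAGE_SIZE << order;
}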
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
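A rough sketch of the ownership handshake described above, using the two fields visible in the quoted hmm_range_fault() hunk (pgmap->owner and range->dev_private_owner); the cookie variable is hypothetical, any pointer unique to the driver works:

#include <linux/memremap.h>
#include <linux/hmm.h>

static int my_owner_cookie;	/* hypothetical: address used only as an identity token */

static void sketch_tag_pagemap(struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->owner = &my_owner_cookie;
}

static void sketch_tag_range(struct hmm_range *range)
{
	/* only device private entries with a matching owner are reported */
	range->dev_private_owner = &my_owner_cookie;
}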
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD sized or PUD sized CPU page table entry and therefore the device
driver can safely map system memory using larger device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for
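A sketch of what the cover letter describes, assuming the HMM_PFN_PMD and HMM_PFN_PUD output flags this v2 series proposes (the later v3 replaces them with an explicit mapping order field):

#include <linux/hmm.h>

/* Sketch only: how big a CPU mapping backs this returned pfn. */
static unsigned long sketch_cpu_mapping_size(unsigned long hmm_pfn)
{
	if (hmm_pfn & HMM_PFN_PUD)
		return PUD_SIZE;	/* e.g. 1 GB on x86-64 */
	if (hmm_pfn & HMM_PFN_PMD)
		return PMD_SIZE;	/* e.g. 2 MB on x86-64 */
	return PAGE_SIZE;
}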
2019 Jul 01
30
dev_pagemap related cleanups v4
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable, which removes the need to wrap it in hmm
and thus allows us to kill a lot of code
Note: this series is on top of Linux 5.2-rc6 and has some minor
conflicts with the hmm tree that are easy to resolve.
Diffstat summary:
34 files changed, 379 insertions(+), 1016 deletions(-)
Git