Displaying 20 results from an estimated 24 matches for "hmm_pte_need_fault".
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...= (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
-		pfns[i] = range->values[value];
-
+		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

@@ -96,7 +81,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
-				       uint64_t pfns, uint64_t cpu_flags)
+				       unsigned long pfn_req_flags,
+				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
@@ -110,27 +96,28 @@ static unsigned int hmm_pte_need_fault(const struct hmm...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...= (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
-		pfns[i] = range->values[value];
-
+		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

@@ -96,7 +81,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
-				       uint64_t pfns, uint64_t cpu_flags)
+				       unsigned long pfn_req_flags,
+				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
@@ -110,27 +96,28 @@ static unsigned int hmm_pte_need_fault(const struct hmm...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...for (; addr < end; addr += PAGE_SIZE, i++)
> -		pfns[i] = range->values[value];
> -
> +		range->hmm_pfns[i] = cpu_flags;
> 	return 0;
> }
>
> @@ -96,7 +81,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
> }
>
> static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
> -				       uint64_t pfns, uint64_t cpu_flags)
> +				       unsigned long pfn_req_flags,
> +				       unsigned long cpu_flags)
> {
> 	struct hmm_range *range = hmm_vma_walk->range;
>
> @@ -110,27 +96,28 @@ static unsigned int...
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
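[For orientation, a minimal sketch of the fixed encoding this series moves to; the bit layout below is recalled from the resulting include/linux/hmm.h and should be checked against the patches themselves:]

enum hmm_pfn_flags {
	/* Output flags, stored in the top bits of each hmm_pfns[] entry */
	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
	HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
};

/* Drivers decode entries directly instead of via a per-driver format: */
static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
{
	return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
}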
2020 Mar 16
4
ensure device private pages have an owner
When acting on device private mappings a driver needs to know if the
device (or other entity in the case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
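[Roughly, the check the series adds looks like this; a sketch only, with field names following the cover letter and a call site that is illustrative rather than quoted from the patches:]

/* The driver tags its pagemap when calling devm_memremap_pages(): */
pgmap->owner = my_driver_cookie;	/* any pointer unique to this driver */

/* migrate_vma collection then skips device private pages that
 * belong to a different owner: */
if (is_device_private_page(page) &&
    page->pgmap->owner != migrate->src_owner)
	goto next;	/* not ours: leave this mapping untouched */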
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...bit for an entry is bit 3,
@@ -86,7 +85,6 @@
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
-	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

diff --git a/mm/hmm.c b/mm/hmm.c
index 180e398170b0..3d10485bf323 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
	/* We aren't ask to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
-	/* If this is device memory then only fault if explicitly requested */
-	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
-		/* Do w...
2020 Mar 16
6
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...lag_e {
> 	HMM_PFN_VALID = 0,
> 	HMM_PFN_WRITE,
> -	HMM_PFN_DEVICE_PRIVATE,
> 	HMM_PFN_FLAG_MAX
> };
>
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 180e398170b0..3d10485bf323 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
> 	/* We aren't ask to do anything ... */
> 	if (!(pfns & range->flags[HMM_PFN_VALID]))
> 		return;
> -	/* If this is device memory then only fault if explicitly requested */
> -	if ((cpu_flags & range->flags[HMM_PFN_DE...
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
2020 Mar 16
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...bit for an entry is bit 3,
@@ -86,7 +85,6 @@
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
-	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

diff --git a/mm/hmm.c b/mm/hmm.c
index 180e398170b0..cfad65f6a67b 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
	/* We aren't ask to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
-	/* If this is device memory then only fault if explicitly requested */
-	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
-		/* Do w...
2020 Mar 16
2
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...
> For that we'd need HMM_PFN_DEVICE_PRIVATE NVIF_VMM_PFNMAP_V0_VRAM
> set in ->pfns before calling hmm_range_fault, which isn't happening.
Oh, I got tripped up on this too.
The logic is backwards from what you'd expect. If you *set*
HMM_PFN_DEVICE_PRIVATE then this triggers:
hmm_pte_need_fault():
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}
I.e. if the cpu page is a DEVICE_PRI...
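[In caller terms, "setting HMM_PFN_DEVICE_PRIVATE" means something like the sketch below, written against the old driver-defined flag layout; the bit values are illustrative, not any particular driver's:]

static const uint64_t my_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID]          = 1ULL << 0,
	[HMM_PFN_WRITE]          = 1ULL << 1,
	[HMM_PFN_DEVICE_PRIVATE] = 1ULL << 2,
};

/* Ask hmm_range_fault() to fault on device private memory for this
 * page rather than silently skip it: */
range->pfns[i] = my_flags[HMM_PFN_VALID] | my_flags[HMM_PFN_DEVICE_PRIVATE];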
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...rt address (inclusive)
+ * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * @addr: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
* @fault: should we fault or not ?
* @write_fault: write fault ?
@@ -376,9 +376,9 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
/*
* So we not only consider the individual per page request we also
* consider the default flags requested for the range. The API can
- * be use in 2 fashions. The first one where the HMM user coalesce
- * multiple page fault into one request and...
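[From the caller side the two fashions that comment describes look roughly like this; the sketch relies on the old API's hmm_range_fault() computing pfns = (pfns & pfn_flags_mask) | default_flags:]

/* Fashion 1: coalesce several page faults into one request; each
 * pfns[i] carries its own fault flags, so pass them through: */
range->default_flags = 0;
range->pfn_flags_mask = ~0ULL;

/* Fashion 2: pre-fault a whole range with one policy; per-pfn input
 * is ignored, so there is no need to pre-fill the array: */
range->default_flags = range->flags[HMM_PFN_VALID];
range->pfn_flags_mask = 0;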
2020 Mar 16
4
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...lag_e {
> 	HMM_PFN_VALID = 0,
> 	HMM_PFN_WRITE,
> -	HMM_PFN_DEVICE_PRIVATE,
> 	HMM_PFN_FLAG_MAX
> };
>
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 180e398170b0..cfad65f6a67b 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
> 	/* We aren't ask to do anything ... */
> 	if (!(pfns & range->flags[HMM_PFN_VALID]))
> 		return;
> -	/* If this is device memory then only fault if explicitly requested */
> -	if ((cpu_flags & range->flags[HMM_PFN_DE...
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings a driver needs to know if the
device (or other entity in the case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...ise fault it.
+	 */
+	hmm_vma_walk->pgmap = get_dev_pagemap(
+			device_private_entry_to_pfn(entry),
+			hmm_vma_walk->pgmap);
+	if (unlikely(!hmm_vma_walk->pgmap))
+		return -EBUSY;
+	if (hmm_vma_walk->pgmap->owner !=
+	    hmm_vma_walk->dev_private_owner)
+		cpu_flags = 0;
+
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault)
Jason
2020 Mar 16
1
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
On Mon, Mar 16, 2020 at 08:32:15PM +0100, Christoph Hellwig wrote:
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 180e398170b0..cfad65f6a67b 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
> 	/* We aren't ask to do anything ... */
> 	if (!(pfns & range->flags[HMM_PFN_VALID]))
> 		return;
> -	/* If this is device memory then only fault if explicitly requested */
> -	if ((cpu_flags & range->flags[HMM_PFN_DEVIC...
2020 Mar 20
1
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...pfns,
>> +		.flags = dmirror_hmm_flags,
>> +		.values = dmirror_hmm_values,
>> +		.pfn_shift = DPT_SHIFT,
>> +		.pfn_flags_mask = ~0ULL,
>
> Same here, especially since this is snapshot
>
> Jason
Actually, snapshot ignores pfn_flags_mask and default_flags.
In hmm_pte_need_fault(), HMM_FAULT_SNAPSHOT is checked first and the
function returns early, before pfn_flags_mask and default_flags are
consulted, since no faults are being requested.
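[That is, the early return sits at the top of the function, before the masks are applied; roughly like this sketch of the then-current code, not a verbatim quote:]

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;	/* snapshot: no faults, masks never consulted */

	/* Only reached when faulting was requested: */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;
	/* ... per-flag fault decisions follow ... */
}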
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
..._DEVICE_PRIVATE NVIF_VMM_PFNMAP_V0_VRAM
>> set in ->pfns before calling hmm_range_fault, which isn't happening.
>
> Oh, I got tripped up on this too.
>
> The logic is backwards from what you'd expect. If you *set*
> HMM_PFN_DEVICE_PRIVATE then this triggers:
>
> hmm_pte_need_fault():
> 	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
> 		/* Do we fault on device memory ? */
> 		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
> 			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
> 			*fault = true;
> 		}
> 		return;
> 	}
>...
2020 Jun 30
0
[PATCH v2 2/5] mm/hmm: add output flags for PMD/PUD page mapping
...ong hmask,
	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+	if (hshift >= PUD_SHIFT)
+		cpu_flags |= HMM_PFN_PUD;
+	else if (hshift >= PMD_SHIFT)
+		cpu_flags |= HMM_PFN_PMD;
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
--
2.20.1
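[On the driver side, the new output flags would let a consumer pick a device mapping size along these lines; this is illustrative, not code from the series:]

if (range->hmm_pfns[i] & HMM_PFN_PUD)
	map_size = PUD_SIZE;	/* CPU maps this range with a 1G page */
else if (range->hmm_pfns[i] & HMM_PFN_PMD)
	map_size = PMD_SIZE;	/* 2M huge page */
else
	map_size = PAGE_SIZE;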
2020 Jul 01
0
[PATCH v3 2/5] mm/hmm: add hmm_mapping order
..., pte);
	entry = huge_ptep_get(pte);
	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
-	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+		    (horder << HMM_PFN_ORDER_SHIFT);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
--
2.20.1
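[A consumer reads the order back with a helper along these lines; this matches the upstream hmm_pfn_to_map_order(), recalled from memory rather than quoted from the patch:]

/* 5 bits of order live just below the output flag bits: */
#define HMM_PFN_ORDER_SHIFT	(BITS_PER_LONG - 8)

static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
{
	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}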
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
Here are seven more patches for things I found to clean up.
This was based on top of Christoph's seven patches:
"hmm_range_fault related fixes and legacy API removal v3".
I assume this will go into Jason's tree since there will likely be
more HMM changes in this cycle.
Changes from v1 to v2:
Added AMD GPU to hmm_update removal.
Added 2 patches from Christoph.
Added 2 patches as