Displaying 18 results from an estimated 18 matches for "orig_pfn".
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...mdp, pte_t *ptep,
- uint64_t *pfn)
+ unsigned long *hmm_pfn)
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
unsigned int required_fault;
- uint64_t cpu_flags;
+ unsigned long cpu_flags;
pte_t pte = *ptep;
- uint64_t orig_pfn = *pfn;
+ uint64_t pfn_req_flags = *hmm_pfn;
if (pte_none(pte)) {
- required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
+ required_fault =
+ hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
if (required_fault)
goto fault;
- *pfn = range->values[HMM_PFN_NONE];
+ *h...
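The diff above replaces the driver-customizable uint64_t encoding, which went
through the range->flags[] and range->values[] lookup tables, with a plain
unsigned long whose flag bits are fixed. A minimal sketch of the fixed
encoding as this series ended up in include/linux/hmm.h (bit positions
hedged; the merged header is authoritative):

	/* Output flags live in the top bits of each hmm_pfn. */
	#define HMM_PFN_VALID	(1UL << (BITS_PER_LONG - 1))
	#define HMM_PFN_WRITE	(1UL << (BITS_PER_LONG - 2))
	#define HMM_PFN_ERROR	(1UL << (BITS_PER_LONG - 3))
	#define HMM_PFN_FLAGS	(HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR)

	/* With fixed bits, a driver decodes results without lookup tables. */
	static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
	{
		return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
	}

One consequence visible in the truncated + line: the old
range->values[HMM_PFN_NONE] sentinel becomes plain 0 in the fixed encoding,
so the pte_none() case is presumably just *hmm_pfn = 0;.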
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...mdp, pte_t *ptep,
- uint64_t *pfn)
+ unsigned long *hmm_pfn)
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
unsigned int required_fault;
- uint64_t cpu_flags;
+ unsigned long cpu_flags;
pte_t pte = *ptep;
- uint64_t orig_pfn = *pfn;
+ uint64_t pfn_req_flags = *hmm_pfn;
if (pte_none(pte)) {
- required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
+ required_fault =
+ hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
if (required_fault)
goto fault;
- *pfn = range->values[HMM_PFN_NONE];
+ *h...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...> + unsigned long *hmm_pfn)
> {
> struct hmm_vma_walk *hmm_vma_walk = walk->private;
> struct hmm_range *range = hmm_vma_walk->range;
> unsigned int required_fault;
> - uint64_t cpu_flags;
> + unsigned long cpu_flags;
> pte_t pte = *ptep;
> - uint64_t orig_pfn = *pfn;
> + uint64_t pfn_req_flags = *hmm_pfn;
>
> if (pte_none(pte)) {
> - required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
> + required_fault =
> + hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
> if (required_fault)
> goto fault;
> -...
2020 Mar 16
4
ensure device private pages have an owner
When acting on device private mappings a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
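In driver terms the idea is roughly the following sketch (hedged; my_dev and
the two helpers are hypothetical, while dev_pagemap.owner and
migrate_vma.src_owner are the fields this series introduces):

	#include <linux/memremap.h>
	#include <linux/migrate.h>

	struct my_dev {				/* hypothetical driver context */
		struct dev_pagemap pagemap;
	};

	static void my_dev_register_memory(struct my_dev *mdev)
	{
		mdev->pagemap.type = MEMORY_DEVICE_PRIVATE;
		mdev->pagemap.owner = mdev;	/* opaque owner cookie */
		/* ... devm_memremap_pages(dev, &mdev->pagemap) ... */
	}

	static void my_dev_migrate_one(struct my_dev *mdev,
				       struct vm_area_struct *vma,
				       unsigned long addr)
	{
		unsigned long src = 0, dst = 0;	/* single page for brevity */
		struct migrate_vma args = {
			.vma = vma,
			.start = addr,
			.end = addr + PAGE_SIZE,
			.src = &src,
			.dst = &dst,
			/* collect only device private pages this device owns */
			.src_owner = mdev,
		};
		/* ... migrate_vma_setup(&args), copy, migrate_vma_finalize() ... */
	}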
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions for simplifying have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
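For reference, a post-series caller looks roughly like this (a hedged sketch
of the resulting v5.8-era API; the caller is assumed to hold the mmap lock
and to wrap this in the usual mmu_interval_read_begin() /
mmu_interval_read_retry() loop):

	#include <linux/hmm.h>
	#include <linux/mmu_notifier.h>

	/*
	 * Request flags share the pfns array (or go in default_flags), and
	 * hmm_range_fault() now returns 0 or a -ERRNO, not a page count.
	 */
	static int fault_one_page(struct mmu_interval_notifier *notifier,
				  unsigned long addr, unsigned long *hmm_pfn)
	{
		struct hmm_range range = {
			.notifier	= notifier,
			.start		= addr,
			.end		= addr + PAGE_SIZE,
			.hmm_pfns	= hmm_pfn,
			.default_flags	= HMM_PFN_REQ_FAULT,
		};

		range.notifier_seq = mmu_interval_read_begin(notifier);
		return hmm_range_fault(&range);
	}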
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...nything else as error.
- */
- if (is_device_private_entry(entry)) {
- cpu_flags = range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_DEVICE_PRIVATE];
- cpu_flags |= is_write_device_private_entry(entry) ?
- range->flags[HMM_PFN_WRITE] : 0;
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);
- if (fault || write_fault)
- goto fault;
- *pfn = hmm_device_entry_from_pfn(range,
- swp_offset(entry));
- *pfn |= cpu_flags;
- return 0;
- }
-
hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
&write_fa...
2020 Mar 16
6
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...> - if (is_device_private_entry(entry)) {
> - cpu_flags = range->flags[HMM_PFN_VALID] |
> - range->flags[HMM_PFN_DEVICE_PRIVATE];
> - cpu_flags |= is_write_device_private_entry(entry) ?
> - range->flags[HMM_PFN_WRITE] : 0;
> - hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
> - &fault, &write_fault);
> - if (fault || write_fault)
> - goto fault;
> - *pfn = hmm_device_entry_from_pfn(range,
> - swp_offset(entry));
> - *pfn |= cpu_flags;
> - return 0;
> - }
> -
> hmm_pte_need_fault(hmm_vma_w...
2019 Jul 30
0
[PATCH 08/13] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
...IG_HUGETLB_PAGE
- unsigned long addr = start, i, pfn, mask;
+ unsigned long addr = start, i, pfn;
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
- struct hstate *h = hstate_vma(vma);
uint64_t orig_pfn, cpu_flags;
bool fault, write_fault;
spinlock_t *ptl;
pte_t entry;
int ret = 0;
- mask = huge_page_size(h) - 1;
-
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long h...
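The local mask is redundant because the pagewalk core already hands the
callback the huge page mask. A hedged sketch of the post-patch computation
(hmask is huge_page_mask(h), i.e. ~(huge_page_size(h) - 1), so ~hmask
recovers the in-page offset):

	static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
					      unsigned long start,
					      unsigned long end,
					      struct mm_walk *walk)
	{
		pte_t entry = huge_ptep_get(pte);
		unsigned long pfn;

		/* offset within the huge page, no locally computed mask */
		pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
		/* ... fill range->pfns[] from pfn as in the diff above ... */
		return 0;
	}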
2019 Aug 06
0
[PATCH 08/15] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
...IG_HUGETLB_PAGE
- unsigned long addr = start, i, pfn, mask;
+ unsigned long addr = start, i, pfn;
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
- struct hstate *h = hstate_vma(vma);
uint64_t orig_pfn, cpu_flags;
bool fault, write_fault;
spinlock_t *ptl;
pte_t entry;
int ret = 0;
- mask = huge_page_size(h) - 1;
-
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long h...
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
..._walk->pgmap = get_dev_pagemap(
+ device_private_entry_to_pfn(entry),
+ hmm_vma_walk->pgmap);
+ if (unlikely(!hmm_vma_walk->pgmap))
+ return -EBUSY;
+ if (hmm_vma_walk->pgmap->owner !=
+ hmm_vma_walk->dev_private_owner)
+ cpu_flags = 0;
+
hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
&fault, &write_fault);
if (fault || write_fault)
Jason
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions for simplifying have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
2020 Mar 16
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...N even if not present.
*/
if (is_device_private_entry(entry)) {
- cpu_flags = range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_DEVICE_PRIVATE];
- cpu_flags |= is_write_device_private_entry(entry) ?
- range->flags[HMM_PFN_WRITE] : 0;
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);
- if (fault || write_fault)
- goto fault;
*pfn = hmm_device_entry_from_pfn(range,
swp_offset(entry));
- *pfn |= cpu_flags;
+ *pfn |= range->flags[HMM_PFN_VALID];
+ if (is_write_device_private_entry(entry))
+ *pfn |=...
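Reassembled from the + lines above (the truncated final line presumably ORs
in range->flags[HMM_PFN_WRITE]), the simplified path reports the device
private entry directly instead of computing cpu_flags and possibly faulting:

	if (is_device_private_entry(entry)) {
		*pfn = hmm_device_entry_from_pfn(range, swp_offset(entry));
		*pfn |= range->flags[HMM_PFN_VALID];
		if (is_write_device_private_entry(entry))
			*pfn |= range->flags[HMM_PFN_WRITE];
		return 0;
	}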
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
7 files changed, 81 insertions(+), 171 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups
Gitweb:
2019 Jul 30
0
[PATCH 07/13] mm: remove the page_shift member from struct hmm_range
...HIFT;
- } else {
- pfn_inc = 1;
- size = PAGE_SIZE;
- }
+ mask = huge_page_size(h) - 1;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
- i = (start - range->start) >> range->page_shift;
+ i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -812,8 +799,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
goto unlock;
}
- pfn = pte_pfn(entry) + ((start & mask) >> range->...
2019 Aug 06
0
[PATCH 07/15] mm: remove the page_shift member from struct hmm_range
...HIFT;
- } else {
- pfn_inc = 1;
- size = PAGE_SIZE;
- }
+ mask = huge_page_size(h) - 1;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
- i = (start - range->start) >> range->page_shift;
+ i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -812,8 +799,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
goto unlock;
}
- pfn = pte_pfn(entry) + ((start & mask) >> range->...
2020 Mar 16
4
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...> if (is_device_private_entry(entry)) {
> - cpu_flags = range->flags[HMM_PFN_VALID] |
> - range->flags[HMM_PFN_DEVICE_PRIVATE];
> - cpu_flags |= is_write_device_private_entry(entry) ?
> - range->flags[HMM_PFN_WRITE] : 0;
> - hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
> - &fault, &write_fault);
> - if (fault || write_fault)
> - goto fault;
> *pfn = hmm_device_entry_from_pfn(range,
> swp_offset(entry));
> - *pfn |= cpu_flags;
> + *pfn |= range->flags[HMM_PFN_VALID];
> + if (is_write_...
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of