search for: cpu_flags

Displaying 20 results from an estimated 50 matches for "cpu_flags".

2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...uct hmm_range *range,
-                                          unsigned long pfn)
-{
-        return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
-}
-
 static int hmm_pfns_fill(unsigned long addr, unsigned long end,
-                struct hmm_range *range, enum hmm_pfn_value_e value)
+                struct hmm_range *range, unsigned long cpu_flags)
 {
-        uint64_t *pfns = range->pfns;
-        unsigned long i;
+        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

-        i = (addr - range->start) >> PAGE_SHIFT;
         for (; addr < end; addr += PAGE_SIZE, i++)
-                pfns[i] = range->values[value];
-
+                range->hmm_pfns[i] = cpu_fl...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...uct hmm_range *range,
-                                          unsigned long pfn)
-{
-        return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
-}
-
 static int hmm_pfns_fill(unsigned long addr, unsigned long end,
-                struct hmm_range *range, enum hmm_pfn_value_e value)
+                struct hmm_range *range, unsigned long cpu_flags)
 {
-        uint64_t *pfns = range->pfns;
-        unsigned long i;
+        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

-        i = (addr - range->start) >> PAGE_SHIFT;
         for (; addr < end; addr += PAGE_SIZE, i++)
-                pfns[i] = range->values[value];
-
+                range->hmm_pfns[i] = cpu_fl...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ned long pfn)
> -{
> -        return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
> -}
> -
>  static int hmm_pfns_fill(unsigned long addr, unsigned long end,
> -                struct hmm_range *range, enum hmm_pfn_value_e value)
> +                struct hmm_range *range, unsigned long cpu_flags)
>  {
> -        uint64_t *pfns = range->pfns;
> -        unsigned long i;
> +        unsigned long i = (addr - range->start) >> PAGE_SHIFT;
> 
> -        i = (addr - range->start) >> PAGE_SHIFT;
>          for (; addr < end; addr += PAGE_SIZE, i++)
> -                pfns[i] = range->values[val...
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>

The API is a bit complicated for the uses we actually have, and discussions about simplifying it have come up a number of times. This small series removes the customizable pfn format and simplifies the return code of hmm_range_fault(). All the drivers are adjusted to process the simplified format. I would appreciate Tested-by's for the two
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>

The API is a bit complicated for the uses we actually have, and discussions about simplifying it have come up a number of times. This small series removes the customizable pfn format and simplifies the return code of hmm_range_fault(). All the drivers are adjusted to process the simplified format. I would appreciate Tested-by's for the two
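For context on the format change this series makes: before it, each driver described its own pfn encoding through range->flags[], range->values[] and range->pfn_shift; after it, hmm_range_fault() fills range->hmm_pfns[] with a raw PFN plus fixed HMM_PFN_VALID/HMM_PFN_WRITE flag bits. A minimal sketch of a consumer, not taken from the series itself; dummy_map_range() and dummy_map_page() are hypothetical driver helpers:

/*
 * A minimal sketch, assuming the fixed HMM_PFN_* encoding this series
 * introduces; dummy_map_page() is a hypothetical driver helper.
 */
static int dummy_map_range(struct hmm_range *range)
{
        unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                unsigned long hmm_pfn = range->hmm_pfns[i];

                /* Hole, or the caller did not request a fault here. */
                if (!(hmm_pfn & HMM_PFN_VALID))
                        continue;
                /* hmm_pfn_to_page() masks the flag bits off the PFN. */
                dummy_map_page(hmm_pfn_to_page(hmm_pfn),
                               hmm_pfn & HMM_PFN_WRITE);
        }
        return 0;
}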
2020 Mar 16
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...b/mm/hmm.c
@@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
         /* We aren't ask to do anything ... */
         if (!(pfns & range->flags[HMM_PFN_VALID]))
                 return;
-        /* If this is device memory then only fault if explicitly requested */
-        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
-                /* Do we fault on device memory ? */
-                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
-                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
-                        *fault = true;
-                }
-                return;
-        }
 
         /* If CPU page table is not valid then we n...
2020 Mar 16
4
ensure device private pages have an owner
When acting on device private mappings a driver needs to know if the device (or other entity in case of kvmppc) actually owns this private mapping. This series adds an owner field and converts the migrate_vma code over to check it. I looked into doing the same for hmm_range_fault, but as far as I can tell that code has never been wired up to actually work for device private memory, so instead of
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...b/mm/hmm.c
@@ -118,15 +118,6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
         /* We aren't ask to do anything ... */
         if (!(pfns & range->flags[HMM_PFN_VALID]))
                 return;
-        /* If this is device memory then only fault if explicitly requested */
-        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
-                /* Do we fault on device memory ? */
-                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
-                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
-                        *fault = true;
-                }
-                return;
-        }
 
         /* If CPU page table is not valid then we n...
2020 Jun 30
0
[PATCH v2 2/5] mm/hmm: add output flags for PMD/PUD page mapping
...n pud_write(pud) ?
+                (HMM_PFN_VALID | HMM_PFN_PUD | HMM_PFN_WRITE) :
+                (HMM_PFN_VALID | HMM_PFN_PUD);
 }
 
 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,6 +472,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
         unsigned long cpu_flags;
         spinlock_t *ptl;
         pte_t entry;
+        unsigned int hshift = huge_page_shift(hstate_vma(vma));
 
         ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
         entry = huge_ptep_get(pte);
@@ -475,6 +480,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
         i = (start - range->...
2020 Mar 16
6
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
>          /* We aren't ask to do anything ... */
>          if (!(pfns & range->flags[HMM_PFN_VALID]))
>                  return;
> -        /* If this is device memory then only fault if explicitly requested */
> -        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
> -                /* Do we fault on device memory ? */
> -                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
> -                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
> -                        *fault = true;
> -                }
> -                return;
> -        }
> 
> ...
2019 Aug 07
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...mm_walk *walk,
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>          struct hmm_vma_walk *hmm_vma_walk = walk->private;
>          struct hmm_range *range = hmm_vma_walk->range;
> +        struct dev_pagemap *pgmap = NULL;
>          unsigned long pfn, npages, i;
>          bool fault, write_fault;
>          uint64_t cpu_flags;
> @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
>          pfn = pmd_pfn(pmd) + pte_index(addr);
>          for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
>                  if (pmd_devmap(pmd)) {
> -                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> -                                                              hmm_...
2020 Mar 16
4
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...6 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
>          /* We aren't ask to do anything ... */
>          if (!(pfns & range->flags[HMM_PFN_VALID]))
>                  return;
> -        /* If this is device memory then only fault if explicitly requested */
> -        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
> -                /* Do we fault on device memory ? */
> -                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
> -                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
> -                        *fault = true;
> -                }
> -                return;
> -        }
> 
> ...
2020 Jul 01
0
[PATCH v3 2/5] mm/hmm: add hmm_mapping order
...;
+                        HMM_PFN_ORDER_SHIFT) |
+                pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+                                 HMM_PFN_VALID;
 }
 
 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,13 +474,15 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
         unsigned long cpu_flags;
         spinlock_t *ptl;
         pte_t entry;
+        unsigned long horder = huge_page_order(hstate_vma(vma));
 
         ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
         entry = huge_ptep_get(pte);
         i = (start - range->start) >> PAGE_SHIFT;
         pfn_req_flags = range->hmm_pfns[i];
-        cpu_flags = pte_t...
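To illustrate what the encoded mapping order buys a caller, a minimal sketch assuming the hmm_pfn_to_map_order() helper this patch adds; dummy_map_contig() is a hypothetical driver function:

/*
 * A minimal sketch, assuming the hmm_pfn_to_map_order() helper this
 * patch adds; dummy_map_contig() is a hypothetical driver function.
 */
static void dummy_handle_pfn(struct hmm_range *range, unsigned long i)
{
        unsigned long hmm_pfn = range->hmm_pfns[i];
        /* Order of the CPU mapping: 0 for a PTE, larger for PMD/PUD/hugetlb. */
        unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

        /* Map one large device TLB entry instead of 1 << order small ones. */
        dummy_map_contig(hmm_pfn_to_page(hmm_pfn), 1UL << order,
                         hmm_pfn & HMM_PFN_WRITE);
}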
2019 Aug 07
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...E
> >          struct hmm_vma_walk *hmm_vma_walk = walk->private;
> >          struct hmm_range *range = hmm_vma_walk->range;
> > +        struct dev_pagemap *pgmap = NULL;
> >          unsigned long pfn, npages, i;
> >          bool fault, write_fault;
> >          uint64_t cpu_flags;
> > @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> >          pfn = pmd_pfn(pmd) + pte_index(addr);
> >          for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
> >                  if (pmd_devmap(pmd)) {
> > -                        hmm_v...
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...'s 1/2 patch - Ralph can you check, test, etc?

diff --git a/mm/hmm.c b/mm/hmm.c
index 9e8f68eb83287a..9fa4748da1b779 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -300,6 +300,20 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                         range->flags[HMM_PFN_DEVICE_PRIVATE];
                 cpu_flags |= is_write_device_private_entry(entry) ?
                         range->flags[HMM_PFN_WRITE] : 0;
+
+                /*
+                 * If the caller understands this kind of device_private
+                 * page, then leave it as is, otherwise fault it.
+                 */
+                hmm_vma_walk->pgmap = get_dev_pagemap(
+                        device_private_entry_to_pfn(entry...
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings a driver needs to know if the device (or other entity in case of kvmppc) actually owns this private mapping. This series adds an owner field and converts the migrate_vma code over to check it. I looked into doing the same for hmm_range_fault, but as far as I can tell that code has never been wired up to actually work for device private memory, so instead of
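To illustrate the owner handshake this series describes, a hedged sketch: the driver tags its device private pagemap with a cookie and passes the same cookie when migrating, so migrate_vma skips private pages owned by anyone else. All dummy_* names are hypothetical, and the migrate_vma field name is an assumption about the merged form of the series:

/*
 * A sketch of the owner handshake, under the assumption that the field
 * lands as migrate_vma.src_owner; all dummy_* names are hypothetical.
 */
static const int dummy_owner;        /* any unique address serves as the cookie */

static void dummy_pagemap_init(struct dev_pagemap *pgmap)
{
        pgmap->type = MEMORY_DEVICE_PRIVATE;
        /* Tag our device private pages so other migrators skip them. */
        pgmap->owner = &dummy_owner;
}

static int dummy_start_migrate(struct vm_area_struct *vma,
                               unsigned long start, unsigned long end,
                               unsigned long *src, unsigned long *dst)
{
        struct migrate_vma args = {
                .vma = vma,
                .start = start,
                .end = end,
                .src = src,
                .dst = dst,
                /* Only collect device private pages that we own. */
                .src_owner = &dummy_owner,
        };

        return migrate_vma_setup(&args);
}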
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...atic int hmm_vma_handle_pmd(struct mm_walk *walk,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
+        struct dev_pagemap *pgmap = NULL;
         unsigned long pfn, npages, i;
         bool fault, write_fault;
         uint64_t cpu_flags;
@@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
         pfn = pmd_pfn(pmd) + pte_index(addr);
         for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                 if (pmd_devmap(pmd)) {
-                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-                                                              hmm_vma_walk->pgmap);
-                        if (u...
2020 Apr 22
0
[PATCH hmm 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...ange, HMM_PFN_NONE);
 }
 
@@ -207,7 +206,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
         pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
         for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                 pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
-        hmm_vma_walk->last = end;
         return 0;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -386,13 +384,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
         r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
         if (r) {
                 /* hmm_vma_handle_pte() did pte_unmap() */
-                hmm_vma_walk->last =...
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,

below is a series against the hmm tree which cleans up various minor bits and allows HMM_MIRROR to be built on all architectures.

Diffstat:
 11 files changed, 94 insertions(+), 210 deletions(-)

A git tree is also available at:
    git://git.infradead.org/users/hch/misc.git hmm-cleanups.2

Gitweb:
2020 May 01
0
[PATCH hmm v2 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...ange, HMM_PFN_NONE);
 }
 
@@ -207,7 +206,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
         pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
         for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                 pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
-        hmm_vma_walk->last = end;
         return 0;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -386,13 +384,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
         r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
         if (r) {
                 /* hmm_vma_handle_pte() did pte_unmap() */
-                hmm_vma_walk->last =...
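With the simplified contract, a caller no longer tracks how far the walk got; it just retries on -EBUSY. A minimal sketch of the resulting call pattern, assuming the post-series API; dummy_fault_range() is a hypothetical wrapper:

/*
 * A minimal sketch of the caller retry loop under the simplified
 * 0-or-negative return contract; dummy_fault_range() is hypothetical.
 */
static int dummy_fault_range(struct mm_struct *mm, struct hmm_range *range)
{
        int ret;

        do {
                range->notifier_seq =
                        mmu_interval_read_begin(range->notifier);
                mmap_read_lock(mm);
                ret = hmm_range_fault(range);
                mmap_read_unlock(mm);
                /* -EBUSY: the range was invalidated while we walked it. */
        } while (ret == -EBUSY);

        return ret;
}

Before consuming range->hmm_pfns[] the driver still has to take its own lock and check mmu_interval_read_retry() against notifier_seq, as usual for mmu interval notifier users.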