search for: hstate_vma

Displaying 14 results from an estimated 14 matches for "hstate_vma".

2019 Jul 30
0
[PATCH 08/13] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
..._walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	unsigned long addr = start, i, pfn, mask;
+	unsigned long addr = start, i, pfn;
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
 	struct vm_area_struct *vma = walk->vma;
-	struct hstate *h = hstate_vma(vma);
 	uint64_t orig_pfn, cpu_flags;
 	bool fault, write_fault;
 	spinlock_t *ptl;
 	pte_t entry;
 	int ret = 0;

-	mask = huge_page_size(h) - 1;
-
 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(p...
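For context, the mask being removed is just the huge-page size minus one, and the page walker already supplies the complementary huge_page_mask() value through the hmask argument visible in the function signature quoted in other results here, so computing it locally was redundant. A minimal standalone sketch of that arithmetic, with a 2 MiB huge page standing in for the kernel-internal huge_page_size(h):

#include <stdio.h>

int main(void)
{
	/* Example value only: a 2 MiB huge page in place of huge_page_size(h). */
	unsigned long huge_page_size = 2UL << 20;
	unsigned long mask  = huge_page_size - 1;	/* the local variable removed */
	unsigned long hmask = ~mask;			/* what the walker passes in */

	/* Offset within the huge page, computed both ways -- identical. */
	unsigned long addr = 0x40001000UL;
	printf("%#lx %#lx\n", addr & mask, addr & ~hmask);
	return 0;
}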
2019 Aug 06
0
[PATCH 08/15] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
..._walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	unsigned long addr = start, i, pfn, mask;
+	unsigned long addr = start, i, pfn;
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
 	struct vm_area_struct *vma = walk->vma;
-	struct hstate *h = hstate_vma(vma);
 	uint64_t orig_pfn, cpu_flags;
 	bool fault, write_fault;
 	spinlock_t *ptl;
 	pte_t entry;
 	int ret = 0;

-	mask = huge_page_size(h) - 1;
-
 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(p...
2020 Jun 30
0
[PATCH v2 2/5] mm/hmm: add output flags for PMD/PUD page mapping
...ALID | HMM_PFN_PUD);
 }

 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,6 +472,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	unsigned long cpu_flags;
 	spinlock_t *ptl;
 	pte_t entry;
+	unsigned int hshift = huge_page_shift(hstate_vma(vma));

 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
@@ -475,6 +480,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	i = (start - range->start) >> PAGE_SHIFT;
 	pfn_req_flags = range->hmm_pfns[i];
 	cpu_flags = pt...
2020 Jul 01
0
[PATCH v3 2/5] mm/hmm: add hmm_mapping order
...HMM_PFN_VALID;
 }

 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,13 +474,15 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	unsigned long cpu_flags;
 	spinlock_t *ptl;
 	pte_t entry;
+	unsigned long horder = huge_page_order(hstate_vma(vma));

 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);

 	i = (start - range->start) >> PAGE_SHIFT;
 	pfn_req_flags = range->hmm_pfns[i];
-	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+		(...
2019 Jul 26
0
[PATCH v2 5/7] mm/hmm: make full use of walk_page_range()
...+
+	/*
+	 * Skip vma ranges that don't have struct page backing them or
+	 * map I/O devices directly.
+	 * TODO: handle peer-to-peer device mappings.
+	 */
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
+		return -EFAULT;
+
+	if (is_vm_hugetlb_page(vma)) {
+		if (huge_page_shift(hstate_vma(vma)) != range->page_shift &&
+		    range->page_shift != PAGE_SHIFT)
+			return -EINVAL;
+	} else {
+		if (range->page_shift != PAGE_SHIFT)
+			return -EINVAL;
+	}
+
+	/*
+	 * If vma does not allow read access, then assume that it does not
+	 * allow write access, either. HMM does...
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason, below is a series against the hmm tree which cleans up various minor bits and allows HMM_MIRROR to be built on all architectures. Diffstat: 7 files changed, 81 insertions(+), 171 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git hmm-cleanups Gitweb:
2019 Jul 30
0
[PATCH 07/13] mm: remove the page_shift member from struct hmm_range
...FT) {
-		/* Make sure we are looking at a full page. */
-		if (start & mask)
-			return -EINVAL;
-		if (end < (start + size))
-			return -EINVAL;
-		pfn_inc = size >> PAGE_SHIFT;
-	} else {
-		pfn_inc = 1;
-		size = PAGE_SIZE;
-	}
+	mask = huge_page_size(h) - 1;

 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);

-	i = (start - range->start) >> range->page_shift;
+	i = (start - range->start) >> PAGE_SHIFT;
 	orig_pfn = range->pfns[i];
 	range->pfns[i] = range->values[HMM_PFN_NONE];
 	cpu_flags = pte_to_hmm_pfn_flags(range...
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...e are looking at full page. */
+		/* Make sure we are looking at a full page. */
 		if (start & mask)
 			return -EINVAL;
 		if (end < (start + size))
@@ -809,8 +805,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		size = PAGE_SIZE;
 	}
-
-	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
+	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);

 	i = (start - range->start) >> range->page_shift;
@@ -859,7 +854,7 @@ static void hmm_pfns_clear(struct hmm_range *range,
  * @start: start virtual address (inclu...
2019 Aug 06
0
[PATCH 07/15] mm: remove the page_shift member from struct hmm_range
...FT) {
-		/* Make sure we are looking at a full page. */
-		if (start & mask)
-			return -EINVAL;
-		if (end < (start + size))
-			return -EINVAL;
-		pfn_inc = size >> PAGE_SHIFT;
-	} else {
-		pfn_inc = 1;
-		size = PAGE_SIZE;
-	}
+	mask = huge_page_size(h) - 1;

 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);

-	i = (start - range->start) >> range->page_shift;
+	i = (start - range->start) >> PAGE_SHIFT;
 	orig_pfn = range->pfns[i];
 	range->pfns[i] = range->values[HMM_PFN_NONE];
 	cpu_flags = pte_to_hmm_pfn_flags(range...
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
Here are seven more patches for things I found to clean up. This was based on top of Christoph's seven patches: "hmm_range_fault related fixes and legacy API removal v3". I assume this will go into Jason's tree since there will likely be more HMM changes in this cycle. Changes from v1 to v2: Added AMD GPU to hmm_update removal. Added 2 patches from Christoph. Added 2 patches as
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason, below is a series against the hmm tree which cleans up various minor bits and allows HMM_MIRROR to be built on all architectures. Diffstat: 11 files changed, 94 insertions(+), 210 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git hmm-cleanups.2 Gitweb:
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output array flags HMM_PFN_PMD and HMM_PFN_PUD. These allow a device driver to know that a given 4K PFN is actually mapped by the CPU using either a PMD-sized or PUD-sized CPU page table entry, and therefore the device driver can safely map system memory using larger device MMU PTEs. The series is based on 5.8.0-rc3 and is intended for
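As a rough illustration of how a driver might consume the flags proposed in this series: HMM_PFN_PMD and HMM_PFN_PUD come from the series itself, but the bit positions and the x86-64 sizes below are illustrative placeholders, not the real definitions.

#include <stdio.h>

/* Placeholder bit positions; the series defines the real HMM_PFN_* values. */
#define HMM_PFN_PMD	(1UL << 62)
#define HMM_PFN_PUD	(1UL << 61)

/* Example sizes for x86-64. */
#define PAGE_SIZE	(4UL << 10)
#define PMD_SIZE	(2UL << 20)
#define PUD_SIZE	(1UL << 30)

/* Pick the largest device MMU PTE size the CPU mapping permits. */
static unsigned long device_map_size(unsigned long hmm_pfn)
{
	if (hmm_pfn & HMM_PFN_PUD)
		return PUD_SIZE;
	if (hmm_pfn & HMM_PFN_PMD)
		return PMD_SIZE;
	return PAGE_SIZE;
}

int main(void)
{
	/* A PMD-mapped pfn yields a 2 MiB device mapping: prints 2097152. */
	printf("%lu\n", device_map_size(0x1234UL | HMM_PFN_PMD));
	return 0;
}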
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order() function. This allows a device driver to know that a given 4K PFN is actually mapped by the CPU using a larger CPU page table entry, and therefore the device driver can safely map system memory using larger device MMU PTEs. The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's hmm tree. These were
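A self-contained sketch of the encode/decode round trip this approach implies, assuming the order is packed into the high bits of each hmm_pfn with the BITS_PER_LONG - 8 shift used on 64-bit kernels; encode_map_order() is a hypothetical stand-in for what the walker does internally:

#include <stdio.h>

/* Mirrors HMM_PFN_ORDER_SHIFT on 64-bit kernels (BITS_PER_LONG - 8). */
#define HMM_PFN_ORDER_SHIFT	56

/* Walker side: fold the CPU mapping order into the pfn value. */
static unsigned long encode_map_order(unsigned long hmm_pfn, unsigned int order)
{
	return hmm_pfn | ((unsigned long)order << HMM_PFN_ORDER_SHIFT);
}

/* Driver side: recover the order, as hmm_pfn_to_map_order() does. */
static unsigned int to_map_order(unsigned long hmm_pfn)
{
	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}

int main(void)
{
	/* Order 9 == a PMD-sized (2 MiB) mapping with 4 KiB base pages. */
	unsigned long pfn = encode_map_order(0x1234UL, 9);

	/* Prints "order 9 -> 2048 KiB". */
	printf("order %u -> %lu KiB\n", to_map_order(pfn),
	       4UL << to_map_order(pfn));
	return 0;
}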
2019 Jul 01
30
dev_pagemap related cleanups v4
Hi Dan, Jérôme and Jason, below is a series that cleans up the dev_pagemap interface so that it is more easily usable, which removes the need to wrap it in hmm and thus allows a lot of code to be killed. Note: this series is on top of Linux 5.2-rc6 and has some minor conflicts with the hmm tree that are easy to resolve. Diffstat summary: 34 files changed, 379 insertions(+), 1016 deletions(-) Git