Displaying 12 results from an estimated 12 matches for "huge_pte_lock".
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
.../* Make sure we are looking at full page. */
+ /* Make sure we are looking at a full page. */
if (start & mask)
return -EINVAL;
if (end < (start + size))
@@ -809,8 +805,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
size = PAGE_SIZE;
}
-
- ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
+ ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
i = (start - range->start) >> range->page_shift;
@@ -859,7 +854,7 @@ static void hmm_pfns_clear(struct hmm_range *range,
* @start: start virtual add...
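For reference, huge_pte_lock() (the search term here) takes the VMA's hstate, the mm, and the hugetlb PTE, returns the page table spinlock covering that PTE, and must be paired with spin_unlock() on every exit path. A minimal sketch of the locking pattern these hunks rearrange; huge_pte_lock(), huge_ptep_get() and hstate_vma() are the real hugetlb helpers, the function around them is a made-up example:

	#include <linux/hugetlb.h>
	#include <linux/mm.h>

	/* Illustrative only, not a function from this series. */
	static int inspect_hugetlb_pte(struct vm_area_struct *vma, pte_t *pte)
	{
		spinlock_t *ptl;
		pte_t entry;

		/* Take the page table lock covering this hugetlb PTE. */
		ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, pte);
		entry = huge_ptep_get(pte);	/* read the PTE under the lock */

		if (!pte_present(entry)) {
			spin_unlock(ptl);	/* every exit path must unlock */
			return -EFAULT;
		}

		/* ... use pte_pfn(entry) etc. while the lock is held ... */
		spin_unlock(ptl);
		return 0;
	}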
2019 Jul 30
0
[PATCH 08/13] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
...>private;
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
- struct hstate *h = hstate_vma(vma);
uint64_t orig_pfn, cpu_flags;
bool fault, write_fault;
spinlock_t *ptl;
pte_t entry;
int ret = 0;
- mask = huge_page_size(h) - 1;
-
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
goto unlock;
}
- pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+ pfn = pte_pfn(entry) + ((start & hmask) >&...
2019 Aug 06
0
[PATCH 08/15] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
...>private;
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
- struct hstate *h = hstate_vma(vma);
uint64_t orig_pfn, cpu_flags;
bool fault, write_fault;
spinlock_t *ptl;
pte_t entry;
int ret = 0;
- mask = huge_page_size(h) - 1;
-
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
goto unlock;
}
- pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+ pfn = pte_pfn(entry) + ((start & ~hmask) >...
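The two postings of this hunk differ in the offset calculation: the hugetlb walker passes hmask as huge_page_mask(h), i.e. ~(huge_page_size(h) - 1), so the removed "start & mask" offset corresponds to "start & ~hmask", as the 08/15 version has it. A standalone sketch of the arithmetic, with illustrative values:

	/* Demo of mask vs. hmask: hmask is the complement of the removed
	 * local mask, so the in-hugepage offset needs ~hmask. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long huge_size = 1UL << 21;	/* assume 2M huge pages */
		unsigned long mask = huge_size - 1;	/* the removed local */
		unsigned long hmask = ~mask;		/* huge_page_mask(h) */
		unsigned long start = (1UL << 30) | (5UL << 12);

		/* Both print 5: the offset in 4K base pages. */
		printf("%lu %lu\n", (start & mask) >> 12,
		       (start & ~hmask) >> 12);
		return 0;
	}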
2020 Jun 30
0
[PATCH v2 2/5] mm/hmm: add output flags for PMD/PUD page mapping
...tatic int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,6 +472,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
unsigned long cpu_flags;
spinlock_t *ptl;
pte_t entry;
+ unsigned int hshift = huge_page_shift(hstate_vma(vma));
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -475,6 +480,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
cpu_flags = pte_to_hmm_pfn_flags(range, entry...
2020 Jul 01
0
[PATCH v3 2/5] mm/hmm: add hmm_mapping order
...ic int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,13 +474,15 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
unsigned long cpu_flags;
spinlock_t *ptl;
pte_t entry;
+ unsigned long horder = huge_page_order(hstate_vma(vma));
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
- cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+ cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+ (horder << HMM_PFN_ORDER_S...
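Here the huge page order is folded into the high bits of each output pfn. Assuming the HMM_PFN_ORDER_SHIFT layout that ended up in mainline include/linux/hmm.h (the order lives in the top byte of the value), the round trip looks like:

	/* Encode/decode sketch; the constants mirror mainline hmm.h and
	 * are an assumption for this in-flight series. */
	#include <stdio.h>

	#define BITS_PER_LONG		64
	#define HMM_PFN_ORDER_SHIFT	(BITS_PER_LONG - 8)

	int main(void)
	{
		unsigned long horder = 9;	/* 2M huge page, 4K base pages */
		unsigned long hmm_pfn = 0x1234 | (horder << HMM_PFN_ORDER_SHIFT);

		/* Decode: 5-bit order from the top byte, prints 9. */
		printf("%lu\n", (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1f);
		return 0;
	}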
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason,

below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
7 files changed, 81 insertions(+), 171 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups
Gitweb:
2019 Jul 30
0
[PATCH 07/13] mm: remove the page_shift member from struct hmm_range
...ft != PAGE_SHIFT) {
- /* Make sure we are looking at a full page. */
- if (start & mask)
- return -EINVAL;
- if (end < (start + size))
- return -EINVAL;
- pfn_inc = size >> PAGE_SHIFT;
- } else {
- pfn_inc = 1;
- size = PAGE_SIZE;
- }
+ mask = huge_page_size(h) - 1;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
- i = (start - range->start) >> range->page_shift;
+ i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_...
2019 Aug 06
0
[PATCH 07/15] mm: remove the page_shift member from struct hmm_range
...ft != PAGE_SHIFT) {
- /* Make sure we are looking at a full page. */
- if (start & mask)
- return -EINVAL;
- if (end < (start + size))
- return -EINVAL;
- pfn_inc = size >> PAGE_SHIFT;
- } else {
- pfn_inc = 1;
- size = PAGE_SIZE;
- }
+ mask = huge_page_size(h) - 1;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
- i = (start - range->start) >> range->page_shift;
+ i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_...
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
Here are seven more patches for things I found to clean up.
This was based on top of Christoph's seven patches:
"hmm_range_fault related fixes and legacy API removal v3".
I assume this will go into Jason's tree since there will likely be
more HMM changes in this cycle.
Changes from v1 to v2:
Added AMD GPU to hmm_update removal.
Added 2 patches from Christoph.
Added 2 patches as
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD sized or PUD sized CPU page table entry and therefore the device
driver can safely map system memory using larger device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for
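With the flags this version proposes, a driver consuming hmm_range_fault() output could pick its device MMU page size per entry. A hedged sketch: HMM_PFN_PMD and HMM_PFN_PUD are named by the cover letter, while their bit positions and the helper below are hypothetical (v3 of the series replaced the flags with an explicit mapping order, see the next result):

	#include <stdio.h>

	#define HMM_PFN_PMD	(1UL << 60)	/* hypothetical bit values */
	#define HMM_PFN_PUD	(1UL << 59)

	/* Largest device MMU page size safe for this CPU mapping. */
	static unsigned long device_map_size(unsigned long hmm_pfn)
	{
		if (hmm_pfn & HMM_PFN_PUD)
			return 1UL << 30;	/* CPU uses a PUD entry: 1G */
		if (hmm_pfn & HMM_PFN_PMD)
			return 1UL << 21;	/* CPU uses a PMD entry: 2M */
		return 1UL << 12;		/* default: 4K */
	}

	int main(void)
	{
		printf("%lu\n", device_map_size(HMM_PFN_PMD));	/* 2097152 */
		return 0;
	}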
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order()
function. This allows a device driver to know that a given 4K PFN is
actually mapped by the CPU using a larger sized CPU page table entry and
therefore the device driver can safely map system memory using larger
device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's
hmm tree. These were
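hmm_pfn_to_map_order() is the form that was merged. A sketch of the driver-side loop the cover letter implies, mapping the largest chunk each entry allows; the decode matches the mainline helper, while the fake pfn array and the mapping step are illustrative:

	#include <stdio.h>

	#define BITS_PER_LONG		64
	#define HMM_PFN_ORDER_SHIFT	(BITS_PER_LONG - 8)

	/* Matches the mainline helper: order is the top byte's low 5 bits. */
	static unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
	{
		return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1f;
	}

	int main(void)
	{
		unsigned long pfns[12];
		unsigned long i;

		/* Fake output: entries 0-7 share one order-3 CPU mapping,
		 * the rest are plain order-0 (4K) pages. */
		for (i = 0; i < 8; i++)
			pfns[i] = (0x100 + i) | (3UL << HMM_PFN_ORDER_SHIFT);
		for (; i < 12; i++)
			pfns[i] = 0x200 + i;

		for (i = 0; i < 12; ) {
			unsigned long n = 1UL << hmm_pfn_to_map_order(pfns[i]);

			/* One device PTE can safely cover n base pages. */
			printf("map %lu pages at index %lu\n", n, i);
			i += n;
		}
		return 0;
	}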