Displaying 10 results from an estimated 10 matches for "huge_page_shift".
2019 Jul 26
0
[PATCH v2 5/7] mm/hmm: make full use of walk_page_range()
...eturn -EBUSY;
+
+ /*
+ * Skip vma ranges that don't have struct page backing them or
+ * map I/O devices directly.
+ * TODO: handle peer-to-peer device mappings.
+ */
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
+ return -EFAULT;
+
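+ /*
+ * Huge page vmas can only be faulted at the vma's native huge page
+ * granularity or at the default PAGE_SHIFT granularity.
+ */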
+ if (is_vm_hugetlb_page(vma)) {
+ if (huge_page_shift(hstate_vma(vma)) != range->page_shift &&
+ range->page_shift != PAGE_SHIFT)
+ return -EINVAL;
+ } else {
+ if (range->page_shift != PAGE_SHIFT)
+ return -EINVAL;
+ }
+
+ /*
+ * If vma does not allow read access, then assume that it does not
+ * allow write access, eithe...
2019 Jul 26
13
[PATCH v2 0/7] mm/hmm: more HMM clean up
Here are seven more patches for things I found to clean up.
This was based on top of Christoph's seven patches:
"hmm_range_fault related fixes and legacy API removal v3".
I assume this will go into Jason's tree since there will likely be
more HMM changes in this cycle.
Changes from v1 to v2:
Added AMD GPU to hmm_update removal.
Added 2 patches from Christoph.
Added 2 patches as
2020 Jun 30
0
[PATCH v2 2/5] mm/hmm: add output flags for PMD/PUD page mapping
...:
+ (HMM_PFN_VALID | HMM_PFN_PUD);
}
static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -468,6 +472,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
unsigned long cpu_flags;
spinlock_t *ptl;
pte_t entry;
+ unsigned int hshift = huge_page_shift(hstate_vma(vma));
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
@@ -475,6 +480,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
cpu...
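The hunk is truncated here, but the new hshift value is what lets the
hugetlb walker report how large the CPU mapping is. A minimal sketch of
that idea, using the HMM_PFN_PMD/HMM_PFN_PUD flags this series proposes
(illustrative, not the patch's exact code):

	unsigned long cpu_flags = HMM_PFN_VALID;

	if (hshift >= PUD_SHIFT)	/* e.g. a 1 GiB hugetlb page */
		cpu_flags |= HMM_PFN_PUD;
	else if (hshift >= PMD_SHIFT)	/* e.g. a 2 MiB hugetlb page */
		cpu_flags |= HMM_PFN_PMD;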
2019 Jul 30
0
[PATCH 07/13] mm: remove the page_shift member from struct hmm_range
...E - 1)))
return -EINVAL;
if (range->start >= range->end)
return -EINVAL;
@@ -964,16 +950,6 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
if (vma == NULL || (vma->vm_flags & device_vma))
return -EFAULT;
- if (is_vm_hugetlb_page(vma)) {
- if (huge_page_shift(hstate_vma(vma)) !=
- range->page_shift &&
- range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- } else {
- if (range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- }
-
if (!(vma->vm_flags & VM_READ)) {
/*
* If vma does not allow read access,...
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...* huge or transparent huge. At this point either it is a valid pmd
* entry pointing to pte directory or it is a bad pmd that will not
* recover.
@@ -795,10 +791,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
int ret = 0;
- size = 1UL << huge_page_shift(h);
+ size = huge_page_size(h);
mask = size - 1;
if (range->page_shift != PAGE_SHIFT) {
- /* Make sure we are looking at full page. */
+ /* Make sure we are looking at a full page. */
if (start & mask)
return -EINVAL;
if (end < (start + size))
@@ -809,8 +805,7 @@ static in...
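To make the mask check above concrete, take a 2 MiB huge page
(illustrative values only, not from the patch):

	size = huge_page_size(h);	/* 0x200000 for a 2 MiB hstate */
	mask = size - 1;		/* 0x1fffff */
	/* start = 0x200000 passes (start & mask == 0); start = 0x201000 fails. */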
2019 Aug 06
0
[PATCH 07/15] mm: remove the page_shift member from struct hmm_range
...E - 1)))
return -EINVAL;
if (range->start >= range->end)
return -EINVAL;
@@ -964,16 +950,6 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
if (vma == NULL || (vma->vm_flags & device_vma))
return -EFAULT;
- if (is_vm_hugetlb_page(vma)) {
- if (huge_page_shift(hstate_vma(vma)) !=
- range->page_shift &&
- range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- } else {
- if (range->page_shift != PAGE_SHIFT)
- return -EINVAL;
- }
-
if (!(vma->vm_flags & VM_READ)) {
/*
* If vma does not allow read access,...
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
7 files changed, 81 insertions(+), 171 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups
Gitweb:
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. These allow a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD-sized or PUD-sized CPU page table entry, and therefore the device
driver can safely map system memory using larger device MMU PTEs.
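As a sketch of the driver side, a helper like the hypothetical one below
could pick the device page table entry size from the returned flags; only
HMM_PFN_PMD and HMM_PFN_PUD come from this series, the rest is
illustrative:

	/* Hypothetical: choose a device mapping shift from the output flags. */
	static unsigned int pfn_flags_to_map_shift(unsigned long hmm_pfn)
	{
		if (hmm_pfn & HMM_PFN_PUD)
			return PUD_SHIFT;	/* CPU maps this PFN with a PUD entry */
		if (hmm_pfn & HMM_PFN_PMD)
			return PMD_SHIFT;	/* CPU maps this PFN with a PMD entry */
		return PAGE_SHIFT;		/* normal 4K mapping */
	}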
The series is based on 5.8.0-rc3 and is intended for
2019 Jul 01
30
dev_pagemap related cleanups v4
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable. This removes the need to wrap it in hmm and
thus allows killing a lot of code.
Note: this series is on top of Linux 5.2-rc6 and has some minor
conflicts with the hmm tree that are easy to resolve.
Diffstat summary:
34 files changed, 379 insertions(+), 1016 deletions(-)
Git
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb: