search for: pte_unmap

Displaying 20 results from an estimated 44 matches for "pte_unmap".

2019 Aug 07
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...;pgmap = NULL; - } hmm_vma_walk->last = end; return 0; #else @@ -604,10 +600,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, return 0; fault: - if (hmm_vma_walk->pgmap) { - put_dev_pagemap(hmm_vma_walk->pgmap); - hmm_vma_walk->pgmap = NULL; - } pte_unmap(ptep); /* Fault any virtual address we were asked to fault */ return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); @@ -690,16 +682,6 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, return r; } } - if (hmm_vma_walk->pgmap) { - /* - * We do put_dev_pagemap() here and not in...
2020 Sep 02
0
[PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...ifier_invalidate_range() see comments below inside @@ -2117,30 +2117,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - atomic_inc(&page[i]._mapcount); - pte_unmap(pte); - } - - /* - * Set PG_double_map before dropping compound_mapcount to avoid - * false-negative page_mapped(). - */ - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { - for (i = 0; i < HPAGE_PMD_NR; i++) + if (!pmd_migration) atomic_inc(&page[i]._map...
2019 Aug 14
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...;pgmap = NULL; - } hmm_vma_walk->last = end; return 0; #else @@ -604,10 +600,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, return 0; fault: - if (hmm_vma_walk->pgmap) { - put_dev_pagemap(hmm_vma_walk->pgmap); - hmm_vma_walk->pgmap = NULL; - } pte_unmap(ptep); /* Fault any virtual address we were asked to fault */ return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); @@ -690,16 +682,6 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, return r; } } - if (hmm_vma_walk->pgmap) { - /* - * We do put_dev_pagemap() here and not in...
2020 Sep 02
1
[PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...omments below inside > @@ -2117,30 +2117,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, > pte = pte_offset_map(&_pmd, addr); > BUG_ON(!pte_none(*pte)); > set_pte_at(mm, addr, pte, entry); > - atomic_inc(&page[i]._mapcount); > - pte_unmap(pte); > - } > - > - /* > - * Set PG_double_map before dropping compound_mapcount to avoid > - * false-negative page_mapped(). > - */ > - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { > - for (i = 0; i < HPAGE_PMD_NR; i++) > + if (!pm...
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...CIAL]; @@ -604,10 +600,10 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, return 0; fault: - if (hmm_vma_walk->pgmap) { - put_dev_pagemap(hmm_vma_walk->pgmap); - hmm_vma_walk->pgmap = NULL; - } + if (*pgmap) + put_dev_pagemap(*pgmap); + *pgmap = NULL; + pte_unmap(ptep); /* Fault any virtual address we were asked to fault */ return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); @@ -620,6 +616,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, { struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->rang...
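The hunk above replaces the pgmap cache kept in struct hmm_vma_walk with a caller-owned pointer. A minimal sketch of the underlying idiom, not taken from the patch itself (walk_device_pfns() is a hypothetical helper; get_dev_pagemap()/put_dev_pagemap() come from <linux/memremap.h>):

    static void walk_device_pfns(const unsigned long *pfns, unsigned int npfns)
    {
            struct dev_pagemap *pgmap = NULL;
            unsigned int i;

            for (i = 0; i < npfns; i++) {
                    /*
                     * Passing the previous pointer back in lets
                     * get_dev_pagemap() reuse the held reference while
                     * successive pfns stay inside the same pagemap.
                     */
                    pgmap = get_dev_pagemap(pfns[i], pgmap);
                    if (!pgmap)
                            continue;       /* not device memory */
                    /* ... use the page while the reference is held ... */
            }
            /* Drop the final reference exactly once, after the walk. */
            if (pgmap)
                    put_dev_pagemap(pgmap);
    }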
2019 Aug 14
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
On Tue, Aug 13, 2019 at 06:36:33PM -0700, Dan Williams wrote: > Section alignment constraints somewhat save us here. The only example > I can think of a PMD not containing a uniform pgmap association for > each pte is the case when the pgmap overlaps normal dram, i.e. shares > the same 'struct memory_section' for a given span. Otherwise, distinct > pgmaps arrange to manage
2019 Aug 07
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, > return 0; > > fault: > - if (hmm_vma_walk->pgmap) { > - put_dev_pagemap(hmm_vma_walk->pgmap); > - hmm_vma_walk->pgmap = NULL; > - } > pte_unmap(ptep); > /* Fault any virtual address we were asked to fault */ > return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); > @@ -690,16 +682,6 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, > return r; > } > } > ...
2020 Apr 22
0
[PATCH hmm 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...ice_entry_from_pfn(range, pfn) | cpu_flags; - hmm_vma_walk->last = end; return 0; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -386,13 +384,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns); if (r) { /* hmm_vma_handle_pte() did pte_unmap() */ - hmm_vma_walk->last = addr; return r; } } pte_unmap(ptep - 1); - - hmm_vma_walk->last = addr; return 0; } @@ -455,7 +450,6 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end, for (i = 0; i < npages; ++i, ++pfn) pfns[i] = hmm_de...
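The pte_unmap(ptep - 1) in this hunk is the usual per-PMD walk idiom: the PTE page is mapped once, ptep advances across the range, and the error path relies on the per-PTE helper having already unmapped, which is what the "hmm_vma_handle_pte() did pte_unmap()" comment records. A minimal sketch, with a hypothetical handle_one_pte() standing in for hmm_vma_handle_pte():

    static int walk_one_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
                            struct mm_walk *walk)
    {
            pte_t *ptep = pte_offset_map(pmdp, addr);   /* map the PTE page once */

            for (; addr < end; addr += PAGE_SIZE, ptep++) {
                    int r = handle_one_pte(walk, addr, pmdp, ptep);

                    if (r)
                            return r;   /* helper already did pte_unmap(ptep) */
            }
            pte_unmap(ptep - 1);        /* ptep is one past the last entry used */
            return 0;
    }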
2020 May 01
0
[PATCH hmm v2 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...ice_entry_from_pfn(range, pfn) | cpu_flags; - hmm_vma_walk->last = end; return 0; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -386,13 +384,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns); if (r) { /* hmm_vma_handle_pte() did pte_unmap() */ - hmm_vma_walk->last = addr; return r; } } pte_unmap(ptep - 1); - - hmm_vma_walk->last = addr; return 0; } @@ -455,7 +450,6 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end, for (i = 0; i < npages; ++i, ++pfn) pfns[i] = hmm_de...
2020 Sep 03
1
[PATCH v3] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...ifier_invalidate_range() see comments below inside @@ -2117,30 +2117,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - atomic_inc(&page[i]._mapcount); - pte_unmap(pte); - } - - /* - * Set PG_double_map before dropping compound_mapcount to avoid - * false-negative page_mapped(). - */ - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { - for (i = 0; i < HPAGE_PMD_NR; i++) + if (!pmd_migration) atomic_inc(&page[i]._map...
2020 Apr 22
0
[PATCH hmm 4/5] mm/hmm: remove HMM_PFN_SPECIAL
...flags[HMM_PFN_VALID])) return NULL; return pfn_to_page(entry >> range->pfn_shift); diff --git a/mm/hmm.c b/mm/hmm.c index 4c7c396655b528..2693393dc14b30 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -301,7 +301,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, pte_unmap(ptep); return -EFAULT; } - *pfn = range->values[HMM_PFN_SPECIAL]; + *pfn = range->values[HMM_PFN_ERROR]; return 0; } -- 2.26.0
2020 May 01
0
[PATCH hmm v2 4/5] mm/hmm: remove HMM_PFN_SPECIAL
...flags[HMM_PFN_VALID])) return NULL; return pfn_to_page(entry >> range->pfn_shift); diff --git a/mm/hmm.c b/mm/hmm.c index f06bcac948a79b..2e975eedb14f89 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -301,7 +301,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, pte_unmap(ptep); return -EFAULT; } - *pfn = range->values[HMM_PFN_SPECIAL]; + *pfn = range->values[HMM_PFN_ERROR]; return 0; } -- 2.26.2
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com> The API is a bit complicated for the uses we actually have, and discussions for simplifying have come up a number of times. This small series removes the customizable pfn format and simplifies the return code of hmm_range_fault(). All the drivers are adjusted to process in the simplified format. I would appreciate tested-by's for the two
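For context on the simplified format the cover letter describes, a hedged sketch of the consumer side, assuming the post-series hmm.h (hmm_range_fault() returns 0 or -errno, each range->hmm_pfns[] entry packs the pfn into the low bits and flags such as HMM_PFN_VALID / HMM_PFN_WRITE into the high bits). map_one_page() is a hypothetical driver helper, and the mmap_lock plus mmu_interval_notifier retry loop a real driver needs are omitted:

    static int map_range_into_device(struct hmm_range *range, unsigned long npages)
    {
            unsigned long i;
            int ret;

            ret = hmm_range_fault(range);   /* 0 on success, -errno on failure */
            if (ret)
                    return ret;

            for (i = 0; i < npages; i++) {
                    unsigned long hmm_pfn = range->hmm_pfns[i];

                    if (!(hmm_pfn & HMM_PFN_VALID))
                            continue;
                    /* pfn is in the low bits; hmm_pfn_to_page() strips the flags */
                    map_one_page(hmm_pfn_to_page(hmm_pfn),
                                 !!(hmm_pfn & HMM_PFN_WRITE));
            }
            return 0;
    }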
2007 Apr 18
1
[PATCH 4/9] Vmi fix highpte
...d(dir, address) \ +({ \ + pte_t *__ptep; \ + unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ + __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\ + paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \ + __ptep = __ptep + pte_index(address); \ + __ptep; \ +}) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else
2007 Apr 18
1
[PATCH 4/9] Vmi fix highpte
...d(dir, address) \ +({ \ + pte_t *__ptep; \ + unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ + __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\ + paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \ + __ptep = __ptep + pte_index(address); \ + __ptep; \ +}) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else
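These two identical hits show the CONFIG_HIGHPTE definitions that make pte_unmap() a real operation rather than a no-op. A minimal sketch of the pairing every caller has to respect (pure illustration, not from the patch):

    pte_t *ptep = pte_offset_map(pmd, addr);  /* kmap_atomic() of the PTE page */
    pte_t pte = *ptep;                        /* only valid while the mapping is live */
    pte_unmap(ptep);                          /* kunmap_atomic(); required before sleeping */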
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...te; struct hmm_range *range = hmm_vma_walk->range; - struct vm_area_struct *vma = walk->vma; bool fault, write_fault; uint64_t cpu_flags; pte_t pte = *ptep; @@ -571,8 +570,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, if (fault || write_fault) { pte_unmap(ptep); hmm_vma_walk->last = addr; - migration_entry_wait(vma->vm_mm, - pmdp, addr); + migration_entry_wait(walk->mm, pmdp, addr); return -EBUSY; } return 0; @@ -620,13 +618,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, { struct hmm_vma_walk *hmm_vma_wa...
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com> The API is a bit complicated for the uses we actually have, and discussions for simplifying have come up a number of times. This small series removes the customizable pfn format and simplifies the return code of hmm_range_fault(). All the drivers are adjusted to process in the simplified format. I would appreciate tested-by's for the two
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason, below is a series against the hmm tree which cleans up various minor bits and allows HMM_MIRROR to be built on all architectures. Diffstat: 11 files changed, 94 insertions(+), 210 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git hmm-cleanups.2 Gitweb:
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers. An earlier version was posted previously [1]. This version now supports splitting a THP midway in the migration process which led to a number of changes. The patches apply cleanly to the current linux-mm tree. Since there are a couple of patches in linux-mm from Dan
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
..._vma_handle_pte(struct mm_walk *walk, unsigned long addr, * fall through and treat it like a normal page. */ if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) { - if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) { + if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) { pte_unmap(ptep); return -EFAULT; } - *pfn = range->values[HMM_PFN_ERROR]; + *hmm_pfn = HMM_PFN_ERROR; return 0; } - *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags; + *hmm_pfn = pte_pfn(pte) | cpu_flags; return 0; fault: @@ -321,7 +311,8 @@ static int hmm_vma_walk_pm...