search for: pmd_trans_huge

Displaying 20 results from an estimated 23 matches for "pmd_trans_huge".

2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...48 +2222,87 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 	unsigned long addr = start, unmapped = 0;
 	spinlock_t *ptl;
 	pte_t *ptep;
+	pmd_t pmd;

 again:
-	if (pmd_none(*pmdp))
+	pmd = READ_ONCE(*pmdp);
+	if (pmd_none(pmd))
 		return migrate_vma_collect_hole(start, end, -1, walk);

-	if (pmd_trans_huge(*pmdp)) {
+	if (pmd_trans_huge(pmd) || !pmd_present(pmd)) {
 		struct page *page;
+		unsigned long write = 0;
+		int ret;

 		ptl = pmd_lock(mm, pmdp);
-		if (unlikely(!pmd_trans_huge(*pmdp))) {
-			spin_unlock(ptl);
-			goto again;
-		}
+		if (pmd_trans_huge(*pmdp)) {
+			page = pmd_page(*pmdp);...
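The idiom in this hunk is: snapshot the PMD once with READ_ONCE(), classify the snapshot, then take pmd_lock() and re-check the live entry before acting on it, falling back if a concurrent split won the race. Below is a minimal userspace C11 sketch of that check/lock/re-check pattern; entry, entry_lock and is_huge() are hypothetical stand-ins, not kernel API.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for a page-table entry and its lock. */
    static _Atomic unsigned long entry = 0x200000; /* bit 21 set: "huge" */
    static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;

    static int is_huge(unsigned long e) { return e & 0x200000; }

    static void walk_entry(void)
    {
            /* Snapshot once; never re-read the shared value while unlocked. */
            unsigned long snap = atomic_load_explicit(&entry, memory_order_relaxed);

            if (is_huge(snap)) {
                    pthread_mutex_lock(&entry_lock);
                    /* Re-check under the lock: a concurrent "split" may have
                     * changed the entry since the unlocked snapshot. */
                    if (is_huge(atomic_load_explicit(&entry, memory_order_relaxed)))
                            printf("still huge: handle as one large entry\n");
                    else
                            printf("split raced with us: fall back to small entries\n");
                    pthread_mutex_unlock(&entry_lock);
            }
    }

    int main(void) { walk_entry(); return 0; }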
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...*pmdp,
> 	unsigned long addr = start, unmapped = 0;
> 	spinlock_t *ptl;
> 	pte_t *ptep;
> +	pmd_t pmd;
>
> again:
> -	if (pmd_none(*pmdp))
> +	pmd = READ_ONCE(*pmdp);
> +	if (pmd_none(pmd))
> 		return migrate_vma_collect_hole(start, end, -1, walk);
>
> -	if (pmd_trans_huge(*pmdp)) {
> +	if (pmd_trans_huge(pmd) || !pmd_present(pmd)) {
> 		struct page *page;
> +		unsigned long write = 0;
> +		int ret;
>
> 		ptl = pmd_lock(mm, pmdp);
> -		if (unlikely(!pmd_trans_huge(*pmdp))) {
> -			spin_unlock(ptl);
> -			goto again;
> -		}
> +		if (...
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...MC_TARGET_DEVICE;
+	} else {
+		page = pmd_page(pmd);
+		ret = MC_TARGET_PAGE;
 	}
-	page = pmd_page(pmd);
 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
 	if (!(mc.flags & MOVE_ANON))
 		return ret;
@@ -5828,12 +5838,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
-		/*
-		 * Note their can not be MC_TARGET_DEVICE for now as we do not
-		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
-		 * this might change.
-		 */
-		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
+		if (get_mctgt_type_thp(vma,...
2020 Sep 02
0
[PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...d9505 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2023,7 +2023,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		put_page(page);
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
 		return;
-	} else if (is_huge_zero_pmd(*pmd)) {
+	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
 		/*
 		 * FIXME: Do we want to invalidate secondary mmu by calling
 		 * mmu_notifier_invalidate_range() see comments below inside
@@ -2117,30 +2117,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pte = pte_offset_map...
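The one-line fix works because && short-circuits: a migration entry parked in the PMD is not a present mapping, so is_huge_zero_pmd() must not inspect its bits until pmd_trans_huge(*pmd) has confirmed the entry's type. A self-contained C sketch of the same guard-ordering idea, with hypothetical names (is_present(), decode_pfn(), is_zero_page()):

    #include <assert.h>
    #include <stdio.h>

    #define PRESENT_BIT 0x1UL
    #define ZERO_PFN    42UL

    static int is_present(unsigned long e) { return e & PRESENT_BIT; }

    /* Decoding a pfn is only meaningful for a present entry; a migration
     * entry reuses these bits for a swap offset instead. */
    static unsigned long decode_pfn(unsigned long e)
    {
            assert(is_present(e));
            return e >> 1;
    }

    static int is_zero_page(unsigned long e) { return decode_pfn(e) == ZERO_PFN; }

    int main(void)
    {
            unsigned long migration_entry = 123UL << 1; /* not present */

            /* Wrong order would decode bits that do not hold a pfn here.
             * Right order establishes the entry type first, as the patch
             * does with pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd). */
            if (is_present(migration_entry) && is_zero_page(migration_entry))
                    printf("zero page\n");
            else
                    printf("not a present zero-page entry\n");
            return 0;
    }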
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go into 5.8; the others can be queued for 5.9. Patches 4-6 improve the HMM self tests. Patches 7-8 prepare nouveau for the meat of this series, which adds support and testing for compound page mapping of system memory (patches 9-11) and compound page migration to device private memory (patches 12-16). Since these changes are split
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers. An earlier version was posted [1]. This version now supports splitting a THP midway through the migration process, which led to a number of changes. The patches apply cleanly to the current linux-mm tree. Since there are a couple of patches in linux-mm from Dan
2020 Sep 02
1
[PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...> +++ b/mm/huge_memory.c > @@ -2023,7 +2023,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, > put_page(page); > add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); > return; > - } else if (is_huge_zero_pmd(*pmd)) { > + } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) { > /* > * FIXME: Do we want to invalidate secondary mmu by calling > * mmu_notifier_invalidate_range() see comments below inside > @@ -2117,30 +2117,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, &gt...
2019 Sep 11
0
[PATCH 1/4] mm/hmm: make full use of walk_page_range()
...ROR];
+		pfns[i] = range->values[value];
 	return 0;
 }
@@ -584,7 +583,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		}
 		return 0;
 	} else if (!pmd_present(pmd))
-		return hmm_pfns_bad(start, end, walk);
+		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
@@ -612,7 +611,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	 * recover.
 	 */
 	if (pmd_bad(pmd))
-		return hmm_pfns_bad(start, end, walk);
+		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

 	ptep = pte_offset_map(pmdp, addr);
 	i = (addr - range->start) >> PAGE...
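The cleanup here folds the special-purpose hmm_pfns_bad() into a single hmm_pfns_fill() that takes the fill value as a parameter, so holes and errors share one helper. A minimal sketch of that shape, assuming an illustrative array-based signature rather than the kernel's range-based one:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PFN_ERROR UINT64_MAX
    #define PFN_NONE  0

    /* Fill pfns[first..last) with one value; callers pick the value instead
     * of a dedicated "bad" helper hard-coding it. */
    static int pfns_fill(uint64_t *pfns, size_t first, size_t last, uint64_t value)
    {
            for (size_t i = first; i < last; i++)
                    pfns[i] = value;
            return 0;
    }

    int main(void)
    {
            uint64_t pfns[8] = {0};

            pfns_fill(pfns, 0, 4, PFN_ERROR); /* e.g. a pmd_bad() range */
            pfns_fill(pfns, 4, 8, PFN_NONE);  /* e.g. a hole */
            printf("pfns[0]=%llu pfns[7]=%llu\n",
                   (unsigned long long)pfns[0], (unsigned long long)pfns[7]);
            return 0;
    }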
2019 Jul 26
0
[PATCH v2 2/7] mm/hmm: a few more C style and comment clean ups
...fault);
 	if (fault || write_fault) {
 		hmm_vma_walk->last = addr;
-		pmd_migration_entry_wait(vma->vm_mm, pmdp);
+		pmd_migration_entry_wait(walk->mm, pmdp);
 		return -EBUSY;
 	}
 	return 0;
@@ -657,11 +653,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
-		 * No need to take pmd_lock here, even if some other threads
+		 * No need to take pmd_lock here, even if some other thread
 		 * is splitting the huge pmd we will get that event through
 		 * mmu_notifier callback.
 		 *
-		 * So just read pmd value and check again its a transpare...
2020 Mar 20
0
[PATCH 2/2] mm/thp: Rename pmd_mknotpresent() as pmd_mknotvalid()
...(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+#define pmd_mknotvalid(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index aef5378f909c..2a66dee3a9b8 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -615,7 +615,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	return pmd;
 }...
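Mechanically, the macro being renamed just clears the hardware valid bit in the PMD descriptor: the entry then faults, while its other bits remain intact for software to read back. A standalone sketch of that bit manipulation, where the descriptor type and VALID_BIT are illustrative (the real arm64 bit is PMD_SECT_VALID):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pmd_desc;          /* illustrative descriptor type */
    #define VALID_BIT (1ULL << 0)       /* illustrative valid bit */

    /* Clear only the valid bit: hardware now faults on this entry, but the
     * pfn and permission bits are preserved for software to inspect. */
    #define mknotvalid(d) ((pmd_desc)((d) & ~VALID_BIT))

    int main(void)
    {
            pmd_desc d = 0xABCD0000ULL | VALID_BIT;
            pmd_desc inv = mknotvalid(d);

            printf("before=%#llx after=%#llx\n",
                   (unsigned long long)d, (unsigned long long)inv);
            return 0;
    }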
2020 Sep 03
1
[PATCH v3] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...d9505 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2023,7 +2023,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		put_page(page);
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
 		return;
-	} else if (is_huge_zero_pmd(*pmd)) {
+	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
 		/*
 		 * FIXME: Do we want to invalidate secondary mmu by calling
 		 * mmu_notifier_invalidate_range() see comments below inside
@@ -2117,30 +2117,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pte = pte_offset_map...
2016 Nov 25
5
[PATCH 0/3] virtio/vringh: kill off ACCESS_ONCE()
On 11/25/2016 05:17 PM, Peter Zijlstra wrote:
> On Fri, Nov 25, 2016 at 04:10:04PM +0000, Mark Rutland wrote:
>> On Fri, Nov 25, 2016 at 04:21:39PM +0100, Dmitry Vyukov wrote:
>
>>> What are use cases for such primitive that won't be OK with "read once
>>> _and_ atomically"?
>>
>> I have none to hand.
>
> Whatever triggers the
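The primitive under discussion is a load that happens exactly once and without tearing. A userspace approximation of the READ_ONCE() idiom via a volatile cast, shown as an illustration rather than the kernel's actual header (the kernel additionally special-cases access sizes):

    #include <stdio.h>

    /* Force exactly one load from memory; the volatile cast stops the
     * compiler from refetching or caching the value. Tear-freedom also
     * relies on the object being naturally aligned and word-sized. */
    #define READ_ONCE_ISH(x) (*(const volatile __typeof__(x) *)&(x))

    static unsigned long shared = 42;

    int main(void)
    {
            unsigned long v = READ_ONCE_ISH(shared);
            printf("%lu\n", v);
            return 0;
    }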
2020 Apr 22
1
[PATCH V2 0/2] mm/thp: Rename pmd_mknotpresent() as pmd_mkinvalid()
This series renames pmd_mknotpresent() as pmd_mkinvalid(). Before that, it drops an existing pmd_mknotpresent() definition from the powerpc platform, which was never required as powerpc defines its pmdp_invalidate() by subscribing to __HAVE_ARCH_PMDP_INVALIDATE. This does not create any functional change. This rename was suggested by Catalin during a previous discussion while we were trying to change
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers. Earlier versions were posted previously [1] and [2]. The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a lot of other THP patches being posted; I don't think there are any semantic conflicts, but there may be some merge conflicts depending on
2020 Mar 20
4
[PATCH 0/2] mm/thp: Rename pmd_mknotpresent() as pmd_mknotvalid()
This series renames pmd_mknotpresent() as pmd_mknotvalid(). Before that, it drops an existing pmd_mknotpresent() definition from the powerpc platform, which was never required as powerpc defines its pmdp_invalidate() by subscribing to __HAVE_ARCH_PMDP_INVALIDATE. This does not create any functional change. This rename was suggested by Catalin during a previous discussion while we were trying to
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...f (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+	if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
 		return -EFAULT;
 	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 }
@@ -362,7 +353,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 			goto again;

-		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
+		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
 	}

 	/*
@@ -372,16 +363,16 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	 * recover.
 	 */
 	if (pmd_bad(pmd)) {
-		if (hmm_range_need_fault(hmm_vma_wal...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...f (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+	if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
 		return -EFAULT;
 	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 }
@@ -362,7 +353,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 			goto again;

-		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
+		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
 	}

 	/*
@@ -372,16 +363,16 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	 * recover.
 	 */
 	if (pmd_bad(pmd)) {
-		if (hmm_range_need_fault(hmm_vma_wal...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...a_walk, pfns, npages, 0))
> +	if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
> 		return -EFAULT;
> 	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
> }
> @@ -362,7 +353,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
> 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
> 			goto again;
>
> -		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
> +		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
> }
>
> /*
> @@ -372,16 +363,16 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
> 	 * recover.
> 	 */
> 	if (...
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>

The API is a bit complicated for the uses we actually have, and discussions about simplifying it have come up a number of times. This small series removes the customizable pfn format and simplifies the return code of hmm_range_fault(). All the drivers are adjusted to process the simplified format. I would appreciate Tested-by's for the two