Displaying 13 results from an estimated 13 matches for "hpage_pmd_order".
2020 Nov 06
1
[PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
...ON(compound_order(page) == 1);
	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
+
+	return page;
 }
It simplifies the callers.
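For illustration, a caller could collapse allocate-then-prep into one
expression if prep_transhuge_page() returned its argument as suggested;
this is a sketch with assumed names, not code from the series:

/* Hypothetical caller, assuming prep_transhuge_page() is changed
 * to return the page it was passed; alloc_prepped_thp() is an
 * illustrative name, not a function from the patch.
 */
static struct page *alloc_prepped_thp(gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, HPAGE_PMD_ORDER);

	/* One expression instead of an allocate-then-prep pair. */
	return page ? prep_transhuge_page(page) : NULL;
}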
> +void prep_transhuge_device_private_page(struct page *page)
> +{
> +	prep_compound_page(page, HPAGE_PMD_ORDER);
> +	prep_transhuge_page(page);
> +	/* Only the head page has a reference to the pgmap. */
> +	percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
> +}
> +EXPORT_SYMBOL_GPL(prep_transhuge_device_private_page);
Something else that may interest you from my patch series is...
2020 Jun 22
1
[PATCH 14/16] mm/thp: add THP allocation helper
...;
> +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> +struct page *alloc_transhugepage(struct vm_area_struct *vma,
> +				unsigned long haddr)
> +{
> +	gfp_t gfp;
> +	struct page *page;
> +
> +	gfp = alloc_hugepage_direct_gfpmask(vma);
> +	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> +	if (page)
> +		prep_transhuge_page(page);
> +	return page;
> +}
> +EXPORT_SYMBOL_GPL(alloc_transhugepage);
> +#endif
> +
>  static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
> 		pgta...
2020 Jun 19
0
[PATCH 14/16] mm/thp: add THP allocation helper
..._pmd_anonymous_page(vmf, page, gfp);
 }
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+struct page *alloc_transhugepage(struct vm_area_struct *vma,
+				unsigned long haddr)
+{
+	gfp_t gfp;
+	struct page *page;
+
+	gfp = alloc_hugepage_direct_gfpmask(vma);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	if (page)
+		prep_transhuge_page(page);
+	return page;
+}
+EXPORT_SYMBOL_GPL(alloc_transhugepage);
+#endif
+
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
 		pgtable_t pgtable)
--
2.20.1
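For context, a driver could use this helper roughly as follows when
allocating the destination page for a huge migration; a minimal sketch
with assumed names (mydev_alloc_dst_thp and the fallback convention are
illustrative), not code from the series:

/* Hypothetical destination allocation in a driver migrate path,
 * assuming alloc_transhugepage() as added above.
 */
static struct page *mydev_alloc_dst_thp(struct vm_area_struct *vma,
					unsigned long haddr)
{
	struct page *dpage = alloc_transhugepage(vma, haddr);

	if (!dpage)
		return NULL;	/* caller falls back to base pages */
	/* prep_transhuge_page() was already done by the helper. */
	lock_page(dpage);
	return dpage;
}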
2020 Sep 02
0
[PATCH v2 4/7] mm/thp: add prep_transhuge_device_private_page()
...index a8d48994481a..1e848cc0c3dc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -498,6 +498,14 @@ void prep_transhuge_page(struct page *page)
 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
+void prep_transhuge_device_private_page(struct page *page)
+{
+	prep_compound_page(page, HPAGE_PMD_ORDER);
+	prep_transhuge_page(page);
+	percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
+}
+EXPORT_SYMBOL_GPL(prep_transhuge_device_private_page);
+
 bool is_transparent_hugepage(struct page *page)
 {
 	if (!PageCompound(page))
--
2.20.1
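As a usage illustration, a driver carving device private THPs from a
free list might call the new helper like this; a sketch loosely modeled
on the HMM test driver, with mydev and its fields being assumed names:

/* Hypothetical allocation of a device private THP from a LIFO
 * free list of head pages; the list linkage via zone_device_data
 * is modeled on lib/test_hmm.c.
 */
static struct page *mydev_alloc_huge(struct mydev *mdev)
{
	struct page *head = mdev->free_huge_pages;

	if (!head)
		return NULL;
	mdev->free_huge_pages = head->zone_device_data;
	/* Group HPAGE_PMD_NR device private pages into one THP and
	 * drop the HPAGE_PMD_NR - 1 tail references on the pgmap.
	 */
	prep_transhuge_device_private_page(head);
	return head;
}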
2020 Nov 06
0
[PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
...index 08a183f6c3ab..b4141f12ff31 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -498,6 +498,15 @@ void prep_transhuge_page(struct page *page)
 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
+void prep_transhuge_device_private_page(struct page *page)
+{
+	prep_compound_page(page, HPAGE_PMD_ORDER);
+	prep_transhuge_page(page);
+	/* Only the head page has a reference to the pgmap. */
+	percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
+}
+EXPORT_SYMBOL_GPL(prep_transhuge_device_private_page);
+
 bool is_transparent_hugepage(struct page *page)
 {
 	if (!PageCompound(page))
--
2....
2020 Nov 06
0
[PATCH v3 4/6] mm/thp: add THP allocation helper
...(struct vm_fault *vmf)
 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
 }
+struct page *alloc_transhugepage(struct vm_area_struct *vma,
+				unsigned long haddr)
+{
+	gfp_t gfp;
+	struct page *page;
+
+	gfp = alloc_hugepage_direct_gfpmask(vma);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	if (page)
+		prep_transhuge_page(page);
+	return page;
+}
+EXPORT_SYMBOL_GPL(alloc_transhugepage);
+
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
 		pgtable_t pgtable)
--
2.20.1
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted; I don't think there are any
semantic conflicts, but there may be some merge conflicts depending on
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...+				struct page *page,
+				unsigned long *src,
+				unsigned long *dst,
+				pmd_t *pmdp)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned int i;
+	spinlock_t *ptl;
+	bool flush = false;
+	pgtable_t pgtable;
+	gfp_t gfp;
+	pmd_t entry;
+
+	if (WARN_ON_ONCE(compound_order(page) != HPAGE_PMD_ORDER))
+		goto abort;
+
+	if (unlikely(anon_vma_prepare(vma)))
+		goto abort;
+
+	prep_transhuge_page(page);
+
+	gfp = GFP_TRANSHUGE_LIGHT;
+	if (mem_cgroup_charge(page, mm, gfp))
+		goto abort;
+
+	pgtable = pte_alloc_one(mm);
+	if (unlikely(!pgtable))
+		goto abort;
+
+	__SetPageUptodate(page);
+
+	if...
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8; the others can be queued for 5.9. Patches 4-6 improve the HMM
selftests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2020 Jun 19
0
[PATCH 15/16] mm/hmm/test: add self tests for THP migration
...* Check for PMD aligned PFN and create a huge page.
+	 * Check for "< pfn_last - 1" so that the last two huge pages
+	 * are used for normal pages.
+	 */
+	if ((pfn & (HPAGE_PMD_NR - 1)) == 0 &&
+	    pfn + HPAGE_PMD_NR < pfn_last - 1) {
+		prep_compound_page(page, HPAGE_PMD_ORDER);
+		page->zone_device_data = mdevice->free_huge_pages;
+		mdevice->free_huge_pages = page;
+		pfn += HPAGE_PMD_NR;
+		percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
+		continue;
+	}
+#endif
 	page->zone_device_data = mdevice->free_pages;
 	mdevice->free_pa...
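The alignment test works because HPAGE_PMD_NR is a power of two (512
with 4 KiB base pages and 2 MiB PMDs on x86-64), so masking with
HPAGE_PMD_NR - 1 isolates the low 9 bits of the pfn; a standalone
illustration of the same check:

/* Standalone demo of the PMD-alignment test; values assume 4 KiB
 * base pages and 2 MiB huge pages (x86-64 defaults).
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512UL

int main(void)
{
	unsigned long pfn;

	/* Only pfn 512 prints 1: its low 9 bits are all zero. */
	for (pfn = 510; pfn <= 514; pfn++)
		printf("pfn %lu PMD-aligned: %d\n", pfn,
		       (pfn & (HPAGE_PMD_NR - 1)) == 0);
	return 0;
}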
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...+				unsigned long *dst,
> +				pmd_t *pmdp)
> +{
> +	struct mm_struct *mm = vma->vm_mm;
> +	unsigned int i;
> +	spinlock_t *ptl;
> +	bool flush = false;
> +	pgtable_t pgtable;
> +	gfp_t gfp;
> +	pmd_t entry;
> +
> +	if (WARN_ON_ONCE(compound_order(page) != HPAGE_PMD_ORDER))
> +		goto abort;
> +
> +	if (unlikely(anon_vma_prepare(vma)))
> +		goto abort;
> +
> +	prep_transhuge_page(page);
> +
> +	gfp = GFP_TRANSHUGE_LIGHT;
> +	if (mem_cgroup_charge(page, mm, gfp))
> +		goto abort;
> +
> +	pgtable = pte_alloc_one(mm);
> +	if (unlik...
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted [1]. This version now supports
splitting a THP midway in the migration process, which led to a
number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
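As an aside on the splitting fallback mentioned above, the usual
pattern for demoting a THP that cannot migrate whole looks roughly
like this; a sketch (mydev_demote_thp is an illustrative name), not
the series' actual code:

/* split_huge_page() expects a locked head page and returns 0 on
 * success, after which the base pages can be migrated one by one.
 */
static int mydev_demote_thp(struct page *page)
{
	int ret;

	lock_page(page);
	ret = split_huge_page(page);
	unlock_page(page);
	return ret;	/* 0 on success, -EBUSY etc. on failure */
}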
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...unsigned long haddr,
+				struct page *page,
+				unsigned long *src,
+				pmd_t *pmdp)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned int i;
+	spinlock_t *ptl;
+	bool flush = false;
+	pgtable_t pgtable;
+	gfp_t gfp;
+	pmd_t entry;
+
+	if (WARN_ON_ONCE(compound_order(page) != HPAGE_PMD_ORDER))
+		goto abort;
+
+	if (unlikely(anon_vma_prepare(vma)))
+		goto abort;
+
+	prep_transhuge_page(page);
+
+	gfp = GFP_TRANSHUGE_LIGHT;
+	if (mem_cgroup_charge(page, mm, gfp))
+		goto abort;
+
+	pgtable = pte_alloc_one(mm);
+	if (unlikely(!pgtable))
+		goto abort;
+
+	__SetPageUptodate(page);
+
+	if...
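The excerpt cuts off before the page table update. Purely for
orientation, and not the patch's actual continuation, the normal
anonymous THP case installs the PMD roughly as
do_huge_pmd_anonymous_page() did around v5.10; a device private
destination would instead encode a device private swap entry in the
PMD:

/* Sketch of the usual anonymous-PMD installation, modeled on
 * do_huge_pmd_anonymous_page() circa v5.10; NOT the truncated
 * patch code above. A device private page would use a swap-style
 * PMD instead of mk_huge_pmd().
 */
	entry = mk_huge_pmd(page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	ptl = pmd_lock(mm, pmdp);
	page_add_new_anon_rmap(page, vma, haddr, true);
	lru_cache_add_inactive_or_unevictable(page, vma);
	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	set_pmd_at(mm, haddr, pmdp, entry);
	update_mmu_cache_pmd(vma, haddr, pmdp);
	spin_unlock(ptl);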