Displaying 13 results from an estimated 13 matches for "prep_transhuge_page".
2020 Nov 06
1
[PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
...v 05, 2020 at 04:51:42PM -0800, Ralph Campbell wrote:
> Add a helper function to allow device drivers to create device private
> transparent huge pages. This is intended to help support device private
> THP migrations.
I think you'd be better off with these calling conventions:
-void prep_transhuge_page(struct page *page)
+struct page *thp_prep(struct page *page)
 {
+	if (!page || compound_order(page) == 0)
+		return page;
 	/*
-	 * we use page->mapping and page->indexlru in second tail page
+	 * we use page->mapping and page->index in second tail p...
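Under that convention the complete helper stays small. A sketch, assuming the rest of the function keeps the existing prep_transhuge_page() body from mm/huge_memory.c (the deferred-list init and the compound destructor); the combined function below is illustrative, not a quote from the thread:

struct page *thp_prep(struct page *page)
{
	/* Proposed early return so callers may pass NULL or order-0 pages. */
	if (!page || compound_order(page) == 0)
		return page;
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
	return page;
}

Returning the page lets allocation sites chain the call, e.g. page = thp_prep(alloc_pages(...)), which is presumably the point of the suggested signature change.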
2020 Sep 02
0
[PATCH v2 4/7] mm/thp: add prep_transhuge_device_private_page()
...insertions(+)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 87b42c81dedc..126e54da4fee 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -187,6 +187,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
unsigned long flags);
extern void prep_transhuge_page(struct page *page);
+extern void prep_transhuge_device_private_page(struct page *page);
extern void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);
@@ -382,6 +383,10 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
static inlin...
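The second hunk is cut off above; a later reply in this thread quotes it as the usual no-op stub for !CONFIG_TRANSPARENT_HUGEPAGE builds. Put together, the huge_mm.h change being added looks roughly like this (a sketch assembled from the two quoted hunks, not the full patch):

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void prep_transhuge_device_private_page(struct page *page);
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static inline void prep_transhuge_device_private_page(struct page *page)
{
}
#endif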
2020 Nov 06
0
[PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
...insertions(+)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 0365aa97f8e7..3ec26ef27a93 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -184,6 +184,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
unsigned long flags);
extern void prep_transhuge_page(struct page *page);
+extern void prep_transhuge_device_private_page(struct page *page);
extern void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);
@@ -377,6 +378,10 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
static inlin...
2020 Jun 22
1
[PATCH 14/16] mm/thp: add THP allocation helper
...> +struct page *alloc_transhugepage(struct vm_area_struct *vma,
> +				unsigned long haddr)
> +{
> +	gfp_t gfp;
> +	struct page *page;
> +
> +	gfp = alloc_hugepage_direct_gfpmask(vma);
> +	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> +	if (page)
> +		prep_transhuge_page(page);
> +	return page;
> +}
> +EXPORT_SYMBOL_GPL(alloc_transhugepage);
> +#endif
> +
>  static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
> 		pgtable_t pgtable)
> --
> 2.20.1
Why use CO...
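Independent of that question, the intended use is straightforward. A hypothetical caller (not from the series) showing how a driver's migration path might back a PMD-sized range with a fresh system THP; clear_huge_page() and __SetPageUptodate() are the usual follow-up steps, and HPAGE_PMD_MASK/HPAGE_PMD_NR come from huge_mm.h:

static struct page *alloc_huge_dst_page(struct vm_area_struct *vma,
					unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct page *page;

	/* alloc_transhugepage() already calls prep_transhuge_page(). */
	page = alloc_transhugepage(vma, haddr);
	if (!page)
		return NULL;

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);
	return page;
}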
2020 Jun 19
0
[PATCH 14/16] mm/thp: add THP allocation helper
...p);
}
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+struct page *alloc_transhugepage(struct vm_area_struct *vma,
+		unsigned long haddr)
+{
+	gfp_t gfp;
+	struct page *page;
+
+	gfp = alloc_hugepage_direct_gfpmask(vma);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	if (page)
+		prep_transhuge_page(page);
+	return page;
+}
+EXPORT_SYMBOL_GPL(alloc_transhugepage);
+#endif
+
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
--
2.20.1
2020 Nov 06
0
[PATCH v3 4/6] mm/thp: add THP allocation helper
..._do_huge_pmd_anonymous_page(vmf, page, gfp);
}
+struct page *alloc_transhugepage(struct vm_area_struct *vma,
+		unsigned long haddr)
+{
+	gfp_t gfp;
+	struct page *page;
+
+	gfp = alloc_hugepage_direct_gfpmask(vma);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	if (page)
+		prep_transhuge_page(page);
+	return page;
+}
+EXPORT_SYMBOL_GPL(alloc_transhugepage);
+
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
--
2.20.1
2020 Nov 06
1
[PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
On Thu, Nov 05, 2020 at 04:51:42PM -0800, Ralph Campbell wrote:
> +extern void prep_transhuge_device_private_page(struct page *page);
No need for the extern.
> +static inline void prep_transhuge_device_private_page(struct page *page)
> +{
> +}
Is the code to call this even reachable if THP support is configured
out? If not, just declaring it unconditionally and letting dead code
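The idiom being suggested relies on the call site being compile-time dead when THP is configured out, so the undefined symbol is never referenced. A sketch of what that looks like (the caller below is hypothetical; only the declaration comes from the patch):

/* Unconditional declaration: no extern, no !THP stub needed. */
void prep_transhuge_device_private_page(struct page *page);

static void some_device_migration_path(struct page *page, bool is_thp)
{
	/*
	 * IS_ENABLED() folds to a constant, so when THP is configured out
	 * the compiler discards the call and the missing definition never
	 * causes a link error.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && is_thp)
		prep_transhuge_device_private_page(page);
}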
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted previously [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts but there may be some merge conflicts depending on
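For orientation, the consumer-side flow being extended is the usual three-phase migrate_vma_*() sequence. A sketch over one PMD-aligned range using the 5.10-era interface; the function itself and the elided allocate/copy steps are hypothetical, and with this series a THP covering the range can be collected and migrated as a single compound page instead of being split into HPAGE_PMD_NR order-0 entries:

static int migrate_one_pmd_range(struct vm_area_struct *vma,
				 unsigned long haddr, void *pgmap_owner)
{
	/* A real driver would allocate these rather than use the stack. */
	unsigned long src_pfns[HPAGE_PMD_NR] = { 0 };
	unsigned long dst_pfns[HPAGE_PMD_NR] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.start		= haddr,
		.end		= haddr + HPAGE_PMD_SIZE,
		.src		= src_pfns,
		.dst		= dst_pfns,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	/* Phase 1: collect and unmap the source pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* ... allocate destination pages, copy data for each src entry
	 *     with MIGRATE_PFN_MIGRATE set, and fill dst_pfns[] ... */

	/* Phase 2: install the destination pages. */
	migrate_vma_pages(&args);
	/* Phase 3: remove migration entries and restore CPU mappings. */
	migrate_vma_finalize(&args);
	return 0;
}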
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted previously [1]. This version now
supports splitting a THP midway in the migration process which
led to a number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...d_t *pmdp)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned int i;
+	spinlock_t *ptl;
+	bool flush = false;
+	pgtable_t pgtable;
+	gfp_t gfp;
+	pmd_t entry;
+
+	if (WARN_ON_ONCE(compound_order(page) != HPAGE_PMD_ORDER))
+		goto abort;
+
+	if (unlikely(anon_vma_prepare(vma)))
+		goto abort;
+
+	prep_transhuge_page(page);
+
+	gfp = GFP_TRANSHUGE_LIGHT;
+	if (mem_cgroup_charge(page, mm, gfp))
+		goto abort;
+
+	pgtable = pte_alloc_one(mm);
+	if (unlikely(!pgtable))
+		goto abort;
+
+	__SetPageUptodate(page);
+
+	if (is_zone_device_page(page)) {
+		if (!is_device_private_page(page))
+			goto pgtable_abort;
+		e...
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...int i;
> +	spinlock_t *ptl;
> +	bool flush = false;
> +	pgtable_t pgtable;
> +	gfp_t gfp;
> +	pmd_t entry;
> +
> +	if (WARN_ON_ONCE(compound_order(page) != HPAGE_PMD_ORDER))
> +		goto abort;
> +
> +	if (unlikely(anon_vma_prepare(vma)))
> +		goto abort;
> +
> +	prep_transhuge_page(page);
> +
> +	gfp = GFP_TRANSHUGE_LIGHT;
> +	if (mem_cgroup_charge(page, mm, gfp))
> +		goto abort;
> +
> +	pgtable = pte_alloc_one(mm);
> +	if (unlikely(!pgtable))
> +		goto abort;
> +
> +	__SetPageUptodate(page);
> +
> +	if (is_zone_device_page(page)) {
> +...
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patch 7-8 prepare nouveau for the meat of this series which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...d_t *pmdp)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned int i;
+	spinlock_t *ptl;
+	bool flush = false;
+	pgtable_t pgtable;
+	gfp_t gfp;
+	pmd_t entry;
+
+	if (WARN_ON_ONCE(compound_order(page) != HPAGE_PMD_ORDER))
+		goto abort;
+
+	if (unlikely(anon_vma_prepare(vma)))
+		goto abort;
+
+	prep_transhuge_page(page);
+
+	gfp = GFP_TRANSHUGE_LIGHT;
+	if (mem_cgroup_charge(page, mm, gfp))
+		goto abort;
+
+	pgtable = pte_alloc_one(mm);
+	if (unlikely(!pgtable))
+		goto abort;
+
+	__SetPageUptodate(page);
+
+	if (is_zone_device_page(page)) {
+		if (!is_device_private_page(page))
+			goto pgtable_abort;
+		e...