Displaying 15 results from an estimated 15 matches for "hpage_pmd_nr".
2020 Sep 02 · 0 · [PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2a468a4acb0a..606d712d9505 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2023,7 +2023,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
put_page(page);
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (is_huge_zero_pmd(*pmd)) {
+ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2117,30 +2117,34 @@ static void __split...
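The one-line change in this hunk is all about evaluation order. A minimal sketch of why the pmd_trans_huge() guard is needed, assuming the 2020-era helper shape (approximate, not verbatim kernel code):

    /*
     * Sketch: is_huge_zero_pmd() was roughly
     *
     *     return is_huge_zero_page(pmd_page(pmd));
     *
     * pmd_page() decodes a pfn from the pmd, which is only meaningful for
     * a present pmd.  A migration entry pmd encodes a swap type/offset
     * instead, so decoding it as a pfn can spuriously match the huge
     * zero page.  The && short-circuits before that can happen:
     */
    } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
            /* only reached for a present huge pmd, so the pfn test is valid */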
2020 Sep 02 · 1 · [PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...c b/mm/huge_memory.c
> index 2a468a4acb0a..606d712d9505 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2023,7 +2023,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> put_page(page);
> add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
> return;
> - } else if (is_huge_zero_pmd(*pmd)) {
> + } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
> /*
> * FIXME: Do we want to invalidate secondary mmu by calling
> * mmu_notifier_invalidate_range() see comments below inside
> @@ -2117...
2020 Sep 03 · 1 · [PATCH v3] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2a468a4acb0a..606d712d9505 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2023,7 +2023,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
put_page(page);
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (is_huge_zero_pmd(*pmd)) {
+ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2117,30 +2117,34 @@ static void __split...
2020 Sep 02 · 10 · [PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted [1]. This version now supports
splitting a THP midway through the migration process, which led to a
number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
2020 Jun 19 · 0 · [PATCH 13/16] mm: support THP migration to device private memory
...ffset(entry));
+ is_anon = PageAnon(page);
+ }
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
- if (PageAnon(page)) {
+ if (is_anon) {
zap_deposited_table(tlb->mm, pmd);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
} else {
@@ -1778,8 +1790,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
/*
* Returns
* - 0 if PMD could not be locked
- * - 1 if PMD was locked but protections unchange and TLB flush unnecessary
- * - HPAGE_PMD_NR is protections changed and TLB flush neces...
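The is_anon local in this hunk captures a simple pattern: sample PageAnon() while the page pointer is known-good in each branch, then use the cached value afterwards. A condensed sketch of that pattern (helper names per 2020-era kernels; this is not the full zap_huge_pmd()):

    bool is_anon = false;
    struct page *page = NULL;

    if (pmd_present(orig_pmd)) {
            page = pmd_page(orig_pmd);
            is_anon = PageAnon(page);
            /* ... present-pmd teardown elided ... */
    } else if (thp_migration_supported()) {
            swp_entry_t entry = pmd_to_swp_entry(orig_pmd);

            page = migration_entry_to_page(entry);
            is_anon = PageAnon(page);       /* latched before use below */
    }

    if (is_anon)
            add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
    else
            add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);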
2020 Nov 06 · 1 · [PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
...}
It simplifies the users.
> +void prep_transhuge_device_private_page(struct page *page)
> +{
> + prep_compound_page(page, HPAGE_PMD_ORDER);
> + prep_transhuge_page(page);
> + /* Only the head page has a reference to the pgmap. */
> + percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
> +}
> +EXPORT_SYMBOL_GPL(prep_transhuge_device_private_page);
Something else that may interest you from my patch series is support
for page sizes other than PMD_SIZE. I don't know what page sizes your
hardware supports. There's no support for page sizes other than PMD
for an...
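For context, a sketch of a call site: a driver that owns a PMD-aligned run of HPAGE_PMD_NR device private pages could promote it with a single call. alloc_private_run() is an invented placeholder here, not a real API:

    /* hypothetical driver fragment; alloc_private_run() is invented */
    struct page *head = alloc_private_run(mdevice);

    prep_transhuge_device_private_page(head);
    /*
     * head is now a THP-style compound page; as the comment in the
     * patch notes, only the head page keeps a reference on its pgmap.
     */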
2020 Jun 21 · 2 · [PATCH 13/16] mm: support THP migration to device private memory
...);
> + }
> flush_needed = 0;
> } else
> WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
>
> - if (PageAnon(page)) {
> + if (is_anon) {
> zap_deposited_table(tlb->mm, pmd);
> add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> } else {
> @@ -1778,8 +1790,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
> /*
> * Returns
> * - 0 if PMD could not be locked
> - * - 1 if PMD was locked but protections unchange and TLB flush unnecessary
> - * - HPAGE_PMD_NR is prot...
2020 Nov 06 · 0 · [PATCH v3 3/6] mm: support THP migration to device private memory
...ffset(entry));
+ is_anon = PageAnon(page);
+ }
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
- if (PageAnon(page)) {
+ if (is_anon) {
zap_deposited_table(tlb->mm, pmd);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
} else {
@@ -2358,9 +2370,10 @@ static void remap_page(struct page *page, unsigned int nr)
}
static void __split_huge_page_tail(struct page *head, int tail,
- struct lruvec *lruvec, struct list_head *list)
+ struct lruvec *lruvec, struct list_head *list, bool remap)
{
struct page *pag...
2020 Jun 19 · 0 · [PATCH 15/16] mm/hmm/test: add self tests for THP migration
...fn < pfn_last; ) {
struct page *page = pfn_to_page(pfn);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /*
+ * Check for PMD aligned PFN and create a huge page.
+ * Check for "< pfn_last - 1" so that the last two huge pages
+ * are used for normal pages.
+ */
+ if ((pfn & (HPAGE_PMD_NR - 1)) == 0 &&
+ pfn + HPAGE_PMD_NR < pfn_last - 1) {
+ prep_compound_page(page, HPAGE_PMD_ORDER);
+ page->zone_device_data = mdevice->free_huge_pages;
+ mdevice->free_huge_pages = page;
+ pfn += HPAGE_PMD_NR;
+ percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD...
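The alignment check in this hunk works because HPAGE_PMD_NR is a power of two. A standalone sketch of the arithmetic (512 assumes x86-64's 2MB huge pages over 4KB base pages):

    /* assumption: x86-64, 2MB / 4KB, so HPAGE_PMD_NR == 512 */
    static bool pfn_is_pmd_aligned(unsigned long pfn)
    {
            /* power-of-two modulus: same as pfn % HPAGE_PMD_NR == 0 */
            return (pfn & (HPAGE_PMD_NR - 1)) == 0;
    }

The "pfn + HPAGE_PMD_NR < pfn_last - 1" bound then keeps the end of the range out of the huge-page pool, so the test device still has some order-0 pages, as the comment in the hunk says.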
2020 Nov 06 · 12 · [PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts but there may be some merge conflicts depending on
2020 Jun 19 · 22 · [PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patches 7-8 prepare nouveau for the meat of this series,
which adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2020 Sep 02 · 0 · [PATCH v2 4/7] mm/thp: add prep_transhuge_device_private_page()
...8,6 +498,14 @@ void prep_transhuge_page(struct page *page)
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
+void prep_transhuge_device_private_page(struct page *page)
+{
+ prep_compound_page(page, HPAGE_PMD_ORDER);
+ prep_transhuge_page(page);
+ percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
+}
+EXPORT_SYMBOL_GPL(prep_transhuge_device_private_page);
+
bool is_transparent_hugepage(struct page *page)
{
if (!PageCompound(page))
--
2.20.1
2020 Nov 06 · 0 · [PATCH v3 1/6] mm/thp: add prep_transhuge_device_private_page()
...)
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
+void prep_transhuge_device_private_page(struct page *page)
+{
+ prep_compound_page(page, HPAGE_PMD_ORDER);
+ prep_transhuge_page(page);
+ /* Only the head page has a reference to the pgmap. */
+ percpu_ref_put_many(page->pgmap->ref, HPAGE_PMD_NR - 1);
+}
+EXPORT_SYMBOL_GPL(prep_transhuge_device_private_page);
+
bool is_transparent_hugepage(struct page *page)
{
if (!PageCompound(page))
--
2.20.1
2019 Jun 26 · 0 · [PATCH 04/25] mm: remove MEMORY_DEVICE_PUBLIC support
...* support transparent huge page with MEMORY_DEVICE_PUBLIC or
- * MEMORY_DEVICE_PRIVATE but this might change.
+ * support transparent huge page with MEMORY_DEVICE_PRIVATE but
+ * this might change.
*/
if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
mc.precharge += HPAGE_PMD_NR;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8da0334b9ca0..d9fc1a8bdf6a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1177,16 +1177,12 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
goto unlock;
}
- switch (pgmap->type) {
- case ME...
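The hunk is cut off mid-switch, so only the shape of the change is visible: with MEMORY_DEVICE_PUBLIC gone, a switch over pgmap->type that singled out the device-memory cases can collapse into one comparison. A generic sketch of that kind of simplification, not the patch's verbatim result:

    /* before: two device-memory cases fall through together */
    switch (pgmap->type) {
    case MEMORY_DEVICE_PRIVATE:
    case MEMORY_DEVICE_PUBLIC:
            goto unlock;
    default:
            break;
    }

    /* after: MEMORY_DEVICE_PUBLIC no longer exists */
    if (pgmap->type == MEMORY_DEVICE_PRIVATE)
            goto unlock;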
2019 Jun 26 · 41 · dev_pagemap related cleanups v3
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable, which removes the need to wrap it in hmm
and thus allows a lot of code to be killed.
Note: this series is on top of Linux 5.2-rc5 and has some minor
conflicts with the hmm tree that are easy to resolve.
Diffstat summary:
32 files changed, 361 insertions(+), 1012 deletions(-)
Git