Alistair Popple
2021-Mar-26 00:08 UTC
[Nouveau] [PATCH v7 3/8] mm/rmap: Split try_to_munlock from try_to_unmap
The behaviour of try_to_unmap_one() is difficult to follow because it
performs different operations based on a fairly large set of flags used
in different combinations.
TTU_MUNLOCK is one such flag. However it is exclusively used by
try_to_munlock() which specifies no other flags. Therefore, rather than
overloading try_to_unmap_one() with unrelated behaviour, split this out into
its own function and remove the flag.
Signed-off-by: Alistair Popple <apopple at nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell at nvidia.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
v7:
* Added Christoph's Reviewed-by
v4:
* Removed redundant check for VM_LOCKED
---
include/linux/rmap.h | 1 -
mm/rmap.c | 40 ++++++++++++++++++++++++++++++++--------
2 files changed, 32 insertions(+), 9 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..e26ac2d71346 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -87,7 +87,6 @@ struct anon_vma_chain {
enum ttu_flags {
TTU_MIGRATION = 0x1, /* migration mode */
- TTU_MUNLOCK = 0x2, /* munlock mode */
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
diff --git a/mm/rmap.c b/mm/rmap.c
index 977e70803ed8..d02bade5245b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1405,10 +1405,6 @@ static bool try_to_unmap_one(struct page *page, struct
vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
- /* munlock has nothing to gain from examining un-locked vmas */
- if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
- return true;
-
if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)
&&
is_zone_device_page(page) && !is_device_private_page(page))
return true;
@@ -1469,8 +1465,6 @@ static bool try_to_unmap_one(struct page *page, struct
vm_area_struct *vma,
page_vma_mapped_walk_done(&pvmw);
break;
}
- if (flags & TTU_MUNLOCK)
- continue;
}
/* Unexpected PMD-mapped THP? */
@@ -1784,6 +1778,37 @@ bool try_to_unmap(struct page *page, enum ttu_flags
flags)
return !page_mapcount(page) ? true : false;
}
+static bool try_to_munlock_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, void *arg)
+{
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ };
+
+ /* munlock has nothing to gain from examining un-locked vmas */
+ if (!(vma->vm_flags & VM_LOCKED))
+ return true;
+
+ while (page_vma_mapped_walk(&pvmw)) {
+ /* PTE-mapped THP are never mlocked */
+ if (!PageTransCompound(page)) {
+ /*
+ * Holding pte lock, we do *not* need
+ * mmap_lock here
+ */
+ mlock_vma_page(page);
+ }
+ page_vma_mapped_walk_done(&pvmw);
+
+ /* found a mlocked page, no point continuing munlock check */
+ return false;
+ }
+
+ return true;
+}
+
/**
* try_to_munlock - try to munlock a page
* @page: the page to be munlocked
@@ -1796,8 +1821,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
void try_to_munlock(struct page *page)
{
struct rmap_walk_control rwc = {
- .rmap_one = try_to_unmap_one,
- .arg = (void *)TTU_MUNLOCK,
+ .rmap_one = try_to_munlock_one,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
--
2.20.1
Jason Gunthorpe
2021-Mar-30 18:49 UTC
[Nouveau] [PATCH v7 3/8] mm/rmap: Split try_to_munlock from try_to_unmap
On Fri, Mar 26, 2021 at 11:08:00AM +1100, Alistair Popple wrote:> +static bool try_to_munlock_one(struct page *page, struct vm_area_struct *vma, > + unsigned long address, void *arg) > +{Is this function name right?> + struct page_vma_mapped_walk pvmw = { > + .page = page, > + .vma = vma, > + .address = address, > + }; > + > + /* munlock has nothing to gain from examining un-locked vmas */ > + if (!(vma->vm_flags & VM_LOCKED)) > + return true; > + > + while (page_vma_mapped_walk(&pvmw)) { > + /* PTE-mapped THP are never mlocked */ > + if (!PageTransCompound(page)) { > + /* > + * Holding pte lock, we do *not* need > + * mmap_lock here > + */ > + mlock_vma_page(page);Because the only action this function seems to take is to call *mlock*_vma_page()> + } > + page_vma_mapped_walk_done(&pvmw); > + > + /* found a mlocked page, no point continuing munlock check */ > + return false; > + } > + > + return true; > +} > + > /** > * try_to_munlock - try to munlock a page > * @page: the page to be munlocked > @@ -1796,8 +1821,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags) > void try_to_munlock(struct page *page) > {But this is also called try_to_munlock ?? /** * try_to_munlock - try to munlock a page * @page: the page to be munlocked * * Called from munlock code. Checks all of the VMAs mapping the page * to make sure nobody else has this page mlocked. The page will be * returned with PG_mlocked cleared if no other vmas have it mlocked. */ So what clears PG_mlocked on this call path? Something needs attention here.. Jason