search for: memunmap_pages

Displaying 16 results from an estimated 16 matches for "memunmap_pages".

2020 Nov 11
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...ently, memremap_pages() allocates struct pages for a physical address range with a page_ref_count(page) of one, and increments the pgmap->ref per-CPU reference count by the number of pages created, since each ZONE_DEVICE struct page holds a pointer to the pgmap. The struct pages are not freed until memunmap_pages() is called, which calls put_page(), which calls put_dev_pagemap(), which releases a reference to pgmap->ref. memunmap_pages() blocks waiting for the pgmap->ref reference count to drop to zero. As far as I can tell, the put_page() in memunmap_pages() has to be the *last* put_page() (see MEMORY_DEVICE_PCI...
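[Editor's note: to make the lifecycle described above concrete, here is a minimal sketch of a device-private driver against the v5.10-era struct-range API. This is not code from the thread; the demo_* names are invented for illustration.]

    #include <linux/memremap.h>
    #include <linux/numa.h>
    #include <linux/err.h>

    static void demo_page_free(struct page *page)
    {
            /* The last put_page() on a device page lands here; a real
             * driver returns the page to its free list. */
    }

    static vm_fault_t demo_migrate_to_ram(struct vm_fault *vmf)
    {
            /* A real driver migrates the data back to system memory. */
            return VM_FAULT_SIGBUS;
    }

    static const struct dev_pagemap_ops demo_ops = {
            .page_free      = demo_page_free,
            .migrate_to_ram = demo_migrate_to_ram,
    };

    static struct dev_pagemap demo_pagemap;

    static int demo_create(u64 start, u64 size)
    {
            demo_pagemap.type = MEMORY_DEVICE_PRIVATE;
            demo_pagemap.range.start = start;
            demo_pagemap.range.end = start + size - 1;
            demo_pagemap.nr_range = 1;
            demo_pagemap.ops = &demo_ops;
            demo_pagemap.owner = &demo_pagemap;

            /* Each struct page comes back with page_ref_count() == 1 and
             * pgmap->ref holding one reference per page, as described
             * in the snippet above. */
            return PTR_ERR_OR_ZERO(memremap_pages(&demo_pagemap,
                                                  NUMA_NO_NODE));
    }

    static void demo_destroy(void)
    {
            /* Blocks until pgmap->ref drains to zero. */
            memunmap_pages(&demo_pagemap);
    }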
2020 Nov 09
3
[PATCH v3 3/6] mm: support THP migration to device private memory
On Fri, Nov 06, 2020 at 01:26:50PM -0800, Ralph Campbell wrote:
>
> On 11/6/20 12:03 AM, Christoph Hellwig wrote:
>> I hate the extra pin count magic here. IMHO we really need to finish
>> off the series to get rid of the extra references on the ZONE_DEVICE
>> pages first.
>
> First, thanks for the review comments.
>
> I don't like the extra refcount
2020 Sep 16
0
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...v_pagemap *pgmap)
-{
-	return -EINVAL;
-}
-static void devmap_managed_enable_put(void)
-{
-}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-
 static void pgmap_array_delete(struct range *range)
 {
 	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
@@ -181,7 +152,6 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 		pageunmap_range(pgmap, i);
 
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
-	devmap_managed_enable_put();
 }
 EXPORT_SYMBOL_GPL(memunmap_pages);
 
@@ -319,7 +289,6 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		.p...
2020 Sep 25
0
[PATCH 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...
-	cond_resched();
-	return pfn + 1;
-}
-
-#define for_each_device_pfn(pfn, map) \
-	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
-
 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
 {
 	if (pgmap->ops && pgmap->ops->kill)
@@ -128,12 +86,10 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 {
 	struct resource *res = &pgmap->res;
 	struct page *first_page;
-	unsigned long pfn;
 	int nid;
 
 	dev_pagemap_kill(pgmap);
-	for_each_device_pfn(pfn, pgmap)
-		put_page(pfn_to_page(pfn));
+	percpu_ref_put_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap))...
2020 Sep 17
0
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...static void devmap_managed_enable_put(void)
> -{
> -}
> -#endif /* CONFIG_DEV_PAGEMAP_OPS */
> -
>  static void pgmap_array_delete(struct range *range)
>  {
>  	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
> @@ -181,7 +152,6 @@ void memunmap_pages(struct dev_pagemap *pgmap)
>  		pageunmap_range(pgmap, i);
> 
>  	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
> -	devmap_managed_enable_put();
>  }
>  EXPORT_SYMBOL_GPL(memunmap_pages);
> 
> @@ -319,7 +289,6 @@ void *memremap_p...
2020 Oct 01
8
[RFC PATCH v3 0/2] mm: remove extra ZONE_DEVICE struct page refcount
...ax code some more, I realized that the ZONE_DEVICE struct pages are being inserted into the process' page tables with vmf_insert_mixed() and a zero refcount on the ZONE_DEVICE struct page. This is sort of OK because insert_pfn() increments the reference count on the pgmap, which is what prevents memunmap_pages() from freeing the struct pages, and it doesn't check for a non-zero struct page reference count. But any call to get_page() will hit the VM_BUG_ON_PAGE() that checks for a reference count == 0.

// mmap() an ext4 file that is mounted -o dax.
ext4_dax_fault()
  ext4_dax_huge_fault()
    dax_io...
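[Editor's note: for reference, the assertion being tripped is the refcount check in get_page(). A condensed sketch of that mainline helper, from memory of the v5.9-era source and lightly trimmed, looks like this:]

    static inline void get_page(struct page *page)
    {
            page = compound_head(page);
            /*
             * Getting a normal page or the head of a compound page
             * requires an already-elevated page->_refcount; a
             * zero-refcount ZONE_DEVICE page inserted with
             * vmf_insert_mixed() trips this assertion.
             */
            VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
            page_ref_inc(page);
    }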
2020 Oct 08
2
[PATCH] mm: make device private reference counts zero based
...i) \
-	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
-
 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
 {
 	if (pgmap->ops && pgmap->ops->kill)
@@ -177,20 +161,20 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-	unsigned long pfn;
 	int i;
 
 	dev_pagemap_kill(pgmap);
 	for (i = 0; i < pgmap->nr_range; i++)
-		for_each_device_pfn(pfn, pgmap, i)
-			put_page(pfn_to_page(pfn));
+		percpu_ref_put_many(pgmap->ref, pfn_end(pgmap, i) -
+				    pfn_first(pgmap, i));
 	dev_p...
2020 Sep 14
5
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...}
-static unsigned long pfn_next(unsigned long pfn)
-{
-	if (pfn % 1024 == 0)
-		cond_resched();
-	return pfn + 1;
-}
-
 /*
  * This returns true if the page is reserved by ZONE_DEVICE driver.
  */
@@ -176,13 +169,12 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-	unsigned long pfn;
 	int i;
 
 	dev_pagemap_kill(pgmap);
 	for (i = 0; i < pgmap->nr_range; i++)
-		for_each_device_pfn(pfn, pgmap, i)
-			put_page(pfn_to_page(pfn));
+		percpu_ref_put_many(pgmap->ref, pfn_end(pgmap, i) -
+				    pfn_first(pgmap, i));
 	dev_p...
2020 Oct 12
2
[PATCH v2] mm/hmm: make device private reference counts zero based
...i) \
-	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
-
 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
 {
 	if (pgmap->ops && pgmap->ops->kill)
@@ -177,20 +161,20 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-	unsigned long pfn;
 	int i;
 
 	dev_pagemap_kill(pgmap);
 	for (i = 0; i < pgmap->nr_range; i++)
-		for_each_device_pfn(pfn, pgmap, i)
-			put_page(pfn_to_page(pfn));
+		percpu_ref_put_many(pgmap->ref, pfn_end(pgmap, i) -
+				    pfn_first(pgmap, i));
 	dev_p...
2020 Oct 01
0
[RFC PATCH v3 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...i) \
-	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
-
 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
 {
 	if (pgmap->ops && pgmap->ops->kill)
@@ -176,20 +137,18 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-	unsigned long pfn;
 	int i;
 
 	dev_pagemap_kill(pgmap);
 	for (i = 0; i < pgmap->nr_range; i++)
-		for_each_device_pfn(pfn, pgmap, i)
-			put_page(pfn_to_page(pfn));
+		percpu_ref_put_many(pgmap->ref, pfn_end(pgmap, i) -
+				    pfn_first(pgmap, i));
 	dev_p...
2020 Sep 25
6
[RFC PATCH v2 0/2] mm: remove extra ZONE_DEVICE struct page refcount
Matthew Wilcox, Ira Weiny, and others have complained that ZONE_DEVICE struct page reference counting is ugly because the pages are "free" when the reference count is one instead of zero. This leads to explicit checks for ZONE_DEVICE pages in places like put_page(), GUP, THP splitting, and page migration, which have to adjust the expected reference count when determining if the page is
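[Editor's note: the special case being complained about is roughly the v5.8-era put_devmap_managed_page(); the sketch below is condensed from memory and is not part of this thread:]

    void put_devmap_managed_page(struct page *page)
    {
            int count = page_ref_dec_return(page);

            /*
             * devmap page refcounts are 1-based rather than 0-based:
             * a count of one means the page is free and nobody else
             * holds a reference, so hand it back to the driver
             * instead of treating it like an ordinary page.
             */
            if (count == 1)
                    free_devmap_managed_page(page);
            else if (!count)
                    __put_page(page);
    }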
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...ntry(orig_pmd))
+		  !is_migration_entry(entry));
+	if (is_migration_entry(entry))
 		pmd_migration_entry_wait(mm, vmf.pmd);
 	return 0;
 }
diff --git a/mm/memremap.c b/mm/memremap.c
index 03e38b7a38f1..4231054188b4 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -132,8 +132,13 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 	int nid;
 
 	dev_pagemap_kill(pgmap);
-	for_each_device_pfn(pfn, pgmap)
-		put_page(pfn_to_page(pfn));
+	for_each_device_pfn(pfn, pgmap) {
+		struct page *page = pfn_to_page(pfn);
+		unsigned int order = compound_order(page);
+
+		put_page(page);
+		pfn += (1U <<...
2020 Apr 21
2
[PATCH] nouveau/hmm: fix nouveau_dmem_chunk allocations
...if (chunk->bo) {
-		nouveau_bo_unpin(chunk->bo);
-		nouveau_bo_ref(NULL, &chunk->bo);
-	}
+	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+		nouveau_bo_unpin(chunk->bo);
+		nouveau_bo_ref(NULL, &chunk->bo);
 		list_del(&chunk->list);
+		memunmap_pages(&chunk->pagemap);
+		release_mem_region(chunk->pagemap.res.start,
+				   resource_size(&chunk->pagemap.res));
 		kfree(chunk);
 	}
@@ -493,9 +456,6 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
 void nouveau_dmem_init(struct nouveau_drm *drm)
 {
-	struct device *device...
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...ntry));
> +	if (is_migration_entry(entry))
>  		pmd_migration_entry_wait(mm, vmf.pmd);
>  	return 0;
>  }
> diff --git a/mm/memremap.c b/mm/memremap.c
> index 03e38b7a38f1..4231054188b4 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -132,8 +132,13 @@ void memunmap_pages(struct dev_pagemap *pgmap)
>  	int nid;
> 
>  	dev_pagemap_kill(pgmap);
> -	for_each_device_pfn(pfn, pgmap)
> -		put_page(pfn_to_page(pfn));
> +	for_each_device_pfn(pfn, pgmap) {
> +		struct page *page = pfn_to_page(pfn);
> +		unsigned int order = compound_order(page);
> +...
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go into 5.8; the others can be queued for 5.9. Patches 4-6 improve the HMM self tests. Patches 7-8 prepare nouveau for the meat of this series, which adds support and testing for compound page mapping of system memory (patches 9-11) and compound page migration to device private memory (patches 12-16). Since these changes are split
2020 Jan 13
9
[PATCH v6 0/6] mm/hmm/test: add self tests for HMM
This series adds new functions to the mmu interval notifier API to allow device drivers with MMUs to dynamically mirror a process' page tables based on device faults and invalidation callbacks. The Nouveau driver is updated to use the extended API, and a set of stand-alone self tests is added to help validate and maintain correctness. The patches are based on linux-5.5.0-rc6 and are for
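[Editor's note: as a rough illustration of the API shape this series builds on, an interval-notifier user in the v5.5 era looks something like the sketch below. This is not code from the thread; the demo_* names and locking are invented for illustration.]

    struct demo_mirror {
            struct mmu_interval_notifier notifier;
            struct mutex lock;      /* serializes device page-table updates */
    };

    static bool demo_invalidate(struct mmu_interval_notifier *mni,
                                const struct mmu_notifier_range *range,
                                unsigned long cur_seq)
    {
            struct demo_mirror *mirror =
                    container_of(mni, struct demo_mirror, notifier);

            if (mmu_notifier_range_blockable(range))
                    mutex_lock(&mirror->lock);
            else if (!mutex_trylock(&mirror->lock))
                    return false;
            /* Publish the new sequence so page-table walkers retry. */
            mmu_interval_set_seq(mni, cur_seq);
            /* ...zap the device's mapping of [range->start, range->end)... */
            mutex_unlock(&mirror->lock);
            return true;
    }

    static const struct mmu_interval_notifier_ops demo_mni_ops = {
            .invalidate = demo_invalidate,
    };

    /* Register against [addr, addr + length) of a process's mm: */
    /* mmu_interval_notifier_insert(&mirror->notifier, current->mm,  */
    /*                              addr, length, &demo_mni_ops);    */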