Displaying 15 results from an estimated 15 matches for "dev_pagemap_kill".
2019 Jun 26
0
[PATCH 15/25] memremap: provide an optional internal refcount in struct dev_pagemap
...ap->ops->page_free) {
WARN(1, "Missing page_free method\n");
return -EINVAL;
}
@@ -75,6 +75,24 @@ static unsigned long pfn_next(unsigned long pfn)
#define for_each_device_pfn(pfn, map) \
for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
+static void dev_pagemap_kill(struct dev_pagemap *pgmap)
+{
+ if (pgmap->ops && pgmap->ops->kill)
+ pgmap->ops->kill(pgmap);
+ else
+ percpu_ref_kill(pgmap->ref);
+}
+
+static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
+{
+ if (pgmap->ops && pgmap->ops->cleanup) {
pgmap->...
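The snippet cuts off inside dev_pagemap_cleanup(). Based on the patch subject (an optional internal refcount in struct dev_pagemap), a plausible completion of the fallback path is sketched below; the done completion field and the exact teardown calls are assumptions here, not quoted from this result:

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		/* internal refcount: wait for the last put, then tear down */
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
}

Either way the driver-supplied ops take precedence; the internal refcount is a convenience fallback for drivers that do not need their own.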
2019 Jun 26
1
[PATCH 15/25] memremap: provide an optional internal refcount in struct dev_pagemap
...(1, "Missing page_free method\n");
> return -EINVAL;
> }
> @@ -75,6 +75,24 @@ static unsigned long pfn_next(unsigned long pfn)
> #define for_each_device_pfn(pfn, map) \
> for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
>
> +static void dev_pagemap_kill(struct dev_pagemap *pgmap)
> +{
> + if (pgmap->ops && pgmap->ops->kill)
> + pgmap->ops->kill(pgmap);
> + else
> + percpu_ref_kill(pgmap->ref);
> +}
> +
> +static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
> +{
> + if (pgmap->ops...
2020 Sep 25
0
[PATCH 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...tart + resource_size(res)) >> PAGE_SHIFT;
}
-static unsigned long pfn_next(unsigned long pfn)
-{
- if (pfn % 1024 == 0)
- cond_resched();
- return pfn + 1;
-}
-
-#define for_each_device_pfn(pfn, map) \
- for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
-
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
if (pgmap->ops && pgmap->ops->kill)
@@ -128,12 +86,10 @@ void memunmap_pages(struct dev_pagemap *pgmap)
{
struct resource *res = &pgmap->res;
struct page *first_page;
- unsigned long pfn;
int nid;
dev_pagemap_kill(pgmap);
- for_each...
2020 Oct 12
2
[PATCH v2] mm/hmm: make device private reference counts zero based
...* This returns true if the page is reserved by ZONE_DEVICE driver.
*/
@@ -119,9 +106,6 @@ bool pfn_zone_device_reserved(unsigned long pfn)
return ret;
}
-#define for_each_device_pfn(pfn, map, i) \
- for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
-
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
if (pgmap->ops && pgmap->ops->kill)
@@ -177,20 +161,20 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
void memunmap_pages(struct dev_pagemap *pgmap)
{
- unsigned long pfn;
int i;
dev_pagemap_kill(pgmap);
for (i =...
2020 Oct 01
0
[RFC PATCH v3 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...* This returns true if the page is reserved by ZONE_DEVICE driver.
*/
@@ -118,9 +82,6 @@ bool pfn_zone_device_reserved(unsigned long pfn)
return ret;
}
-#define for_each_device_pfn(pfn, map, i) \
- for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
-
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
if (pgmap->ops && pgmap->ops->kill)
@@ -176,20 +137,18 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
void memunmap_pages(struct dev_pagemap *pgmap)
{
- unsigned long pfn;
int i;
dev_pagemap_kill(pgmap);
for (i =...
2020 Oct 08
2
[PATCH] mm: make device private reference counts zero based
...* This returns true if the page is reserved by ZONE_DEVICE driver.
*/
@@ -119,9 +106,6 @@ bool pfn_zone_device_reserved(unsigned long pfn)
return ret;
}
-#define for_each_device_pfn(pfn, map, i) \
- for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
-
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
if (pgmap->ops && pgmap->ops->kill)
@@ -177,20 +161,20 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
void memunmap_pages(struct dev_pagemap *pgmap)
{
- unsigned long pfn;
int i;
dev_pagemap_kill(pgmap);
for (i =...
2020 Sep 25
6
[RFC PATCH v2 0/2] mm: remove extra ZONE_DEVICE struct page refcount
Matthew Wilcox, Ira Weiny, and others have complained that ZONE_DEVICE
struct page reference counting is ugly because the pages are "free" when the
reference count is one instead of zero. This leads to explicit checks
for ZONE_DEVICE pages in places like put_page(), GUP, THP splitting, and
page migration which have to adjust the expected reference count when
determining if the page is
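The kind of explicit check being complained about can be sketched as follows; page_is_idle_refcount() is a hypothetical helper for illustration, not a kernel API:

/* Hypothetical helper, for illustration only. */
static inline bool page_is_idle_refcount(struct page *page)
{
	/* ZONE_DEVICE pages are "free" at a refcount of one, not zero */
	int idle = is_zone_device_page(page) ? 1 : 0;

	return page_ref_count(page) == idle;
}

Every caller that reasons about page lifetime (put_page(), GUP, THP splitting, migration) has to carry some variant of this off-by-one adjustment, which is what the series removes.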
2020 Oct 01
8
[RFC PATCH v3 0/2] mm: remove extra ZONE_DEVICE struct page refcount
This is still an RFC because after looking at the pmem/dax code some
more, I realized that the ZONE_DEVICE struct pages are being inserted
into the process' page tables with vmf_insert_mixed() and a zero
refcount on the ZONE_DEVICE struct page. This is sort of OK because
insert_pfn() increments the reference count on the pgmap, which is what
prevents memunmap_pages() from freeing the struct
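The interlock being described, i.e. the pgmap reference that insert_pfn() elevates, follows the usual get_dev_pagemap()/put_dev_pagemap() pattern; a minimal sketch, with error handling elided and pfn assumed to be a device pfn:

struct dev_pagemap *pgmap;

pgmap = get_dev_pagemap(pfn, NULL);	/* percpu_ref_tryget_live() inside */
if (pgmap) {
	/* safe: memunmap_pages() cannot finish while this ref is held */
	struct page *page = pfn_to_page(pfn);
	/* ... use page ... */
	put_dev_pagemap(pgmap);		/* percpu_ref_put() */
}

memunmap_pages() kills the ref and waits for it to drain, so it cannot complete while any such reference is outstanding.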
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...if (is_migration_entry(entry))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}
diff --git a/mm/memremap.c b/mm/memremap.c
index 03e38b7a38f1..4231054188b4 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -132,8 +132,13 @@ void memunmap_pages(struct dev_pagemap *pgmap)
int nid;
dev_pagemap_kill(pgmap);
- for_each_device_pfn(pfn, pgmap)
- put_page(pfn_to_page(pfn));
+ for_each_device_pfn(pfn, pgmap) {
+ struct page *page = pfn_to_page(pfn);
+ unsigned int order = compound_order(page);
+
+ put_page(page);
+ pfn += (1U << order) - 1;
+ }
dev_pagemap_cleanup(pgmap);
/* make s...
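The arithmetic in the hunk above is worth spelling out: for a compound (THP) device page only the head page holds the reference, so after put_page() on the head the loop must skip the tail pfns. A sketch of the same logic with the numbers made explicit (an order-9 THP is assumed for the comments):

struct page *page = pfn_to_page(pfn);
unsigned int order = compound_order(page);	/* 9 for a 2MB THP, 0 for base pages */

put_page(page);			/* drop the head page's reference */
pfn += (1U << order) - 1;	/* skip the 511 tail pfns; the loop's pfn_next() adds the final +1 */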
2019 Jun 26
41
dev_pagemap related cleanups v3
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable; this removes the need to wrap it in hmm
and thus allows killing a lot of code
Note: this series is on top of Linux 5.2-rc5 and has some minor
conflicts with the hmm tree that are easy to resolve.
Diffstat summary:
32 files changed, 361 insertions(+), 1012 deletions(-)
Git
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...n_entry_wait(mm, vmf.pmd);
> return 0;
> }
> diff --git a/mm/memremap.c b/mm/memremap.c
> index 03e38b7a38f1..4231054188b4 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -132,8 +132,13 @@ void memunmap_pages(struct dev_pagemap *pgmap)
> int nid;
>
> dev_pagemap_kill(pgmap);
> - for_each_device_pfn(pfn, pgmap)
> - put_page(pfn_to_page(pfn));
> + for_each_device_pfn(pfn, pgmap) {
> + struct page *page = pfn_to_page(pfn);
> + unsigned int order = compound_order(page);
> +
> + put_page(page);
> + pfn += (1U << order) - 1;
> +...
2020 Sep 14
5
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...)
- cond_resched();
- return pfn + 1;
-}
-
/*
* This returns true if the page is reserved by ZONE_DEVICE driver.
*/
@@ -176,13 +169,12 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
void memunmap_pages(struct dev_pagemap *pgmap)
{
- unsigned long pfn;
int i;
dev_pagemap_kill(pgmap);
for (i = 0; i < pgmap->nr_range; i++)
- for_each_device_pfn(pfn, pgmap, i)
- put_page(pfn_to_page(pfn));
+ percpu_ref_put_many(pgmap->ref, pfn_end(pgmap, i) -
+ pfn_first(pgmap, i));
dev_pagemap_cleanup(pgmap);
for (i = 0; i < pgmap->nr_range; i++)
@@ -516,6...
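The replacement hunk drops one reference per page in a single call rather than looping put_page() over every pfn. percpu_ref_put_many(ref, nr) is semantically nr calls to percpu_ref_put(); a sketch under the patch's assumption that the pgmap ref was elevated once per page at map time:

unsigned long nr = pfn_end(pgmap, i) - pfn_first(pgmap, i);

/* equivalent to calling percpu_ref_put(pgmap->ref) nr times */
percpu_ref_put_many(pgmap->ref, nr);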
2019 Jun 17
34
dev_pagemap related cleanups v2
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable; this removes the need to wrap it in hmm
and thus allows killing a lot of code
Note: this series is on top of the rdma/hmm branch + the dev_pagemap
release fix series from Dan that went into 5.2-rc5.
Git tree:
git://git.infradead.org/users/hch/misc.git
2019 Jun 13
57
dev_pagemap related cleanups
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable; this removes the need to wrap it in hmm
and thus allows killing a lot of code
Diffstat:
22 files changed, 245 insertions(+), 802 deletions(-)
Git tree:
git://git.infradead.org/users/hch/misc.git hmm-devmem-cleanup
Gitweb:
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split