Displaying 20 results from an estimated 55 matches for "is_device_private_page".
2019 Jun 26 | 0 | [PATCH 04/25] mm: remove MEMORY_DEVICE_PUBLIC support
...@@ static inline bool put_devmap_managed_page(struct page *page)
return false;
switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
- case MEMORY_DEVICE_PUBLIC:
case MEMORY_DEVICE_FS_DAX:
__put_devmap_managed_page(page);
return true;
@@ -960,12 +959,6 @@ static inline bool is_device_private_page(const struct page *page)
page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
-static inline bool is_device_public_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PUBLIC;
-}
-
#ifdef CONFIG_PCI_P2PDMA
static inline b...
2019 Aug 15 | 4 | [PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...echnically deref the struct page:
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range)
struct page *page;
page = hmm_pfn_to_page(range, range->pfns[i]);
if (!nouveau_dmem_page(drm, page)) {
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
return is_device_private_page(page) && drm->dmem == page_to_dmem(page)
Which does touch 'page->pgmap'.
Is this OK without holding a get_dev_pagemap() reference?
Note that the collision-retry scheme doesn't protect anything here,
as we can have a concurrent invalidation while doing the above deref.
Jason
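One way to make that deref safe, sketched here on the assumption that the 2019-era get_dev_pagemap()/put_dev_pagemap() API is used (nouveau_dmem_page_safe is a hypothetical name; drm->dmem and page_to_dmem() are the helpers quoted above):

	static bool nouveau_dmem_page_safe(struct nouveau_drm *drm,
					   struct page *page)
	{
		struct dev_pagemap *pgmap;
		bool ours;

		/* Pin the pagemap so a concurrent teardown cannot free
		 * page->pgmap while we look at it. */
		pgmap = get_dev_pagemap(page_to_pfn(page), NULL);
		if (!pgmap)
			return false;	/* pagemap already being torn down */
		ours = is_device_private_page(page) &&
			drm->dmem == page_to_dmem(page);
		put_dev_pagemap(pgmap);
		return ours;
	}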
2020 Mar 16 | 4 | [PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
On 3/16/20 12:32 PM, Christoph Hellwig wrote:
> Remove the code to fault device private pages back into system memory
> that has never been used by any driver. Also replace the usage of the
> HMM_PFN_DEVICE_PRIVATE flag in the pfns array with a simple
> is_device_private_page check in nouveau.
>
> Signed-off-by: Christoph Hellwig <hch at lst.de>
Getting rid of HMM_PFN_DEVICE_PRIVATE seems reasonable to me, since a driver
can look at the struct page, but what if a driver needs to fault in a page
from another device's private memory? Should it call handle...
2020 Nov 06 | 0 | [PATCH v3 3/6] mm: support THP migration to device private memory
...put_page()
- * which needs correct compound_head().
+ * Also, ZONE_DEVICE struct pages share the compound_head field and
+ * need to restore the pgmap pointer before unfreezing page refcount
+ * in order for is_zone_device_page() to work correctly.
*/
- clear_compound_head(page_tail);
+ if (is_device_private_page(head)) {
+ head->pgmap->ops->page_split(head, page_tail);
+ pin_count = 2;
+ } else {
+ clear_compound_head(page_tail);
+ pin_count = 1;
+ }
/* Finally unfreeze refcount. Additional reference from page cache. */
- page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
+ page_ref_unf...
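A driver's implementation of the page_split() op added above could be as small as restoring the pgmap pointer that the tail page shares with compound_head (hypothetical sketch; my_dmem_page_split is not from the patch):

	/* ZONE_DEVICE tail pages overlay compound_head with pgmap, so a
	 * split must restore pgmap before the refcount is unfrozen and
	 * is_zone_device_page() can be trusted again. */
	static void my_dmem_page_split(struct page *head, struct page *tail)
	{
		tail->pgmap = head->pgmap;
	}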
2020 Oct 12 | 2 | [PATCH v2] mm/hmm: make device private reference counts zero based
...prot_noncached(params.pgprot);
- need_devmap_managed = false;
break;
default:
WARN(1, "Invalid pgmap type %d\n", pgmap->type);
@@ -508,10 +495,13 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
void free_devmap_managed_page(struct page *page)
{
/* notify page idle for dax */
- if (!is_device_private_page(page)) {
- dax_wakeup_page(page);
+ dax_wakeup_page(page);
+}
+
+void free_zone_device_page(struct page *page)
+{
+ if (!is_device_private_page(page))
return;
- }
__ClearPageWaiters(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 5ca5842df5db..ee09334b46d8 100644
--- a/mm/migrate.c...
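On the driver side, a zero refcount now hands the page back through the existing dev_pagemap_ops page_free callback; a hypothetical minimal implementation (all my_dmem_* names are made up for illustration):

	/* Hypothetical page_free op: return the device memory backing
	 * this page to the driver's own allocator once its refcount
	 * reaches zero, instead of going through the page allocator. */
	static void my_dmem_page_free(struct page *page)
	{
		my_dmem_free_block(page_to_my_block(page));
	}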
2020 Mar 16 | 0 | [PATCH 4/4] mm: check the device private page owner in hmm_range_fault
...ba4..ad89e09a0be3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -672,12 +672,6 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
return ret;
}
-static inline bool
-nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
-{
- return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
-}
-
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range)
@@ -696,12 +690,6 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
if (!is_device_private_page(page))
continue;
- if (!nouveau_dmem_page(dr...
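The nouveau-side ownership helper can go away because the equivalent test moves into the hmm core; a sketch of that core-side check, with the owner/dev_private_owner field names assumed from this series:

	static inline bool hmm_is_device_private_entry(struct hmm_range *range,
						       swp_entry_t entry)
	{
		/* Only treat the entry as faultable device private memory
		 * if its pagemap owner matches what the caller asked for. */
		return is_device_private_entry(entry) &&
			device_private_entry_to_page(entry)->pgmap->owner ==
			range->dev_private_owner;
	}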
2020 Mar 16 | 14 | ensure device private pages have an owner v2
When acting on device private mappings, a driver needs to know whether the
device (or another entity, in the case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
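A sketch of the migrate_vma side of that owner check (the src_owner and pgmap->owner field names are assumptions based on this cover letter; migrate_owns_page is a hypothetical helper):

	/* Sketch: a migrating caller may only touch device private pages
	 * whose pagemap it actually owns; everything else is skipped. */
	static bool migrate_owns_page(struct migrate_vma *migrate,
				      struct page *page)
	{
		return !is_device_private_page(page) ||
			page->pgmap->owner == migrate->src_owner;
	}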
2020 Mar 16 | 0 | [PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
Remove the code to fault device private pages back into system memory
that has never been used by any driver. Also replace the usage of the
HMM_PFN_DEVICE_PRIVATE flag in the pfns array with a simple
is_device_private_page check in nouveau.
Signed-off-by: Christoph Hellwig <hch at lst.de>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 -
drivers/gpu/drm/nouveau/nouveau_dmem.c | 5 +++--
drivers/gpu/drm/nouveau/nouveau_svm.c | 1 -
include/linux/hmm.h | 2 --
mm/hmm.c...
2019 Aug 16 | 2 | [PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...struct page *page;
> > page = hmm_pfn_to_page(range, range->pfns[i]);
> > if (!nouveau_dmem_page(drm, page)) {
> >
> >
> > nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
> > {
> > return is_device_private_page(page) && drm->dmem == page_to_dmem(page)
> >
> >
> > Which does touch 'page->pgmap'
> >
> > Is this OK without having a get_dev_pagemap() ?
> >
> > Noting that the collision-retry scheme doesn't protect anything here
> > as...
2020 Jun 23 | 1 | [RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> if (!(src & MIGRATE_PFN_MIGRATE))
> goto out;
>
> + if (spage && is_device_private_page(spage)) {
> + paddr = nouveau_dmem_page_addr(spage);
> + *dma_addr = DMA_MAPPING_ERROR;
> + goto done;
> + }
> +
> dpage = nouveau_dmem_page_alloc_locked(drm);
> if (!dpage)
> goto out;
> @@ -560,6 +566,7 @@ static unsigned long nouveau_dmem_migrate_copy_one...
2020 Sep 16 | 0 | [PATCH] mm: remove extra ZONE_DEVICE struct page refcount
.../linux/mm.h
@@ -1090,11 +1090,6 @@ static inline bool is_zone_device_page(const struct page *page)
}
#endif
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-void free_devmap_managed_page(struct page *page);
-DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-
static inline bool is_device_private_page(const struct page *page)
{
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
diff --git a/mm/internal.h b/mm/internal.h
index 6345b08ce86ccf..629959a6f26d7c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -618,4 +618,12 @@ struct migration_target_control {
gfp_t gfp_mask;
};
+#ifdef C...
2020 Sep 14 | 5 | [PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...oid put_devmap_managed_page(struct page *page);
-
-#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
-{
- return false;
-}
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page)
@@ -1169,17 +1141,6 @@ static inline void put_page(struct page *page)
{
page = compound_head(page);
- /*
- * For devmap managed pages we need to catch refcount transition from
- * 2 to 1, when refcount reach one it means the page is free and we
- * need to inform t...
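With the devmap special-casing removed, put_page() returns to the generic form; a sketch of the post-patch function (reconstructed, so treat the exact body as an assumption):

	static inline void put_page(struct page *page)
	{
		page = compound_head(page);

		/* No devmap-managed detour any more: every page,
		 * ZONE_DEVICE included, is freed when its refcount
		 * drops to zero. */
		if (put_page_testzero(page))
			__put_page(page);
	}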
2020 Sep 25 | 0 | [PATCH 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...oid put_devmap_managed_page(struct page *page);
-
-#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
-{
- return false;
-}
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-
static inline bool is_device_private_page(const struct page *page)
{
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
@@ -1171,17 +1138,6 @@ static inline void put_page(struct page *page)
{
page = compound_head(page);
- /*
- * For devmap managed pages we need to catch refcount transition from
- * 2 to 1, when refcount reach...
2020 Sep 17 | 0 | [PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...bool is_zone_device_page(const struct page *page)
> }
> #endif
>
> -#ifdef CONFIG_DEV_PAGEMAP_OPS
> -void free_devmap_managed_page(struct page *page);
> -DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
> -#endif /* CONFIG_DEV_PAGEMAP_OPS */
> -
> static inline bool is_device_private_page(const struct page *page)
> {
> return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
> diff --git a/mm/internal.h b/mm/internal.h
> index 6345b08ce86ccf..629959a6f26d7c 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -618,4 +618,12 @@ struct migration_target_contro...
2020 Apr 22 | 2 | [PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...0; i < npages; ++i) {
> struct page *page;
>
> + if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
> + ioctl_addr[i] = 0;
> continue;
> + }
Can't we rely on the caller pre-zeroing the array?
> + page = hmm_pfn_to_page(range->hmm_pfns[i]);
> + if (is_device_private_page(page))
> + ioctl_addr[i] = nouveau_dmem_page_addr(page) |
> + NVIF_VMM_PFNMAP_V0_V |
> + NVIF_VMM_PFNMAP_V0_VRAM;
> + else
> + ioctl_addr[i] = page_to_phys(page) |
> + NVIF_VMM_PFNMAP_V0_V |
> + NVIF_VMM_PFNMAP_V0_HOST;
> + if (range->hmm_pfns[i] &a...
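If the caller did pre-zero the array, the skip branch above could drop its store entirely; a minimal sketch of that caller-side contract (ioctl_addr and npages as in the quoted hunk):

	/* Caller zeroes the output array once up front, so the fill
	 * loop only needs to write entries for valid pfns. */
	memset(ioctl_addr, 0, npages * sizeof(*ioctl_addr));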
2020 Jun 19 | 0 | [PATCH 13/16] mm: support THP migration to device private memory
...&ds_queue->split_queue_lock, flags);
free_compound_page(page);
@@ -2963,6 +2975,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
pmde = pmd_mksoft_dirty(pmde);
if (is_write_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
+ if (unlikely(is_device_private_page(new))) {
+ entry = make_device_private_entry(new, pmd_write(pmde));
+ pmde = swp_entry_to_pmd(entry);
+ }
flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
if (PageAnon(new))
diff --git a/mm/internal.h b/mm/internal.h
index 9886db20d94f..58f051a14dae 100644
--- a/mm/internal...
2020 Oct 01 | 0 | [RFC PATCH v3 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...oid put_devmap_managed_page(struct page *page);
-
-#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
-{
- return false;
-}
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-
static inline bool is_device_private_page(const struct page *page)
{
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
@@ -1179,17 +1146,6 @@ static inline void put_page(struct page *page)
{
page = compound_head(page);
- /*
- * For devmap managed pages we need to catch refcount transition from
- * 2 to 1, when refcount reach...
2020 Oct 08 | 2 | [PATCH] mm: make device private reference counts zero based
...prot_noncached(params.pgprot);
- need_devmap_managed = false;
break;
default:
WARN(1, "Invalid pgmap type %d\n", pgmap->type);
@@ -508,10 +495,13 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
void free_devmap_managed_page(struct page *page)
{
/* notify page idle for dax */
- if (!is_device_private_page(page)) {
- dax_wakeup_page(page);
+ dax_wakeup_page(page);
+}
+
+void free_zone_device_page(struct page *page)
+{
+ if (!is_device_private_page(page))
return;
- }
__ClearPageWaiters(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 5ca5842df5db..ee09334b46d8 100644
--- a/mm/migrate.c...
2020 Jun 19 | 0 | [PATCH 08/16] nouveau/hmm: fault one page at a time
...esentation.
*/
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- struct page *page;
-
- if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
- ioctl_addr[i] = 0;
- continue;
- }
-
- page = hmm_pfn_to_page(range->hmm_pfns[i]);
- if (is_device_private_page(page))
- ioctl_addr[i] = nouveau_dmem_page_addr(page) |
- NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_VRAM;
- else
- ioctl_addr[i] = page_to_phys(page) |
- NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_HOST;
- if (range->hmm_pfns[i] & HMM_PFN_WRITE)
- ioctl_addr[i] |=...
2020 Jul 01 | 0 | [PATCH v3 1/5] nouveau/hmm: fault one page at a time
...esentation.
*/
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- struct page *page;
-
- if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
- ioctl_addr[i] = 0;
- continue;
- }
-
- page = hmm_pfn_to_page(range->hmm_pfns[i]);
- if (is_device_private_page(page))
- ioctl_addr[i] = nouveau_dmem_page_addr(page) |
- NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_VRAM;
- else
- ioctl_addr[i] = page_to_phys(page) |
- NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_HOST;
- if (range->hmm_pfns[i] & HMM_PFN_WRITE)
- ioctl_addr[i] |=...