search for: nouveau_dmem_page_addr

Displaying 20 results from an estimated 46 matches for "nouveau_dmem_page_addr".

2019 Jul 29
0
[PATCH 3/9] nouveau: factor out device memory address calculation
...gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index e696157f771e..d469bc334438 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -102,6 +102,14 @@ struct nouveau_migrate { unsigned long dma_nr; }; +static unsigned long nouveau_dmem_page_addr(struct page *page) +{ + struct nouveau_dmem_chunk *chunk = page->zone_device_data; + unsigned long idx = page_to_pfn(page) - chunk->pfn_first; + + return (idx << PAGE_SHIFT) + chunk->bo->bo.offset; +} + static void nouveau_dmem_page_free(struct page *page) { struct nouveau_dme...
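The arithmetic being factored out here is simple: a device page's VRAM address is its index within the backing chunk, scaled to bytes, plus the chunk's buffer-object offset. A standalone illustration in plain userspace C (the struct layout and the 4 KiB PAGE_SHIFT are simplifications for the example, not the kernel's types):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assuming 4 KiB pages */

struct chunk {
    unsigned long pfn_first;   /* first page frame number backed by this chunk */
    unsigned long bo_offset;   /* VRAM offset of the chunk's buffer object */
};

static unsigned long dmem_page_addr(const struct chunk *chunk, unsigned long pfn)
{
    unsigned long idx = pfn - chunk->pfn_first;    /* page index within the chunk */

    return (idx << PAGE_SHIFT) + chunk->bo_offset; /* byte address in VRAM */
}

int main(void)
{
    struct chunk c = { .pfn_first = 0x100000, .bo_offset = 0x200000 };

    /* third page of the chunk: 0x200000 + 2 * 4096 = 0x202000 */
    printf("0x%lx\n", dmem_page_addr(&c, 0x100002));
    return 0;
}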
2020 Apr 22
2
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...; > + if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) { > + ioctl_addr[i] = 0; > continue; > + } Can't we rely on the caller pre-zeroing the array? > + page = hmm_pfn_to_page(range->hmm_pfns[i]); > + if (is_device_private_page(page)) > + ioctl_addr[i] = nouveau_dmem_page_addr(page) | > + NVIF_VMM_PFNMAP_V0_V | > + NVIF_VMM_PFNMAP_V0_VRAM; > + else > + ioctl_addr[i] = page_to_phys(page) | > + NVIF_VMM_PFNMAP_V0_V | > + NVIF_VMM_PFNMAP_V0_HOST; > + if (range->hmm_pfns[i] & HMM_PFN_WRITE) > + ioctl_addr[i] |= NVIF_VMM_PF...
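The hunk under review, de-flattened for readability. This is the excerpt's own logic, assuming the kernel context it quotes (hmm_pfn_to_page(), is_device_private_page(), nouveau_dmem_page_addr() and the NVIF_VMM_PFNMAP_V0_* flags); only the wrapper name is invented:

/* Sketch: convert hmm_range_fault() output into nouveau's ioctl format. */
static void nouveau_hmm_pfns_to_ioctl_sketch(struct hmm_range *range,
                                             u64 *ioctl_addr,
                                             unsigned long npages)
{
    unsigned long i;

    for (i = 0; i < npages; ++i) {
        struct page *page;

        if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
            ioctl_addr[i] = 0;    /* no valid mapping at this index */
            continue;
        }

        page = hmm_pfn_to_page(range->hmm_pfns[i]);
        if (is_device_private_page(page))
            /* page lives in VRAM: use the device-local address */
            ioctl_addr[i] = nouveau_dmem_page_addr(page) |
                            NVIF_VMM_PFNMAP_V0_V |
                            NVIF_VMM_PFNMAP_V0_VRAM;
        else
            /* system memory: use the CPU physical address */
            ioctl_addr[i] = page_to_phys(page) |
                            NVIF_VMM_PFNMAP_V0_V |
                            NVIF_VMM_PFNMAP_V0_HOST;

        if (range->hmm_pfns[i] & HMM_PFN_WRITE)
            ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
    }
}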
2019 Jul 29
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_vma
...ruct page *page) return container_of(page->pgmap, struct nouveau_dmem, pagemap); } -struct nouveau_migrate { - struct vm_area_struct *vma; - struct nouveau_drm *drm; - struct nouveau_fence *fence; - unsigned long npages; - dma_addr_t *dma; - unsigned long dma_nr; -}; - static unsigned long nouveau_dmem_page_addr(struct page *page) { struct nouveau_dmem_chunk *chunk = page->zone_device_data; @@ -569,131 +558,67 @@ nouveau_dmem_init(struct nouveau_drm *drm) drm->dmem = NULL; } -static void -nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma, - const unsigned long *src_pfns, -...
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
....phys in nouveau_svm_fault - I didn't see a zeroing? I think it makes sense that this routine fully sets the output array and does not assume pre-initialization > > + page = hmm_pfn_to_page(range->hmm_pfns[i]); > > + if (is_device_private_page(page)) > > + ioctl_addr[i] = nouveau_dmem_page_addr(page) | > > + NVIF_VMM_PFNMAP_V0_V | > > + NVIF_VMM_PFNMAP_V0_VRAM; > > + else > > + ioctl_addr[i] = page_to_phys(page) | > > + NVIF_VMM_PFNMAP_V0_V | > > + NVIF_VMM_PFNMAP_V0_HOST; > > + if (range->hmm_pfns[i] & HMM_PFN_WRITE) ...

2019 Aug 08
10
turn hmm migrate_vma upside down v2
Hi Jérôme, Ben and Jason, below is a series against the hmm tree which starts revamping the migrate_vma functionality. The prime idea is to export three slightly lower level functions and thus avoid the need for migrate_vma_ops callbacks. Diffstat: 5 files changed, 281 insertions(+), 607 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git
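The three lower-level functions are migrate_vma_setup(), migrate_vma_pages() and migrate_vma_finalize(). A rough sketch of the driver-side call sequence they enable in place of the old migrate_vma_ops callbacks; the struct migrate_vma field names follow the series, while error handling and the actual data copy are elided:

static int migrate_range_sketch(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                unsigned long *src, unsigned long *dst)
{
    struct migrate_vma args = {
        .vma   = vma,
        .start = start,
        .end   = end,
        .src   = src,    /* filled in with MIGRATE_PFN_* encoded source pages */
        .dst   = dst,    /* driver fills in the destination pages */
    };
    int ret;

    ret = migrate_vma_setup(&args);   /* collect and isolate the source pages */
    if (ret || !args.cpages)
        return ret;

    /* driver allocates destination pages and copies the data here */

    migrate_vma_pages(&args);         /* install the new pages */
    migrate_vma_finalize(&args);      /* drop references, restore CPU state */
    return 0;
}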
2019 Jul 31
1
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...if (dma_mapping_error(dev, fault->dma[fault->npages])) { > - dst_pfns[i] = MIGRATE_PFN_ERROR; > - __free_page(dpage); > - continue; > - } > - > - ret = copy(drm, 1, NOUVEAU_APER_HOST, > - fault->dma[fault->npages++], > - NOUVEAU_APER_VRAM, > - nouveau_dmem_page_addr(spage)); > - if (ret) { > - dst_pfns[i] = MIGRATE_PFN_ERROR; > - __free_page(dpage); > - continue; > - } > - } > + *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); > + if (dma_mapping_error(dev, *dma_addr)) > + goto error_free_page; > ...
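De-flattened, the new per-page path condensed from this and the related excerpts below: map the destination system page for DMA, ask the GPU copy engine to move the data out of VRAM, and unwind on failure (labels as in the diff):

    *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, *dma_addr))
        goto error_free_page;

    /* one-page GPU copy: VRAM source -> DMA-mapped system page */
    if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                                     NOUVEAU_APER_VRAM,
                                     nouveau_dmem_page_addr(spage)))
        goto error_dma_unmap;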
2020 Jun 23
1
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c > @@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, > if (!(src & MIGRATE_PFN_MIGRATE)) > goto out; > > + if (spage && is_device_private_page(spage)) { > + paddr = nouveau_dmem_page_addr(spage); > + *dma_addr = DMA_MAPPING_ERROR; > + goto done; > + } > + > dpage = nouveau_dmem_page_alloc_locked(drm); > if (!dpage) > goto out; > @@ -560,6 +566,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, > goto out_free_...
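Annotated, the fix is a short-circuit at the top of nouveau_dmem_migrate_copy_one(), condensed here from the diff (surrounding context elided):

    /* Source page is already device-private, i.e. already in VRAM:
     * don't allocate a fresh VRAM page or schedule a copy, just report
     * the existing device address.  DMA_MAPPING_ERROR marks the
     * dma_addr slot so the caller knows there is nothing to unmap. */
    if (spage && is_device_private_page(spage)) {
        paddr = nouveau_dmem_page_addr(spage);
        *dma_addr = DMA_MAPPING_ERROR;
        goto done;
    }

    /* ... normal path: allocate a VRAM page and copy into it ... */
done:
    /* both paths converge here to encode *pfn from paddr */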
2019 Aug 08
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...RROR; - __free_page(dpage); - continue; - } + *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, *dma_addr)) + goto error_free_page; - ret = copy(drm, 1, NOUVEAU_APER_HOST, - fault->dma[fault->npages++], - NOUVEAU_APER_VRAM, - nouveau_dmem_page_addr(spage)); - if (ret) { - dst_pfns[i] = MIGRATE_PFN_ERROR; - __free_page(dpage); - continue; - } - } + if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, + NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) + goto error_dma_unmap; - nouveau_fence_new(drm->dme...
2019 Jul 29
0
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...TIONAL, - DMA_ATTR_SKIP_CPU_SYNC); - if (dma_mapping_error(dev, fault->dma[fault->npages])) { - dst_pfns[i] = MIGRATE_PFN_ERROR; - __free_page(dpage); - continue; - } - - ret = copy(drm, 1, NOUVEAU_APER_HOST, - fault->dma[fault->npages++], - NOUVEAU_APER_VRAM, - nouveau_dmem_page_addr(spage)); - if (ret) { - dst_pfns[i] = MIGRATE_PFN_ERROR; - __free_page(dpage); - continue; - } - } + *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, *dma_addr)) + goto error_free_page; - nouveau_fence_new(drm->dmem->migrate.chan,...
2019 Aug 08
1
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...> - } > + *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); > + if (dma_mapping_error(dev, *dma_addr)) > + goto error_free_page; > > - ret = copy(drm, 1, NOUVEAU_APER_HOST, > - fault->dma[fault->npages++], > - NOUVEAU_APER_VRAM, > - nouveau_dmem_page_addr(spage)); > - if (ret) { > - dst_pfns[i] = MIGRATE_PFN_ERROR; > - __free_page(dpage); > - continue; > - } > - } > + if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, > + NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) > + goto error...
2019 Aug 07
4
[PATCH] nouveau/hmm: map pages after migration
...long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, dpage = nouveau_dmem_page_alloc_locked(drm); if (!dpage) - return 0; + goto out; *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, *dma_addr)) goto out_free_page; + paddr = nouveau_dmem_page_addr(dpage); if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM, - nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST, - *dma_addr)) + paddr, NOUVEAU_APER_HOST, *dma_addr)) goto out_dma_unmap; + *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM | + ((paddr >> PAGE_SHI...
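The shape of the change, as a condensed sketch (kernel context from the diff assumed; NVIF_VMM_PFNMAP_V0_ADDR_SHIFT is my completion of the truncated line based on the NVIF pfnmap layout, so treat it as an assumption, and the wrapper name is invented):

static int copy_and_encode_pfn_sketch(struct nouveau_drm *drm,
                                      struct page *dpage,
                                      dma_addr_t dma_addr, u64 *pfn)
{
    unsigned long paddr = nouveau_dmem_page_addr(dpage);

    /* copy the source data from system memory into the VRAM page */
    if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM, paddr,
                                     NOUVEAU_APER_HOST, dma_addr))
        return -EIO;

    /* record the VRAM address so the pages can be mapped right after
     * the migration instead of waiting for a GPU fault */
    *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
           ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
    return 0;
}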
2019 Jul 29
24
turn the hmm migrate_vma upside down
Hi Jérôme, Ben and Jason, below is a series against the hmm tree which starts revamping the migrate_vma functionality. The prime idea is to export three slightly lower level functions and thus avoid the need for migrate_vma_ops callbacks. Diffstat: 4 files changed, 285 insertions(+), 602 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...be39a..3364904eccff5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -85,7 +85,7 @@ static inline struct nouveau_dmem *page_to_dmem(struct page *page) return container_of(page->pgmap, struct nouveau_dmem, pagemap); } -static unsigned long nouveau_dmem_page_addr(struct page *page) +unsigned long nouveau_dmem_page_addr(struct page *page) { struct nouveau_dmem_chunk *chunk = page->zone_device_data; unsigned long idx = page_to_pfn(page) - chunk->pfn_first; @@ -671,28 +671,3 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, out: return ret; }...
2020 Jun 19
0
[PATCH 08/16] nouveau/hmm: fault one page at a time
...e->start) >> PAGE_SHIFT; - for (i = 0; i < npages; ++i) { - struct page *page; - - if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) { - ioctl_addr[i] = 0; - continue; - } - - page = hmm_pfn_to_page(range->hmm_pfns[i]); - if (is_device_private_page(page)) - ioctl_addr[i] = nouveau_dmem_page_addr(page) | - NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_VRAM; - else - ioctl_addr[i] = page_to_phys(page) | - NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_HOST; - if (range->hmm_pfns[i] & HMM_PFN_WRITE) - ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W; + if (!(range->hmm_pfns[0]...
2020 Jul 01
0
[PATCH v3 1/5] nouveau/hmm: fault one page at a time
...e->start) >> PAGE_SHIFT; - for (i = 0; i < npages; ++i) { - struct page *page; - - if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) { - ioctl_addr[i] = 0; - continue; - } - - page = hmm_pfn_to_page(range->hmm_pfns[i]); - if (is_device_private_page(page)) - ioctl_addr[i] = nouveau_dmem_page_addr(page) | - NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_VRAM; - else - ioctl_addr[i] = page_to_phys(page) | - NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_HOST; - if (range->hmm_pfns[i] & HMM_PFN_WRITE) - ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W; + if (!(range->hmm_pfns[0]...
2019 Aug 13
0
[PATCH] nouveau/hmm: map pages after migration
...drm, > > dpage = nouveau_dmem_page_alloc_locked(drm); > if (!dpage) > - return 0; > + goto out; > > *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); > if (dma_mapping_error(dev, *dma_addr)) > goto out_free_page; > > + paddr = nouveau_dmem_page_addr(dpage); > if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM, > - nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST, > - *dma_addr)) > + paddr, NOUVEAU_APER_HOST, *dma_addr)) > goto out_dma_unmap; > > + *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_...
2020 Jun 19
0
[PATCH 10/16] nouveau/hmm: support mapping large sysmem pages
...nd page). + */ + if (range->hmm_pfns[0] & HMM_PFN_COMPOUND) { + page = compound_head(page); + args->p.page = page_shift(page); + args->p.size = 1UL << args->p.page; + args->p.addr &= ~(args->p.size - 1); + } if (is_device_private_page(page)) - ioctl_addr[0] = nouveau_dmem_page_addr(page) | + args->p.phys[0] = nouveau_dmem_page_addr(page) | NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM; else - ioctl_addr[0] = page_to_phys(page) | + args->p.phys[0] = page_to_phys(page) | NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_HOST; if (range->hmm_pfns[0]...
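Annotated, the compound-page branch widens the GPU mapping to match the CPU's huge mapping (HMM_PFN_COMPOUND is a flag proposed by this series):

    if (range->hmm_pfns[0] & HMM_PFN_COMPOUND) {
        page = compound_head(page);              /* head page of the huge page */
        args->p.page = page_shift(page);         /* e.g. 21 for a 2 MiB page */
        args->p.size = 1UL << args->p.page;      /* mapping size in bytes */
        args->p.addr &= ~(args->p.size - 1);     /* align the GPU VA down */
    }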
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to know that a given 4K PFN is actually mapped by the CPU using either a PMD-sized or PUD-sized CPU page table entry, and therefore the device driver can safely map system memory using larger device MMU PTEs. The series is based on 5.8.0-rc3 and is intended for
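A rough sketch of how a driver could consume the proposed flags (flag names from the cover letter above; the helper name is hypothetical):

/* Return the mapping order the CPU used, so the device can match it. */
static unsigned int sketch_cpu_map_order(unsigned long hmm_pfn)
{
    if (hmm_pfn & HMM_PFN_PUD)
        return PUD_SHIFT - PAGE_SHIFT;    /* e.g. 1 GiB on x86-64 */
    if (hmm_pfn & HMM_PFN_PMD)
        return PMD_SHIFT - PAGE_SHIFT;    /* e.g. 2 MiB on x86-64 */
    return 0;                             /* base PAGE_SIZE mapping */
}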
2020 May 08
11
[PATCH 0/6] nouveau/hmm: add support for mapping large pages
hmm_range_fault() returns an array of page frame numbers and flags for how the pages are mapped in the requested process' page tables. The PFN can be used to get the struct page with hmm_pfn_to_page() and the page size order can be determined with compound_order(page) but if the page is larger than order 0 (PAGE_SIZE), there is no indication that the page is mapped using a larger page size. To
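A minimal sketch of the pattern described, assuming a kernel context with a registered mmu_interval_notifier; the -EBUSY retry loop that real callers need is elided, and the function name is invented:

static int sketch_fault_one(struct mmu_interval_notifier *notifier,
                            unsigned long addr)
{
    unsigned long hmm_pfn;
    struct hmm_range range = {
        .notifier      = notifier,
        .start         = addr,
        .end           = addr + PAGE_SIZE,
        .hmm_pfns      = &hmm_pfn,
        .default_flags = HMM_PFN_REQ_FAULT,
    };
    struct page *page;
    int ret;

    range.notifier_seq = mmu_interval_read_begin(notifier);
    mmap_read_lock(notifier->mm);
    ret = hmm_range_fault(&range);
    mmap_read_unlock(notifier->mm);
    if (ret)
        return ret;

    page = hmm_pfn_to_page(hmm_pfn);
    /* compound_order() reports the physical page size, but not how the
     * CPU mapped it -- the gap this series is addressing */
    pr_info("page order %u\n", compound_order(compound_head(page)));
    return 0;
}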
2020 Jun 22
0
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...vers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, if (!(src & MIGRATE_PFN_MIGRATE)) goto out; + if (spage && is_device_private_page(spage)) { + paddr = nouveau_dmem_page_addr(spage); + *dma_addr = DMA_MAPPING_ERROR; + goto done; + } + dpage = nouveau_dmem_page_alloc_locked(drm); if (!dpage) goto out; @@ -560,6 +566,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, goto out_free_page; } +done: *pfn = NVIF_VMM_PFNMAP_V0_V...