Displaying 20 results from an estimated 27 matches for "nouveau_dmem_migrate_chunk".
2023 Aug 05
1
[PATCH drm-misc-next] nouveau/dmem: fix copy-paste error in nouveau_dmem_migrate_chunk()
...file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 4ad40e42cae1..61e84562094a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -678,7 +678,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
}
if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
+ nouveau_fence_emit(fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->...
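The one-line fix above replaces a pointer chain copied from another function with the drm parameter this function already receives. A minimal user-space sketch of the corrected pattern (all types here are hypothetical stand-ins, not the real driver structures):

#include <stdio.h>

struct migrate_channel { const char *name; };
struct dmem            { struct migrate_channel chan; };
struct nouveau_drm     { struct dmem *dmem; };

/* The corrected pattern: emit on the instance the caller handed us,
 * not one reached through an unrelated chunk pointer. */
static void migrate_chunk(struct nouveau_drm *drm)
{
    printf("emitting fence on %s\n", drm->dmem->chan.name);
}

int main(void)
{
    struct dmem d = { .chan = { .name = "migrate.chan" } };
    struct nouveau_drm drm = { .dmem = &d };
    migrate_chunk(&drm);
    return 0;
}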
2019 Aug 07
4
[PATCH] nouveau/hmm: map pages after migration
...igrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
out_dma_unmap:
@@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
+ *pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
- struct nouveau_drm *drm, dma_addr_t *dma_addrs)
+ struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
args->dst[i] = nouv...
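The shape of this change: the per-page copy helper gains a u64 *pfn output so the caller can batch-map pages right after migration, and the failure path reports NVIF_VMM_PFNMAP_V0_NONE so a stale entry is never mapped. A small user-space sketch of that contract (PFN_NONE and PFN_VALID are made-up stand-ins for the NVIF flags):

#include <stdint.h>
#include <stdio.h>

#define PFN_NONE  0           /* stand-in for NVIF_VMM_PFNMAP_V0_NONE */
#define PFN_VALID (1ull << 0) /* stand-in for NVIF_VMM_PFNMAP_V0_V    */

/* Copy one page; on any failure report "no mapping" through *pfn so
 * the later bulk-map step safely skips the slot. */
static int copy_one(unsigned long src, uint64_t *pfn)
{
    if (!src) {              /* nothing to migrate */
        *pfn = PFN_NONE;     /* mirrors "*pfn = ..._NONE" in the patch */
        return 0;
    }
    *pfn = PFN_VALID | ((uint64_t)src << 8);
    return 1;
}

int main(void)
{
    unsigned long src[4] = { 1, 0, 3, 4 };
    uint64_t pfns[4];

    for (int i = 0; i < 4; i++)
        copy_one(src[i], &pfns[i]);
    /* ...migrate the pages, then map every valid pfn in one go... */
    for (int i = 0; i < 4; i++)
        printf("pfns[%d] = %#llx\n", i, (unsigned long long)pfns[i]);
    return 0;
}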
2019 Jul 29
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_vma
...N_DEVICE;
- __free_page(page);
- }
+out_dma_unmap:
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_page:
+ nouveau_dmem_page_free_locked(drm, dpage);
+out:
+ return 0;
}
-static void
-nouveau_dmem_migrate_finalize_and_map(struct nouveau_migrate *migrate)
+static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
+ struct nouveau_drm *drm, dma_addr_t *dma_addrs)
{
- struct nouveau_drm *drm = migrate->drm;
+ struct nouveau_fence *fence;
+ unsigned long addr = args->start, nr_dma = 0, i;
+
+ for (i = 0; addr < args->end; i++) {
+ args->dst[i] = nouveau_dmem_migrate_...
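The loop introduced here is the core of the refactor: one destination slot per page, addr advanced a page at a time, and a separate nr_dma counter so only real DMA mappings get unmapped afterwards. A compilable sketch of just that skeleton (stand-in helper, no real migration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the per-page copy helper; returns a nonzero dst entry
 * when a page was actually set up for migration. */
static unsigned long migrate_copy_one(unsigned long src) { return src; }

/* The loop shape from the patch: one index per page in the chunk,
 * counting live DMA mappings so only those are unmapped later. */
static void migrate_chunk(unsigned long start, unsigned long end,
                          const unsigned long *src, unsigned long *dst)
{
    unsigned long addr = start, nr_dma = 0, i;

    for (i = 0; addr < end; i++) {
        dst[i] = migrate_copy_one(src[i]);
        if (dst[i])
            nr_dma++;
        addr += PAGE_SIZE;
    }
    printf("%lu pages mapped for DMA\n", nr_dma);
    /* ...emit fence, migrate_vma_pages(), wait, unmap nr_dma entries... */
}

int main(void)
{
    unsigned long src[3] = { 1, 0, 2 }, dst[3];
    migrate_chunk(0, 3 * PAGE_SIZE, src, dst);
    return 0;
}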
2019 Aug 13
0
[PATCH] nouveau/hmm: map pages after migration
> ...
> out_dma_unmap:
> @@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> out_free_page:
> nouveau_dmem_page_free_locked(drm, dpage);
> out:
> + *pfn = NVIF_VMM_PFNMAP_V0_NONE;
> return 0;
> }
>
> static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> - struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> + struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
> {
> struct nouveau_fence *fence;
> unsigned long addr = args->start, nr_dma = 0, i;
>
> for (i = 0; addr < args->en...
2020 Mar 03
2
[PATCH v2] nouveau/hmm: map pages after migration
...igrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
out_dma_unmap:
@@ -587,18 +594,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
+ *pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
- struct migrate_vma *args, dma_addr_t *dma_addrs)
+ struct migrate_vma *args, dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
args->dst[i] = nou...
2020 Mar 04
5
[PATCH v3 0/4] nouveau/hmm: map pages after migration
Originally patch 4 was targeted for Jason's rdma tree since other HMM
related changes were queued there. Now that those have been merged,
these patches just contain changes to nouveau so they could go through
any tree. I guess Ben Skeggs' tree would be appropriate.
Changes since v2:
Added patches 1-3 to fix some minor issues.
Eliminated nouveau_find_svmm() since it is easily found.
2019 Aug 08
0
[PATCH] nouveau/hmm: map pages after migration
...ong addr,
> - unsigned long src, dma_addr_t *dma_addr)
> + struct vm_area_struct *vma, unsigned long src,
> + dma_addr_t *dma_addr, u64 *pfn)
I'll pick up the removal of the not needed addr argument for the patch
introducing nouveau_dmem_migrate_copy_one, thanks,
> static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> - struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> + struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
> {
> struct nouveau_fence *fence;
> unsigned long addr = args->start, nr_dma = 0, i;
>
> for (i = 0; addr < args->en...
2019 Aug 08
10
turn hmm migrate_vma upside down v2
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which starts revamping the
migrate_vma functionality. The prime idea is to export three slightly
lower level functions and thus avoid the need for migrate_vma_ops
callbacks.
Diffstat:
5 files changed, 281 insertions(+), 607 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git
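For context, the three lower-level functions this cover letter refers to are, in the form that eventually landed, migrate_vma_setup(), migrate_vma_pages() and migrate_vma_finalize(). A stubbed sketch of the caller-driven sequence they enable, replacing the old alloc_and_copy/finalize_and_map callbacks (all bodies below are stand-ins, not kernel code):

#include <stdio.h>

struct migrate_vma { unsigned long start, end; };

static int  migrate_vma_setup(struct migrate_vma *args)    { (void)args; return 0; }
static void migrate_vma_pages(struct migrate_vma *args)    { (void)args; }
static void migrate_vma_finalize(struct migrate_vma *args) { (void)args; }

int main(void)
{
    struct migrate_vma args = { .start = 0, .end = 4096 };

    /* 1. collect and isolate the source pages */
    if (migrate_vma_setup(&args))
        return 1;
    /* 2. the driver copies the data itself, no alloc_and_copy callback */
    /* 3. install the destination pages */
    migrate_vma_pages(&args);
    /* 4. restore anything that could not be migrated */
    migrate_vma_finalize(&args);
    puts("done");
    return 0;
}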
2019 Jul 29
24
turn the hmm migrate_vma upside down
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which starts revamping the
migrate_vma functionality. The prime idea is to export three slightly
lower level functions and thus avoid the need for migrate_vma_ops
callbacks.
Diffstat:
4 files changed, 285 insertions(+), 602 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git
2019 Aug 14
20
turn hmm migrate_vma upside down v3
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which starts revamping the
migrate_vma functionality. The prime idea is to export three slightly
lower level functions and thus avoid the need for migrate_vma_ops
callbacks.
Diffstat:
7 files changed, 282 insertions(+), 614 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git
2020 Jul 23
0
[PATCH v4 4/6] nouveau/svm: use the new migration invalidation
..._copy_one(struct nouveau_drm *drm,
goto out_free_page;
}
+ dpage->zone_device_data = svmm;
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
@@ -584,8 +592,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
- args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
- dma_addrs + nr_dma, pfns + i);
+ args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
+...
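The interesting line here is dpage->zone_device_data = svmm: the device-private page itself carries a back-pointer to its SVM address space, so a later migration-invalidation callback can recover the right svmm from the page alone. A toy sketch of that idea with stand-in types:

#include <stdio.h>

/* Stand-in types: the real kernel struct page reserves a
 * zone_device_data field for exactly this kind of driver back-pointer. */
struct svmm { const char *owner; };
struct page { void *zone_device_data; };

int main(void)
{
    struct svmm svmm = { .owner = "process A" };
    struct page dpage = { 0 };

    /* At migration time, stash the address-space handle in the page. */
    dpage.zone_device_data = &svmm;

    /* Later, an invalidation callback finds it from the page alone. */
    struct svmm *s = dpage.zone_device_data;
    printf("invalidate mappings for %s\n", s->owner);
    return 0;
}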
2023 Aug 29
1
[PATCH drm-misc-next] drm/nouveau: fence: fix undefined fence state after emit
...ence, chunk->drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -677,8 +675,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_ma...
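The fence fix above folds emission into creation: nouveau_fence_new() now takes the channel, so a fence can never be observed in a created-but-not-yet-emitted state. A small sketch of why the one-step constructor closes that window (hypothetical stand-in types, not the real fence API):

#include <stdio.h>
#include <stdlib.h>

struct channel { const char *name; };
struct fence   { struct channel *chan; };

/* After the patch, creation and emission are one step; there is no
 * moment where the fence exists without a channel. (Stand-in code;
 * the real function is nouveau_fence_new().) */
static int fence_new(struct fence **pf, struct channel *chan)
{
    struct fence *f = malloc(sizeof(*f));
    if (!f)
        return -1;
    f->chan = chan;   /* emitted as part of construction */
    *pf = f;
    return 0;
}

int main(void)
{
    struct channel migrate_chan = { .name = "migrate.chan" };
    struct fence *fence = NULL;

    if (fence_new(&fence, &migrate_chan) == 0) {
        printf("fence emitted on %s\n", fence->chan->name);
        free(fence);
    }
    return 0;
}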
2019 Aug 08
2
[PATCH] nouveau/hmm: map pages after migration
...rc, dma_addr_t *dma_addr)
>> + struct vm_area_struct *vma, unsigned long src,
>> + dma_addr_t *dma_addr, u64 *pfn)
>
> I'll pick up the removal of the not needed addr argument for the patch
> introducing nouveau_dmem_migrate_copy_one, thanks,
>
>> static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
>> - struct nouveau_drm *drm, dma_addr_t *dma_addrs)
>> + struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
>> {
>> struct nouveau_fence *fence;
>> unsigned long addr = args->start, nr_dma = 0, i;
>>
>>...
2020 May 08
11
[PATCH 0/6] nouveau/hmm: add support for mapping large pages
hmm_range_fault() returns an array of page frame numbers and flags for
how the pages are mapped in the requested process' page tables. The PFN
can be used to get the struct page with hmm_pfn_to_page() and the page size
order can be determined with compound_order(page) but if the page is larger
than order 0 (PAGE_SIZE), there is no indication that the page is mapped
using a larger page size. To
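hmm_pfn_to_page() and compound_order() are the two kernel helpers this cover letter names; everything else in the sketch below is a stub, just to show the lookup shape and the gap being described (the allocation order is known, the mapping size is not):

#include <stdio.h>

/* Stand-in stubs for the kernel helpers named above. */
struct page { unsigned int order; };
static struct page pool[1];
static struct page *hmm_pfn_to_page(unsigned long pfn) { (void)pfn; return &pool[0]; }
static unsigned int compound_order(struct page *p) { return p->order; }

int main(void)
{
    unsigned long pfn = 42;
    struct page *page = hmm_pfn_to_page(pfn);

    pool[0].order = 9; /* pretend: a 2MB huge page on x86 */

    /* compound_order() gives the allocation order of the page, but as
     * the cover letter notes, the pfn/flags array says nothing about
     * whether the CPU page tables map it at that size. */
    printf("pfn %lu -> order %u\n", pfn, compound_order(page));
    return 0;
}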
2020 May 20
2
[PATCH] nouveau/hmm: fix migrate zero page to GPU
...NG_ERROR;
+ if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
+ NOUVEAU_APER_VRAM, paddr))
+ goto out_free_page;
+ }
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
@@ -528,7 +585,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
dma_addrs + nr_dma, pfns + i);
- if (args->dst[i])
+ if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
nr_dma++;
addr += PAGE...
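The fix here changes what nr_dma counts: a destination page can exist (the zero-page case is cleared on the GPU by clear_func) without any DMA mapping having been made, so the counter must follow dma_mapping_error() on the mapping itself. A compilable sketch of that bookkeeping (stand-in helpers):

#include <stdint.h>
#include <stdio.h>

#define DMA_ERROR ((uint64_t)-1)

/* Stand-in for the per-page copy helper: a "zero page" (src == 0) is
 * cleared on the GPU instead, so no DMA mapping is produced. */
static void copy_one(unsigned long src, uint64_t *dma_addr)
{
    *dma_addr = src ? (uint64_t)src << 12 : DMA_ERROR;
}

static int dma_mapping_error(uint64_t addr) { return addr == DMA_ERROR; }

int main(void)
{
    unsigned long src[3] = { 7, 0 /* zero page */, 9 };
    uint64_t dma_addrs[3];
    unsigned long nr_dma = 0;

    for (int i = 0; i < 3; i++) {
        copy_one(src[i], &dma_addrs[nr_dma]);
        /* The fix: advance nr_dma only when an actual DMA mapping was
         * made, so the later unmap loop never touches a bogus slot. */
        if (!dma_mapping_error(dma_addrs[nr_dma]))
            nr_dma++;
    }
    printf("%lu mappings to unmap later\n", nr_dma);
    return 0;
}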
2019 Jul 29
0
[PATCH 1/9] mm: turn migrate_vma upside down
...9,10 +773,15 @@ void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
*/
}
-static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
- .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
- .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
-};
+static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
+ struct nouveau_migrate *migrate)
+{
+ nouveau_dmem_migrate_alloc_and_copy(args->vma, args->src, args->dst,
+ args->start, args->end, migrate);
+ migrate_vma_pages(args);
+ nouveau_dmem_migrate_finalize_and_map(migrate);
+ migrate_vma_finalize(args);
+}...
2019 Aug 14
0
[PATCH 01/10] mm: turn migrate_vma upside down
...9,10 +773,15 @@ void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
*/
}
-static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
- .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
- .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
-};
+static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
+ struct nouveau_migrate *migrate)
+{
+ nouveau_dmem_migrate_alloc_and_copy(args->vma, args->src, args->dst,
+ args->start, args->end, migrate);
+ migrate_vma_pages(args);
+ nouveau_dmem_migrate_finalize_and_map(migrate);
+ migrate_vma_finalize(args);
+}...
2019 Jul 31
1
[PATCH 1/9] mm: turn migrate_vma upside down
...inalize_and_map(struct vm_area_struct *vma,
> */
> }
>
> -static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
> - .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
> - .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
> -};
> +static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> + struct nouveau_migrate *migrate)
> +{
> + nouveau_dmem_migrate_alloc_and_copy(args->vma, args->src, args->dst,
> + args->start, args->end, migrate);
> + migrate_vma_pages(args);
> + nouveau_dmem_migrate_finalize_and_map(migrate);
>...
2020 Mar 16
6
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
On 3/16/20 10:52 AM, Christoph Hellwig wrote:
> No driver has actually properly wired up and supported this feature.
> There is various code related to it in nouveau, but as far as I can tell
> it never actually got turned on, and the only changes since the initial
> commit are global cleanups.
This is not actually true. OpenCL 2.x does support SVM with nouveau and
device private
2020 Nov 06
4
[PATCH 0/3] drm/nouveau: extend the lifetime of nouveau_drm
Hi folks,
Currently, when the device is removed (or the driver is unbound), the
nouveau_drm structure is de-allocated. However, it's still accessible from
and used by some DRM layer callbacks. For example, file handles can be
closed after the device has been removed (physically or otherwise). This
series converts the Nouveau device structure to be allocated and
de-allocated with the
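The direction of the series, reduced to a toy example: tie the structure's free to the last reference rather than to unbind, so late DRM callbacks still see valid memory. (A sketch of the idea only, not the series' actual mechanism:)

#include <stdio.h>
#include <stdlib.h>

struct nouveau_drm_like {
    int refcount;
};

/* Free only when the last user drops its reference. */
static void drm_put(struct nouveau_drm_like *drm)
{
    if (--drm->refcount == 0) {
        puts("last reference gone, freeing");
        free(drm);
    }
}

int main(void)
{
    struct nouveau_drm_like *drm = malloc(sizeof(*drm));
    if (!drm)
        return 1;
    drm->refcount = 2;      /* driver bind + an open file handle */

    drm_put(drm);           /* device removed / driver unbound */
    puts("file handle still safe to close");
    drm_put(drm);           /* last file handle closed: now freed */
    return 0;
}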