Displaying 12 results from an estimated 12 matches for "nvif_vmm_pfnmap_v0".
2020 Mar 03
1
[PATCH v2] nouveau/hmm: map pages after migration
...so it could go through any tree.
>> I guess Ben Skeggs' tree would be appropriate.
>
> Yep
>
>> +static inline struct nouveau_pfnmap_args *
>> +nouveau_pfns_to_args(void *pfns)
>
> don't use static inline inside C files
OK.
>> +{
>> + struct nvif_vmm_pfnmap_v0 *p =
>> + container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
>> +
>> + return container_of(p, struct nouveau_pfnmap_args, p);
>
> And this should just be
>
> return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
Much simpler, thanks.
>> +stati...
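Putting the two review comments above together, the helper loses the "inline" and collapses to a single container_of(). A minimal sketch, assuming the nouveau_pfnmap_args layout from the patch (an nvif_vmm_pfnmap_v0 member named "p" whose phys[] array is what the caller passes in as "pfns"):

	/* sketch only: reflects the reviewer's suggestion, not the final patch */
	static struct nouveau_pfnmap_args *
	nouveau_pfns_to_args(void *pfns)
	{
		/* one step from the phys[] array back to the enclosing args struct */
		return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
	}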
2020 Mar 03
2
[PATCH v2] nouveau/hmm: map pages after migration
...or(dev, *dma_addr))
 		goto out_free_page;
+	paddr = nouveau_dmem_page_addr(dpage);
 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
-			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
-			*dma_addr))
+			paddr, NOUVEAU_APER_HOST, *dma_addr))
 		goto out_dma_unmap;
+	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+	if (src & MIGRATE_PFN_WRITE)
+		*pfn |= NVIF_VMM_PFNMAP_V0_W;
 	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 out_dma_unmap:
@@ -587,18 +594,19 @@ static unsigned long nouv...
2019 Aug 07
4
[PATCH] nouveau/hmm: map pages after migration
...or(dev, *dma_addr))
 		goto out_free_page;
+	paddr = nouveau_dmem_page_addr(dpage);
 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
-			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
-			*dma_addr))
+			paddr, NOUVEAU_APER_HOST, *dma_addr))
 		goto out_dma_unmap;
+	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+	if (src & MIGRATE_PFN_WRITE)
+		*pfn |= NVIF_VMM_PFNMAP_V0_W;
 	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 out_dma_unmap:
@@ -590,18 +597,19 @@ static unsigned long nouv...
2020 Mar 03
0
[PATCH v2] nouveau/hmm: map pages after migration
...this
> patch just contains changes to nouveau so it could go through any tree.
> I guess Ben Skeggs' tree would be appropriate.
Yep
> +static inline struct nouveau_pfnmap_args *
> +nouveau_pfns_to_args(void *pfns)
don't use static inline inside C files
> +{
> + struct nvif_vmm_pfnmap_v0 *p =
> + container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
> +
> + return container_of(p, struct nouveau_pfnmap_args, p);
And this should just be
return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
> +static struct nouveau_svmm *
> +nouveau_find_svmm(struct nouv...
2019 Aug 13
0
[PATCH] nouveau/hmm: map pages after migration
...t;
> +	paddr = nouveau_dmem_page_addr(dpage);
>  	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
> -			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
> -			*dma_addr))
> +			paddr, NOUVEAU_APER_HOST, *dma_addr))
>  		goto out_dma_unmap;
>
> +	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
> +		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
> +	if (src & MIGRATE_PFN_WRITE)
> +		*pfn |= NVIF_VMM_PFNMAP_V0_W;
>  	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
>
>  out_dma_unmap:
> @@ -590,18 +...
2020 Jun 19
0
[PATCH 08/16] nouveau/hmm: fault one page at a time
...or (i = 0; i < npages; ++i) {
-		struct page *page;
-
-		if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
-			ioctl_addr[i] = 0;
-			continue;
-		}
-
-		page = hmm_pfn_to_page(range->hmm_pfns[i]);
-		if (is_device_private_page(page))
-			ioctl_addr[i] = nouveau_dmem_page_addr(page) |
-					NVIF_VMM_PFNMAP_V0_V |
-					NVIF_VMM_PFNMAP_V0_VRAM;
-		else
-			ioctl_addr[i] = page_to_phys(page) |
-					NVIF_VMM_PFNMAP_V0_V |
-					NVIF_VMM_PFNMAP_V0_HOST;
-		if (range->hmm_pfns[i] & HMM_PFN_WRITE)
-			ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+	if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
+		ioctl...
2020 Jul 01
0
[PATCH v3 1/5] nouveau/hmm: fault one page at a time
...or (i = 0; i < npages; ++i) {
-		struct page *page;
-
-		if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
-			ioctl_addr[i] = 0;
-			continue;
-		}
-
-		page = hmm_pfn_to_page(range->hmm_pfns[i]);
-		if (is_device_private_page(page))
-			ioctl_addr[i] = nouveau_dmem_page_addr(page) |
-					NVIF_VMM_PFNMAP_V0_V |
-					NVIF_VMM_PFNMAP_V0_VRAM;
-		else
-			ioctl_addr[i] = page_to_phys(page) |
-					NVIF_VMM_PFNMAP_V0_V |
-					NVIF_VMM_PFNMAP_V0_HOST;
-		if (range->hmm_pfns[i] & HMM_PFN_WRITE)
-			ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+	if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
+		ioctl...
2020 Mar 04
5
[PATCH v3 0/4] nouveau/hmm: map pages after migration
Originally patch 4 was targeted for Jason's rdma tree since other HMM
related changes were queued there. Now that those have been merged,
these patches just contain changes to nouveau so they could go through
any tree. I guess Ben Skeggs' tree would be appropriate.
Changes since v2:
Added patches 1-3 to fix some minor issues.
Eliminated nouveau_find_svmm() since it is easily found.
2020 May 08
11
[PATCH 0/6] nouveau/hmm: add support for mapping large pages
hmm_range_fault() returns an array of page frame numbers and flags for
how the pages are mapped in the requested process' page tables. The PFN
can be used to get the struct page with hmm_pfn_to_page() and the page size
order can be determined with compound_order(page), but if the page is larger
than order 0 (PAGE_SIZE), there is no indication that the page is mapped
using a larger page size. To
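To illustrate the gap described in that cover letter, a driver walking the hmm_range_fault() output might look roughly like the sketch below (kernel context assumed; "range" is a populated struct hmm_range, and HMM_PFN_VALID and hmm_pfn_to_page() are from include/linux/hmm.h). Nothing in this loop tells the driver whether the CPU mapped the address with a PMD- or PUD-sized entry:

	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!(range->hmm_pfns[i] & HMM_PFN_VALID))
			continue;

		page = hmm_pfn_to_page(range->hmm_pfns[i]);
		/* compound_order() describes the struct page, not the size of
		 * the CPU page table entry mapping it, so the driver cannot
		 * tell from here that a large device PTE would be safe.
		 */
		pr_debug("page order %u\n", compound_order(page));
	}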
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD sized or PUD sized CPU page table entry and therefore the device
driver can safely map system memory using larger device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for
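As a rough sketch of how a driver would consume the proposed flags (HMM_PFN_PMD and HMM_PFN_PUD are the names from this v2 posting, not a merged API; the v3 series below replaces them with hmm_pfn_to_map_order()):

	unsigned long size;
	unsigned long hmm_pfn = range->hmm_pfns[i];

	if (hmm_pfn & HMM_PFN_VALID) {
		/* hypothetical driver-side size choice based on the CPU mapping */
		if (hmm_pfn & HMM_PFN_PUD)
			size = PUD_SIZE;
		else if (hmm_pfn & HMM_PFN_PMD)
			size = PMD_SIZE;
		else
			size = PAGE_SIZE;
	}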
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order()
function. This allows a device driver to know that a given 4K PFN is
actually mapped by the CPU using a larger sized CPU page table entry and
therefore the device driver can safely map system memory using larger
device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's
hmm tree. These were
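A corresponding sketch for the v3 interface (hmm_pfn_to_map_order() is the helper this series adds; dev_map_range(), dev, and addr are made-up placeholders for whatever the driver uses to write its own MMU PTEs):

	unsigned long hmm_pfn = range->hmm_pfns[i];

	if (hmm_pfn & HMM_PFN_VALID) {
		struct page *page = hmm_pfn_to_page(hmm_pfn);
		/* order of the CPU mapping: 0 = 4K, 9 = 2M PMD on x86_64, etc. */
		unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

		/* the device can now safely map the same span with a large PTE */
		dev_map_range(dev, addr, page_to_phys(page), PAGE_SIZE << order);
	}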
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8; the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split