Displaying 20 results from an estimated 56 matches for "spage".
2019 Jul 31
1
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...> struct device *dev = drm->dev->dev;
> - unsigned long addr, i, npages = 0;
> - nouveau_migrate_copy_t copy;
> - int ret;
> -
> -
> - /* First allocate new memory */
> - for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
> - struct page *dpage, *spage;
> -
> - dst_pfns[i] = 0;
> - spage = migrate_pfn_to_page(src_pfns[i]);
> - if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
> - continue;
> + struct page *dpage, *spage;
>
> - dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
> - if (!dpage) {
> - dst...
2019 Aug 08
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
..._one(struct nouveau_drm *drm,
+ struct vm_fault *vmf, struct migrate_vma *args,
+ dma_addr_t *dma_addr)
{
- struct nouveau_drm *drm = fault->drm;
struct device *dev = drm->dev->dev;
- unsigned long addr, i, npages = 0;
- nouveau_migrate_copy_t copy;
- int ret;
-
+ struct page *dpage, *spage;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
- /* First allocate new memory */
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct page *dpage, *spage;
-
- dst_pfns[i] = 0;
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))...
2019 Jul 29
0
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...{
- struct nouveau_drm *drm = fault->drm;
struct device *dev = drm->dev->dev;
- unsigned long addr, i, npages = 0;
- nouveau_migrate_copy_t copy;
- int ret;
-
-
- /* First allocate new memory */
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct page *dpage, *spage;
-
- dst_pfns[i] = 0;
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
+ struct page *dpage, *spage;
- dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
- if (!dpage) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- continue;
- }
-...
2019 Aug 08
1
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...t *vmf, struct migrate_vma *args,
> + dma_addr_t *dma_addr)
> {
> - struct nouveau_drm *drm = fault->drm;
> struct device *dev = drm->dev->dev;
> - unsigned long addr, i, npages = 0;
> - nouveau_migrate_copy_t copy;
> - int ret;
> -
> + struct page *dpage, *spage;
> + vm_fault_t ret = VM_FAULT_SIGBUS;
You can remove this line and return VM_FAULT_SIGBUS in the error path below.
>
> - /* First allocate new memory */
> - for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
> - struct page *dpage, *spage;
> -
> - dst_pf...
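The suggestion in that review comment, sketched out: drop the pre-initialized ret and return the error code straight from the failure branch. This is only a sketch; names follow the quoted patch, and everything past the allocation is elided.

	spage = migrate_pfn_to_page(args->src[0]);
	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
		return 0;

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		return VM_FAULT_SIGBUS;	/* error path returns directly */

	/* ... dma map dpage, copy from device memory, set args->dst[0] ... */
	return 0;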
2019 Jul 29
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_vma
...{
- struct nouveau_drm *drm = migrate->drm;
struct device *dev = drm->dev->dev;
- unsigned long addr, i, npages = 0;
- nouveau_migrate_copy_t copy;
- int ret;
-
- /* First allocate new memory */
- for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct page *dpage, *spage;
-
- dst_pfns[i] = 0;
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- dpage = nouveau_dmem_page_alloc_locked(drm);
- if (!dpage)
- continue;
-
- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
- MIGRATE_PFN_LOCK...
2020 Jun 19
0
[PATCH 15/16] mm/hmm/test: add self tests for THP migration
...>dst;
- unsigned long addr;
+ unsigned long end_pfn = args->end >> PAGE_SHIFT;
+ unsigned long pfn;
- for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
- src++, dst++) {
+ for (pfn = args->start >> PAGE_SHIFT; pfn < end_pfn; ) {
struct page *spage;
struct page *dpage;
struct page *rpage;
+ bool is_huge;
if (!(*src & MIGRATE_PFN_MIGRATE))
- continue;
+ goto next;
/*
* Note that spage might be NULL which is OK since it is an
@@ -595,7 +645,6 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,...
2019 Jul 29
0
[PATCH 3/9] nouveau: factor out device memory address calculation
...age->zone_device_data;
@@ -169,9 +177,7 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
/* Copy things over */
copy = drm->dmem->migrate.copy_func;
for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
- struct nouveau_dmem_chunk *chunk;
struct page *spage, *dpage;
- u64 src_addr, dst_addr;
dpage = migrate_pfn_to_page(dst_pfns[i]);
if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
@@ -194,14 +200,10 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
continue;
}
- dst_addr = fault->dma[fault->npages++];
-
- chunk...
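The helper being factored out here centralizes the device memory address computation that the removed chunk/src_addr/dst_addr locals did inline. A rough sketch of its shape, going by the zone_device_data use visible in the hunk; the chunk field names (pfn_first, bo->bo.offset) are assumptions for illustration, not taken from the patch.

	static u64 nouveau_dmem_page_addr(struct page *page)
	{
		/* Chunk bookkeeping hangs off the ZONE_DEVICE page. */
		struct nouveau_dmem_chunk *chunk = page->zone_device_data;
		unsigned long off = (page_to_pfn(page) - chunk->pfn_first)
					<< PAGE_SHIFT;

		return chunk->bo->bo.offset + off;
	}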
2019 Aug 08
10
turn hmm migrate_vma upside down v2
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which starts revamping the
migrate_vma functionality. The prime idea is to export three slightly
lower level functions and thus avoid the need for migrate_vma_ops
callbacks.
Diffstat:
5 files changed, 281 insertions(+), 607 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git
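For reference, the three lower level functions the series ends up exporting are migrate_vma_setup(), migrate_vma_pages() and migrate_vma_finalize(); a driver then drives the migration itself instead of filling in migrate_vma_ops. A rough sketch of the resulting call pattern (driver_alloc_and_copy() is a placeholder for the driver's own allocation/copy step, not a real helper):

	struct migrate_vma args = {
		.vma   = vma,
		.start = start,
		.end   = end,
		.src   = src_pfns,
		.dst   = dst_pfns,
	};

	/* Collect and isolate the source pages. */
	if (migrate_vma_setup(&args) || !args.cpages)
		return;

	/* Placeholder: allocate destination pages, fill args.dst[],
	 * and copy the data over. */
	driver_alloc_and_copy(&args);

	/* Install the destination pages, then drop the references. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);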
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted previously [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts but there may be some merge conflicts depending on
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted previously [1]. This version now
supports splitting a THP midway in the migration process which
led to a number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
2020 Jun 23
1
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...ba3caa 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> if (!(src & MIGRATE_PFN_MIGRATE))
> goto out;
>
> + if (spage && is_device_private_page(spage)) {
> + paddr = nouveau_dmem_page_addr(spage);
> + *dma_addr = DMA_MAPPING_ERROR;
> + goto done;
> + }
> +
> dpage = nouveau_dmem_page_alloc_locked(drm);
> if (!dpage)
> goto out;
> @@ -560,6 +566,7 @@ static unsigned l...
2020 Jul 23
0
[PATCH v4 4/6] nouveau/svm: use the new migration invalidation
...u_dmem.c
index 78b9e3c2a5b3..511fe04cd680 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -140,6 +140,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
+ struct nouveau_svmm *svmm;
spage = migrate_pfn_to_page(args->src[0]);
if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
@@ -154,14 +155,19 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
if (dma_mapping_error(dev, *dma_addr))
goto error_free_page...
2020 May 20
2
[PATCH] nouveau/hmm: fix migrate zero page to GPU
...e.copy_func = nvc0b5_migrate_copy;
+ drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
drm->dmem->migrate.chan = drm->ttm.chan;
return 0;
default:
@@ -487,21 +537,28 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
unsigned long paddr;
spage = migrate_pfn_to_page(src);
- if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
goto out;
- *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping...
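The functional change quoted above is that a NULL spage (the source is the shared zero page, so there is nothing to DMA-map) no longer aborts the migration; the VRAM destination is cleared via the new clear_func instead. A rough sketch of the resulting branch, reusing the copy_func arguments visible elsewhere in these results; the clear_func signature is an assumption:

	if (spage) {
		*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
						 paddr, NOUVEAU_APER_HOST,
						 *dma_addr))
			goto out_dma_unmap;
	} else {
		/* Zero page: nothing to copy, clear the VRAM page instead.
		 * clear_func signature assumed for illustration. */
		*dma_addr = DMA_MAPPING_ERROR;
		if (drm->dmem->migrate.clear_func(drm, 1, NOUVEAU_APER_VRAM,
						  paddr))
			goto out_free_page;
	}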
2019 Jul 29
24
turn the hmm migrate_vma upside down
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which starts revamping the
migrate_vma functionality. The prime idea is to export three slightly
lower level functions and thus avoid the need for migrate_vma_ops
callbacks.
Diffstat:
4 files changed, 285 insertions(+), 602 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git
2020 Jun 23
2
[RESEND PATCH 1/3] nouveau: fix migrate page regression
...rm/nouveau/nouveau_dmem.c
> @@ -550,7 +550,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> DMA_BIDIRECTIONAL);
> if (dma_mapping_error(dev, *dma_addr))
> goto out_free_page;
> - if (drm->dmem->migrate.copy_func(drm, page_size(spage),
> + if (drm->dmem->migrate.copy_func(drm, 1,
> NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
> goto out_dma_unmap;
> } else {
>
I Am Not A Nouveau Expert, nor is it really clear to me how
page_size(spage) came to contain something other than a page...
2020 Jun 22
7
[RESEND PATCH 0/3] nouveau: fixes for SVM
These are based on 5.8.0-rc2 and intended for Ben Skeggs' nouveau tree.
I believe the changes can be queued for 5.8-rcX after being reviewed.
These were part of a larger series but I'm resending them separately as
suggested by Jason Gunthorpe.
https://lore.kernel.org/linux-mm/20200619215649.32297-1-rcampbell@nvidia.com/
Note that in order to exercise/test patch 2 here, you will need a
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patch 7-8 prepare nouveau for the meat of this series which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2019 Aug 14
20
turn hmm migrate_vma upside down v3
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which starts revamping the
migrate_vma functionality. The prime idea is to export three slightly
lower level functions and thus avoid the need for migrate_vma_ops
callbacks.
Diffstat:
7 files changed, 282 insertions(+), 614 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git
2020 Jun 22
0
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...veau_dmem.c
index cc9993837508..f6a806ba3caa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
+ if (spage && is_device_private_page(spage)) {
+ paddr = nouveau_dmem_page_addr(spage);
+ *dma_addr = DMA_MAPPING_ERROR;
+ goto done;
+ }
+
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
goto out;
@@ -560,6 +566,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau...
2019 Aug 07
4
[PATCH] nouveau/hmm: map pages after migration
...au_dmem_migrate_copy_one(struct nouveau_drm *drm,
- struct vm_area_struct *vma, unsigned long addr,
- unsigned long src, dma_addr_t *dma_addr)
+ struct vm_area_struct *vma, unsigned long src,
+ dma_addr_t *dma_addr, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
+ unsigned long paddr;
spage = migrate_pfn_to_page(src);
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
@@ -572,17 +575,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
- return 0;
+ goto out;...