Displaying 20 results from an estimated 49 matches for "nvif_vmm_pfnmap_v0_addr_shift".
2019 May 20
3
[PATCH] drm/nouveau/svm: Convert to use hmm_range_fault()
...rivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 93ed43c..8d56bd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -649,7 +649,7 @@ struct nouveau_svmm {
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&range, true);
+ ret = hmm_range_fault(&range, true);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!hmm_vma_range_done(&range)) {
--
1.9.1
2020 Jun 23
1
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...(!dpage)
> goto out;
> @@ -560,6 +566,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> goto out_free_page;
> }
>
> +done:
> *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
> ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
> if (src & MIGRATE_PFN_WRITE)
> @@ -615,6 +622,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
> struct migrate_vma args = {
> .vma = vma,
> .start = start,
> + .src_owner = drm->dev,
Hi Ralph,
This .src_owner setting does look like a required f...
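For context, a minimal sketch of the migrate_vma call pattern the hunk above touches, assuming the 5.8-era API in which migrate_vma_setup() skips device-private pages unless their pgmap owner matches .src_owner; the helper name and parameters here are illustrative, not the driver's code:

static int example_migrate_back(struct vm_area_struct *vma, void *owner,
				unsigned long start, unsigned long end,
				unsigned long *src, unsigned long *dst)
{
	struct migrate_vma args = {
		.vma       = vma,
		.start     = start,
		.end       = end,
		.src       = src,
		.dst       = dst,
		.src_owner = owner,	/* e.g. drm->dev, as in the hunk above */
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/*
	 * For each src[i] with MIGRATE_PFN_MIGRATE set, allocate the
	 * destination page, copy the data, and fill in dst[i] here.
	 */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}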
2019 Oct 15
0
[PATCH hmm 11/15] nouveau: use mmu_range_notifier instead of hmm_mirror
...the GPU. */
+ struct hmm_range range = {
+ .notifier = &notifier->notifier,
+ .start = notifier->notifier.interval_tree.start,
+ .end = notifier->notifier.interval_tree.last + 1,
+ .pfns = pfns,
+ .flags = nouveau_svm_pfn_flags,
+ .values = nouveau_svm_pfn_values,
+ .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ };
+ struct mm_struct *mm = notifier->notifier.mm;
long ret;
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
+ while (true) {
+ if (time_after(jiffies, timeout))
+ return -EBUSY;
- ret = hmm_range_register(range, &svmm->mirror);
- if (ret) {
- up_read(&s...
2020 Jan 14
2
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...ee.start,
> - .end = notifier->notifier.interval_tree.last + 1,
> + .start = start,
> + .end = end,
> .pfns = pfns,
> .flags = nouveau_svm_pfn_flags,
> .values = nouveau_svm_pfn_values,
> + .default_flags = 0,
> + .pfn_flags_mask = ~0UL,
> .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
> };
> - struct mm_struct *mm = notifier->notifier.mm;
> + struct mm_struct *mm = svmm->mm;
> long ret;
>
> while (true) {
> if (time_after(jiffies, timeout))
> return -EBUSY;
>
> - range.notifier_seq = mmu_interval_read_begin(range.notifier);...
2019 May 27
0
[PATCH] drm/nouveau/svm: Convert to use hmm_range_fault()
...ouveau/nouveau_svm.c
> index 93ed43c..8d56bd6 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
> @@ -649,7 +649,7 @@ struct nouveau_svmm {
> range.values = nouveau_svm_pfn_values;
> range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
> again:
> - ret = hmm_vma_fault(&range, true);
> + ret = hmm_range_fault(&range, true);
> if (ret == 0) {
> mutex_lock(&svmm->mutex);
> if (!hmm_vma_range_done(&range)...
2019 Jun 07
0
[PATCH] drm/nouveau/svm: Convert to use hmm_range_fault()
...ouveau/nouveau_svm.c
> index 93ed43c..8d56bd6 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
> @@ -649,7 +649,7 @@ struct nouveau_svmm {
> range.values = nouveau_svm_pfn_values;
> range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
> again:
> - ret = hmm_vma_fault(&range, true);
> + ret = hmm_range_fault(&range, true);
> if (ret == 0) {
> mutex_lock(&svmm->mutex);
> if (!hmm_vma_range_done(&range)...
2019 Jul 03
0
[PATCH 3/6] nouveau: remove the block parameter to nouveau_range_fault
...ge_fault(range, block);
+ ret = hmm_range_fault(range, true);
if (ret <= 0) {
if (ret == -EBUSY || !ret) {
up_read(&range->vma->vm_mm->mmap_sem);
@@ -697,7 +696,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = nouveau_range_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!nouveau_range_done(&range)) {
--
2.20.1
2019 Jul 22
0
[PATCH 3/6] nouveau: remove the block parameter to nouveau_range_fault
...ge_fault(range, block);
+ ret = hmm_range_fault(range, true);
if (ret <= 0) {
if (ret == -EBUSY || !ret) {
up_read(&range->vma->vm_mm->mmap_sem);
@@ -690,7 +689,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = nouveau_range_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!nouveau_range_done(&range)) {
--
2.20.1
2019 Jul 30
0
[PATCH 03/13] nouveau: pass struct nouveau_svmm to nouveau_range_fault
...- ret = hmm_range_register(range, mirror,
+ ret = hmm_range_register(range, &svmm->mirror,
range->start, range->end,
PAGE_SHIFT);
if (ret) {
@@ -689,7 +689,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = nouveau_range_fault(&svmm->mirror, &range);
+ ret = nouveau_range_fault(svmm, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!nouveau_range_done(&range)) {
--
2.20.1
2019 Aug 06
0
[PATCH 03/15] nouveau: pass struct nouveau_svmm to nouveau_range_fault
...== 0)
ret = -EBUSY;
- up_read(&range->hmm->mm->mmap_sem);
+ up_read(&svmm->mm->mmap_sem);
hmm_range_unregister(range);
return ret;
}
@@ -689,7 +689,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = nouveau_range_fault(&svmm->mirror, &range);
+ ret = nouveau_range_fault(svmm, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!nouveau_range_done(&range)) {
--
2.20.1
2020 Jun 22
0
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...e = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
goto out;
@@ -560,6 +566,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
goto out_free_page;
}
+done:
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
@@ -615,6 +622,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
struct migrate_vma args = {
.vma = vma,
.start = start,
+ .src_owner = drm->dev,
};
unsigned long i;
u64 *pfns;
--
2.20.1
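Since every match centers on the same PFN-map encoding, here is a small standalone sketch of how a physical address is packed into an NVIF PFN entry. The macro values are assumed to match drivers/gpu/drm/nouveau/include/nvif/if000c.h (valid bit 0, write bit 1, VRAM aperture 0x10, address field starting at bit 12) and a 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT                     12
#define NVIF_VMM_PFNMAP_V0_V           0x0000000000000001ULL
#define NVIF_VMM_PFNMAP_V0_W           0x0000000000000002ULL
#define NVIF_VMM_PFNMAP_V0_VRAM        0x0000000000000010ULL
#define NVIF_VMM_PFNMAP_V0_ADDR_SHIFT  12

static uint64_t pack_pfn(uint64_t paddr, int writable)
{
	/* Valid VRAM entry with the page frame number in the address field. */
	uint64_t pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
		       ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);

	if (writable)
		pfn |= NVIF_VMM_PFNMAP_V0_W;
	return pfn;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)pack_pfn(0x12345000ULL, 1));
	return 0;
}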
2019 Jul 22
0
[PATCH 2/6] mm: move hmm_vma_range_done and hmm_vma_fault to nouveau
...if (ret == -EAGAIN)
+ ret = -EBUSY;
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range, true);
if (ret == 0) {
mutex_lock(&svmm->mutex);
- if (!hmm_vma_range_done(&range)) {
+ if (!nouveau_range_done(&range)) {
mutex_unl...
2020 Jun 22
7
[RESEND PATCH 0/3] nouveau: fixes for SVM
These are based on 5.8.0-rc2 and intended for Ben Skeggs' nouveau tree.
I believe the changes can be queued for 5.8-rcX after being reviewed.
These were part of a larger series but I'm resending them separately as
suggested by Jason Gunthorpe.
https://lore.kernel.org/linux-mm/20200619215649.32297-1-rcampbell@nvidia.com/
Note that in order to exercise/test patch 2 here, you will need a
2019 Jul 01
0
[PATCH 20/22] mm: move hmm_vma_fault to nouveau
...se if (ret == -EAGAIN)
+ ret = -EBUSY;
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,7 +701,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range, true);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!hmm_range_unregister(&range)) {
diff --git a/include/linux/hmm.h b/include/linux/hmm.h...
2019 Jul 03
10
hmm_range_fault related fixes and legacy API removal v2
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
Changes since v1:
- don't return the valid state from hmm_range_unregister
- additional nouveau cleanups
2019 Jul 03
0
[PATCH 2/6] mm: move hmm_vma_range_done and hmm_vma_fault to nouveau
...if (ret == -EAGAIN)
+ ret = -EBUSY;
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +697,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range, true);
if (ret == 0) {
mutex_lock(&svmm->mutex);
- if (!hmm_vma_range_done(&range)) {
+ if (!nouveau_range_done(&range)) {
mutex_unl...
2020 Jan 15
0
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...>notifier.interval_tree.last + 1,
>> + .start = start,
>> + .end = end,
>> .pfns = pfns,
>> .flags = nouveau_svm_pfn_flags,
>> .values = nouveau_svm_pfn_values,
>> + .default_flags = 0,
>> + .pfn_flags_mask = ~0UL,
>> .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
>> };
>> - struct mm_struct *mm = notifier->notifier.mm;
>> + struct mm_struct *mm = svmm->mm;
>> long ret;
>>
>> while (true) {
>> if (time_after(jiffies, timeout))
>> return -EBUSY;
>>
>> - range.notifier_...
2019 Jul 24
10
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
The first 4 patches are a bug fix for nouveau, which I suspect should
go into this merge window even if the code is marked as staging, just
to avoid people copying the breakage.
Changes since v2:
- new patch
2020 Jul 23
0
[PATCH v4 4/6] nouveau/svm: use the new migration invalidation
...>dev;
struct page *dpage, *spage;
@@ -561,6 +568,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
goto out_free_page;
}
+ dpage->zone_device_data = svmm;
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
@@ -584,8 +592,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
- args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
-...
2019 Jul 03
1
[PATCH 20/22] mm: move hmm_vma_fault to nouveau
...ster(range);
> + return ret;
> + }
> + return 0;
> +}
> +
> static int
> nouveau_svm_fault(struct nvif_notify *notify)
> {
> @@ -649,7 +701,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
> range.values = nouveau_svm_pfn_values;
> range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
> again:
> - ret = hmm_vma_fault(&svmm->mirror, &range, true);
> + ret = nouveau_range_fault(&svmm->mirror, &range, true);
> if (ret == 0) {
> mutex_lock(&svmm->mutex);
> if (!hmm_range_unregister(&range)) {
> diff --git a/inc...