Displaying 20 results from an estimated 60 matches for "nouveau_svm_fault".
2019 Jul 03
1
[PATCH 4/5] nouveau: unlock mmap_sem on all errors from nouveau_range_fault
On 7/3/19 11:45 AM, Christoph Hellwig wrote:
> Currently nouveau_svm_fault expects nouveau_range_fault to never unlock
> mmap_sem, but the latter unlocks it for a random selection of error
> codes. Fix this up by always unlocking mmap_sem for non-zero return
> values in nouveau_range_fault, and only unlocking it in the caller
> for successful returns.
>
> ...
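The locking convention this fix establishes is easy to misread in diff form. A minimal sketch, with hypothetical stand-ins for nouveau_range_fault() and its caller rather than the actual driver code: the callee drops mmap_sem on every non-zero return, and the caller drops it only after a successful one.

#include <linux/mm_types.h>
#include <linux/rwsem.h>

/* Stand-in for nouveau_range_fault(): unlocks mmap_sem on ALL errors. */
static int range_fault_sketch(struct mm_struct *mm)
{
	int ret = 0;

	/* ... fault handling that may fail ... */
	if (ret) {
		up_read(&mm->mmap_sem);	/* callee unlocks on error */
		return ret;
	}
	return 0;			/* success: mmap_sem still held */
}

/* Stand-in for the caller, nouveau_svm_fault(): unlocks only on success. */
static int caller_sketch(struct mm_struct *mm)
{
	int ret;

	down_read(&mm->mmap_sem);
	ret = range_fault_sketch(mm);
	if (ret)
		return ret;		/* already unlocked by the callee */
	/* ... consume the fault result while still holding mmap_sem ... */
	up_read(&mm->mmap_sem);
	return 0;
}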
2019 Oct 15
0
[PATCH hmm 11/15] nouveau: use mmu_range_notifier instead of hmm_mirror
...svmm = svmm;
@@ -374,8 +355,6 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
mutex_unlock(&cli->mutex);
return 0;
-out_hmm_unregister:
- hmm_mirror_unregister(&svmm->mirror);
out_mm_unlock:
up_write(&current->mm->mmap_sem);
out_free:
@@ -503,43 +482,89 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-static inline bool
-nouveau_range_done(struct hmm_range *range)
+struct svm_notifier {
+ struct mmu_range_notifier notifier;
+ struct nouveau_svmm *svmm;
+};
+
+static bool nouveau_svm_range_invalidate(struct...
2019 Jul 03
8
hmm_range_fault related fixes and legacy API removal
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
2020 Jun 19
0
[PATCH 08/16] nouveau/hmm: fault one page at a time
...= hmm_range_fault(&range);
mmap_read_unlock(mm);
if (ret) {
- /*
- * FIXME: the input PFN_REQ flags are destroyed on
- * -EBUSY, we need to regenerate them, also for the
- * other continue below
- */
if (ret == -EBUSY)
continue;
return ret;
@@ -614,17 +605,12 @@ nouveau_svm_fault(struct nvif_notify *notify)
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
struct {
- struct {
- struct nvif_ioctl_v0 i;
- struct nvif_ioctl_mthd_v0 m;
- struct nvif_vmm_pfnmap_v0 p;
- } i;
- u64 phys[16];
+ struct nouveau_pfnmap_a...
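The hunk above relies on the standard hmm_range_fault() retry contract of that kernel era (~5.8): -EBUSY means the mmu interval notifier sequence moved underneath the fault, and the caller simply retries. A minimal sketch of that loop, with timeout and error paths trimmed; mm, range and ret are assumed to be set up as in the surrounding driver code.

	/* Retry loop sketch (~5.8 hmm API). */
	while (true) {
		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* invalidated concurrently: retry */
			return ret;
		}
		/*
		 * A real driver must still take its own lock and check
		 * mmu_interval_read_retry() before using range.hmm_pfns[],
		 * looping again if the range changed in the meantime.
		 */
		break;
	}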
2020 Jul 01
0
[PATCH v3 1/5] nouveau/hmm: fault one page at a time
...= hmm_range_fault(&range);
mmap_read_unlock(mm);
if (ret) {
- /*
- * FIXME: the input PFN_REQ flags are destroyed on
- * -EBUSY, we need to regenerate them, also for the
- * other continue below
- */
if (ret == -EBUSY)
continue;
return ret;
@@ -614,17 +605,12 @@ nouveau_svm_fault(struct nvif_notify *notify)
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
struct {
- struct {
- struct nvif_ioctl_v0 i;
- struct nvif_ioctl_mthd_v0 m;
- struct nvif_vmm_pfnmap_v0 p;
- } i;
- u64 phys[16];
+ struct nouveau_pfnmap_a...
2020 Apr 22
2
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...m_convert_pfn(struct nouveau_drm *drm, struct hmm_range *range,
> + u64 *ioctl_addr)
> {
> unsigned long i, npages;
>
> + /*
> + * The ioctl_addr prepared here is passed through nvif_object_ioctl()
> + * to an eventual DMA map on some call chain like:
> + * nouveau_svm_fault():
> + * args.i.m.method = NVIF_VMM_V0_PFNMAP
> + * nouveau_range_fault()
> + * nvif_object_ioctl()
> + * client->driver->ioctl()
> + * struct nvif_driver nvif_driver_nvkm:
> + * .ioctl = nvkm_client_ioctl
> + *...
2019 Oct 15
0
[PATCH hmm 10/15] nouveau: use mmu_notifier directly for invalidate_range_start
...@@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
if (ret <= 0) {
if (ret == 0)
ret = -EBUSY;
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
hmm_range_unregister(range);
return ret;
}
@@ -587,12 +615,15 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+ struct mm_struct *mm;
+
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->...
2019 Jul 22
15
hmm_range_fault related fixes and legacy API removal v2
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
The first 4 patches are a bug fix for nouveau, which I suspect should
go into this merge window even if the code is marked as staging, just
to avoid people copying the breakage.
Changes since v1:
- don't
2019 Jul 03
10
hmm_range_fault related fixes and legacy API removal v2
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
Changes since v1:
- don't return the valid state from hmm_range_unregister
- additional nouveau cleanups
2020 Jan 13
0
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...urrent->mm);
+ ret = __mmu_notifier_register(NULL, current->mm);
if (ret)
goto out_mm_unlock;
- /* Note, ownership of svmm transfers to mmu_notifier */
+
+ mmgrab(current->mm);
+ svmm->mm = current->mm;
cli->svm.svmm = svmm;
cli->svm.cli = cli;
@@ -482,65 +459,212 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-struct svm_notifier {
- struct mmu_interval_notifier notifier;
- struct nouveau_svmm *svmm;
-};
+static struct svmm_interval *nouveau_svmm_new_interval(
+ struct nouveau_svmm *svmm,
+ unsigned long st...
2019 Jul 24
10
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
The first 4 patches are a bug fix for nouveau, which I suspect should
go into this merge window even if the code is marked as staging, just
to avoid people copying the breakage.
Changes since v2:
- new patch
2019 Jul 03
0
[PATCH 4/5] nouveau: unlock mmap_sem on all errors from nouveau_range_fault
Currently nouveau_svm_fault expects nouveau_range_fault to never unlock
mmap_sem, but the latter unlocks it for a random selection of error
codes. Fix this up by always unlocking mmap_sem for non-zero return
values in nouveau_range_fault, and only unlocking it in the caller
for successful returns.
Signed-off-by: Christoph He...
2019 Jul 22
0
[PATCH 4/6] nouveau: unlock mmap_sem on all errors from nouveau_range_fault
Currently nouveau_svm_fault expects nouveau_range_fault to never unlock
mmap_sem, but the latter unlocks it for a random selection of error
codes. Fix this up by always unlocking mmap_sem for non-zero return
values in nouveau_range_fault, and only unlocking it in the caller
for successful returns.
Signed-off-by: Christoph He...
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...truct hmm_range *range,
> > + u64 *ioctl_addr)
> > {
> > unsigned long i, npages;
> >
> > + /*
> > + * The ioctl_addr prepared here is passed through nvif_object_ioctl()
> > + * to an eventual DMA map on some call chain like:
> > + * nouveau_svm_fault():
> > + * args.i.m.method = NVIF_VMM_V0_PFNMAP
> > + * nouveau_range_fault()
> > + * nvif_object_ioctl()
> > + * client->driver->ioctl()
> > + * struct nvif_driver nvif_driver_nvkm:
> > + * .ioctl = nvkm_c...
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD sized or PUD sized CPU page table entry and therefore the device
driver can safely map system memory using larger device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for
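A sketch of how a driver would consume these proposed output flags; HMM_PFN_PMD and HMM_PFN_PUD are the names this series introduces (mainline ultimately exposes the same information through hmm_pfn_to_map_order() instead), and map_device_pte() is a hypothetical driver helper.

	/* Sketch only: flags as proposed by this series; helper is hypothetical. */
	unsigned long pfn = range.hmm_pfns[i];

	if (pfn & HMM_PFN_PUD)
		map_device_pte(addr, pfn, PUD_SHIFT);	/* PUD-sized, e.g. 1GB */
	else if (pfn & HMM_PFN_PMD)
		map_device_pte(addr, pfn, PMD_SHIFT);	/* PMD-sized, e.g. 2MB */
	else
		map_device_pte(addr, pfn, PAGE_SHIFT);	/* base 4K mapping */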
2019 Jul 22
0
[PATCH 2/6] mm: move hmm_vma_range_done and hmm_vma_fault to nouveau
...------
2 files changed, 43 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..cde09003c06b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static inline bool nouveau_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *m...
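nouveau_range_done() above packages the classic HMM validity check with unregistration: it reports whether the snapshot survived any concurrent invalidation. A sketch of the usual call pattern under this (July 2019) API, with the retry label and the mapping step purely illustrative:

again:
	/* ... hmm_range_register() + nouveau_range_fault() ... */
	mutex_lock(&svmm->mutex);
	if (!nouveau_range_done(&range)) {
		mutex_unlock(&svmm->mutex);
		goto again;	/* an invalidation raced the fault: retry */
	}
	/* snapshot is stable while svmm->mutex is held: commit the mapping */
	mutex_unlock(&svmm->mutex);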
2019 Jul 01
0
[PATCH 20/22] mm: move hmm_vma_fault to nouveau
...ing for mmu notifiers we need some kind of timeout, otherwise we
+ * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to
+ * wait already.
+ */
+#define NOUVEAU_RANGE_FAULT_TIMEOUT 1000
+
struct nouveau_svm {
struct nouveau_drm *drm;
struct mutex mutex;
@@ -475,6 +482,51 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range,
+ bool block)
+{
+ long ret;
+
+ /*
+ * With the old API the driver must set each individual entries with
+ * the req...
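The timeout above feeds the old API's wait-for-valid step. A sketch of how NOUVEAU_RANGE_FAULT_TIMEOUT would be consumed (July 2019 hmm API; registration and error paths elided, and svmm assumed in scope as in the surrounding driver code):

	if (!hmm_range_wait_until_valid(range, NOUVEAU_RANGE_FAULT_TIMEOUT)) {
		up_read(&svmm->mm->mmap_sem);
		return -EBUSY;	/* notifiers kept the range busy too long */
	}
	ret = hmm_range_fault(range, block);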
2019 Jul 03
0
[PATCH 2/6] mm: move hmm_vma_range_done and hmm_vma_fault to nouveau
...ing for mmu notifiers we need some kind of timeout, otherwise we
+ * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to
+ * wait already.
+ */
+#define NOUVEAU_RANGE_FAULT_TIMEOUT 1000
+
struct nouveau_svm {
struct nouveau_drm *drm;
struct mutex mutex;
@@ -475,6 +482,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static inline bool nouveau_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *m...
2019 Jul 03
1
[PATCH 20/22] mm: move hmm_vma_fault to nouveau
...out, otherwise we
> + * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to
> + * wait already.
> + */
> +#define NOUVEAU_RANGE_FAULT_TIMEOUT 1000
> +
> struct nouveau_svm {
> struct nouveau_drm *drm;
> struct mutex mutex;
> @@ -475,6 +482,51 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
> fault->inst, fault->addr, fault->access);
> }
>
> +static int
> +nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range,
> + bool block)
> +{
> + long ret;
> +
> + /*
> + * With the old API the...
2019 Jul 23
2
[PATCH 4/6] nouveau: unlock mmap_sem on all errors from nouveau_range_fault
On Mon, Jul 22, 2019 at 11:44:24AM +0200, Christoph Hellwig wrote:
> Currently nouveau_svm_fault expects nouveau_range_fault to never unlock
> mmap_sem, but the latter unlocks it for a random selection of error
> codes. Fix this up by always unlocking mmap_sem for non-zero return
> values in nouveau_range_fault, and only unlocking it in the caller
> for successful returns.
>
> ...