Displaying 20 results from an estimated 36 matches for "nouveau_dmem_convert_pfn".
2019 Aug 15
4
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
On Thu, Aug 15, 2019 at 04:33:06PM -0400, Jerome Glisse wrote:
> So nor HMM nor driver should dereference the struct page (i do not
> think any iommu driver would either),
Er, they do technically deref the struct page:
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                         struct hmm_range *range)
                struct page *page;
                page = hmm_pfn_to_page(range, range->pfns[i]);
                if (!nouveau_dmem_page(drm, page)) {

nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
        return is_device_private_page(page) && drm->dmem == pag...
2019 Aug 16
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...> On Thu, Aug 15, 2019 at 04:33:06PM -0400, Jerome Glisse wrote:
> >
> > > So nor HMM nor driver should dereference the struct page (i do not
> > > think any iommu driver would either),
> >
> > Er, they do technically deref the struct page:
> >
> > nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> > struct hmm_range *range)
> > struct page *page;
> > page = hmm_pfn_to_page(range, range->pfns[i]);
> > if (!nouveau_dmem_page(drm, page)) {
> >
> >
> >...
2020 Mar 16
4
ensure device private pages have an owner
When acting on device private mappings, a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
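A minimal sketch of the ownership test this cover letter describes, under the assumption that the owner cookie ends up as a field on struct dev_pagemap; the helper name and exact field layout here are illustrative, not the series' actual code:

```c
#include <linux/memremap.h>
#include <linux/mm.h>

/*
 * Hedged sketch: how a driver could tell whether a device private page
 * belongs to it once dev_pagemap carries an owner cookie. The helper
 * name and the assumption that the cookie lives in page->pgmap->owner
 * are illustrative only.
 */
static bool dmem_page_owned_by(struct page *page, void *owner)
{
	/* Only ZONE_DEVICE private pages have a meaningful pgmap owner. */
	if (!is_device_private_page(page))
		return false;

	return page->pgmap->owner == owner;
}
```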
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...uveau/nouveau_dmem.c
@@ -671,40 +671,3 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 out:
        return ret;
 }
-
-static inline bool
-nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
-{
-       return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
-}
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
-                        struct hmm_range *range)
-{
-       unsigned long i, npages;
-
-       npages = (range->end - range->start) >> PAGE_SHIFT;
-       for (i = 0; i < npages; ++i) {
-               struct page *page;
-               uint64_t addr;
-
-               page = hmm_device_entry_to_page(range, range->pfns[i]);
-               if...
2020 Mar 16
6
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...vma(struct nouveau_drm *drm,
> out:
> return ret;
> }
> -
> -static inline bool
> -nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
> -{
> - return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
> -}
> -
> -void
> -nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> - struct hmm_range *range)
> -{
> - unsigned long i, npages;
> -
> - npages = (range->end - range->start) >> PAGE_SHIFT;
> - for (i = 0; i < npages; ++i) {
> - struct page *page;
> - uint64_t addr;
> -
> - page = hmm_devi...
2019 Aug 15
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...gg at mellanox.com> wrote:
>
> On Thu, Aug 15, 2019 at 04:33:06PM -0400, Jerome Glisse wrote:
>
> > So nor HMM nor driver should dereference the struct page (i do not
> > think any iommu driver would either),
>
> Er, they do technically deref the struct page:
>
> nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> struct hmm_range *range)
> struct page *page;
> page = hmm_pfn_to_page(range, range->pfns[i]);
> if (!nouveau_dmem_page(drm, page)) {
>
>
> nouveau_dmem_page(struct nouveau_dr...
2019 Aug 16
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...06PM -0400, Jerome Glisse wrote:
> > >
> > > > So nor HMM nor driver should dereference the struct page (i do not
> > > > think any iommu driver would either),
> > >
> > > Er, they do technically deref the struct page:
> > >
> > > nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> > > struct hmm_range *range)
> > > struct page *page;
> > > page = hmm_pfn_to_page(range, range->pfns[i]);
> > > if (!nouveau_dmem_page(drm, page)) {
> >...
2019 Jul 29
0
[PATCH 3/9] nouveau: factor out device memory address calculation
...AM, dst_addr,
-                          NOUVEAU_APER_HOST, src_addr);
+               ret = copy(drm, 1, NOUVEAU_APER_VRAM,
+                          nouveau_dmem_page_addr(dpage),
+                          NOUVEAU_APER_HOST,
+                          migrate->dma[migrate->dma_nr++]);
                if (ret) {
                        nouveau_dmem_page_free_locked(drm, dpage);
                        dst_pfns[i] = 0;
@@ -846,7 +842,6 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; ++i) {
-               struct nouveau_dmem_chunk *chunk;
                struct page *page;
                uint64_t addr;
@@ -864,10 +859,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                        continue;
                }...
2020 Mar 16
0
[PATCH 4/4] mm: check the device private page owner in hmm_range_fault
...drm/nouveau/nouveau_dmem.c
@@ -672,12 +672,6 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
        return ret;
 }
-static inline bool
-nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
-{
-       return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
-}
-
 void
 nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                          struct hmm_range *range)
@@ -696,12 +690,6 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                if (!is_device_private_page(page))
                        continue;
-               if (!nouveau_dmem_page(drm, page)) {
-                       WARN(1, "Some unknown device memory !\n");
-                       range->pfns[i]...
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings, a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
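For the hmm_range_fault() side that patch 4/4 earlier in these results wires up, a hedged sketch of the caller-visible effect: the driver passes an owner cookie in struct hmm_range so only its own device private entries are returned as-is. Field names follow the eventual mainline interface rather than this exact posting; notifier sequence handling, locking, and -EBUSY retries are the caller's job and are omitted:

```c
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

/*
 * Hedged sketch (not the series' code): fill the owner cookie in
 * struct hmm_range so hmm_range_fault() only hands back device private
 * entries owned by this driver; foreign device private pages are
 * faulted back to system memory instead.
 */
static int fault_range_for_owner(struct mmu_interval_notifier *notifier,
				 void *owner, unsigned long start,
				 unsigned long end, unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = end,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT,
		.dev_private_owner = owner,
	};

	/* Caller holds the mmap lock and revalidates notifier_seq. */
	return hmm_range_fault(&range);
}
```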
2020 Mar 16
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -28,6 +28,7 @@
 #include <nvif/class.h>
 #include <nvif/object.h>
+#include <nvif/if000c.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
@@ -692,9 +693,8 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                if (page == NULL)
                        continue;
-               if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+               if (!is_device_private_page(page))
                        continue;
-               }
                if (!nouveau_dmem_page(drm, page)) {
                        WARN(1, "Some unknown device memory !\n");
@@ -...
2020 Mar 16
4
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...t; +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -28,6 +28,7 @@
>
> #include <nvif/class.h>
> #include <nvif/object.h>
> +#include <nvif/if000c.h>
> #include <nvif/if500b.h>
> #include <nvif/if900b.h>
>
> @@ -692,9 +693,8 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> if (page == NULL)
> continue;
>
> - if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
> + if (!is_device_private_page(page))
> continue;
> - }
>
> if (!nouveau_dmem_page(drm, page)) {
>...
2019 Oct 15
0
[PATCH hmm 11/15] nouveau: use mmu_range_notifier instead of hmm_mirror
....notifier_seq)) {
+                       mutex_unlock(&svmm->mutex);
+                       continue;
+               }
+               break;
        }
-       ret = hmm_range_fault(range, 0);
-       if (ret <= 0) {
-               if (ret == 0)
-                       ret = -EBUSY;
-               up_read(&svmm->notifier.mm->mmap_sem);
-               hmm_range_unregister(range);
-               return ret;
-       }
-       return 0;
+       nouveau_dmem_convert_pfn(drm, &range);
+
+       svmm->vmm->vmm.object.client->super = true;
+       ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
+       svmm->vmm->vmm.object.client->super = false;
+       mutex_unlock(&svmm->mutex);
+
+       return ret;
 }
 static int
@@ -559,7 +584,6 @@...
2019 Jul 01
0
[PATCH 22/22] mm: remove the legacy hmm_pfn_* APIs
...-----
2 files changed, 1 insertion(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 40c47d6a7d78..534069ffe20a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -853,7 +853,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                struct page *page;
                uint64_t addr;
-               page = hmm_pfn_to_page(range, range->pfns[i]);
+               page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 3457cf9182e5..9799f...
2019 Jul 03
0
[PATCH 5/5] mm: remove the legacy hmm_pfn_* APIs
...-----
2 files changed, 1 insertion(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 42c026010938..b9ced2e61667 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -844,7 +844,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                struct page *page;
                uint64_t addr;
-               page = hmm_pfn_to_page(range, range->pfns[i]);
+               page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 657606f48796..cdcd7...
2019 Aug 15
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...M +0000, Jason Gunthorpe wrote:
> On Thu, Aug 15, 2019 at 04:33:06PM -0400, Jerome Glisse wrote:
>
> > So nor HMM nor driver should dereference the struct page (i do not
> > think any iommu driver would either),
>
> Er, they do technically deref the struct page:
>
> nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> struct hmm_range *range)
> struct page *page;
> page = hmm_pfn_to_page(range, range->pfns[i]);
> if (!nouveau_dmem_page(drm, page)) {
>
>
> nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
> {
> return is_device_priv...
2020 May 05
1
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...is FIXME? It seems like we could get stuck in a loop here,
if we're not issuing a new REQ, right?
> if (ret == -EBUSY)
> continue;
> return ret;
> @@ -562,7 +587,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
> break;
> }
>
> - nouveau_dmem_convert_pfn(drm, &range);
> + nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
>
> svmm->vmm->vmm.object.client->super = true;
> ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
> @@ -589,6 +614,7 @@ nouveau_svm_fault(struct nvif_notify *not...
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ad89e09a0be39a..07876fb0e1d665 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -672,27 +672,61 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
        return ret;
 }
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
-                        struct hmm_range *range)
+void nouveau_hmm_convert_pfn(struct nouveau_drm *drm, struct hmm_range *range,
+                            u64 *ioctl_addr)
 {
        unsigned long i, npages;
+       /*
+        * The ioctl_addr prepared here is passed through nvif_object_ioctl()
+        * to an eventual DMA map o...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ge *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
 {
        struct nouveau_dmem_chunk *chunk = page->zone_device_data;
        unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
@@ -671,28 +671,3 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 out:
        return ret;
 }
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
-                        struct hmm_range *range)
-{
-       unsigned long i, npages;
-
-       npages = (range->end - range->start) >> PAGE_SHIFT;
-       for (i = 0; i < npages; ++i) {
-               struct page *page;
-               uint64_t addr;
-
-               page = hmm_device_entry_to_page(range, range->pfns[i]);
-               if...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...nouveau/nouveau_dmem.c
> index ad89e09a0be39a..07876fb0e1d665 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -672,27 +672,61 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
> return ret;
> }
>
> -void
> -nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
> - struct hmm_range *range)
> +void nouveau_hmm_convert_pfn(struct nouveau_drm *drm, struct hmm_range *range,
> + u64 *ioctl_addr)
> {
> unsigned long i, npages;
>
> + /*
> + * The ioctl_addr prepared here is passed through nvif_obje...