search for: hmm_pfn_value_max

Displaying 20 results from an estimated 22 matches for "hmm_pfn_value_max".

2020 Apr 22
0
[PATCH hmm 4/5] mm/hmm: remove HMM_PFN_SPECIAL
...c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bff8e64701a547..449083f9f8a2bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -775,7 +775,6 @@ static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
 static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
 };

 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index c68e9317cf0740..cf0d9bd61bebf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouve...
2020 May 01
0
[PATCH hmm v2 4/5] mm/hmm: remove HMM_PFN_SPECIAL
...c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 41ae7f96f48194..76b4a4fa39ed04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -775,7 +775,6 @@ static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
 static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
 };

 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index c68e9317cf0740..cf0d9bd61bebf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouve...
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and discussions about simplifying it have come up a number of times. This small series removes the customizable pfn format and simplifies the return code of hmm_range_fault(). All the drivers are adjusted to process in the simplified format. I would appreciate tested-by's for the two
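As a rough, minimal sketch of what the simplified calling convention looks like on the driver side, written against the post-series API as it appears in current kernels (hmm_range_fault() fills an array of unsigned long words carrying the PFN plus HMM_PFN_* flag bits and returns 0 or a negative errno). The function name and window size below are made up, and the driver lock and retry loop around the result are elided:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical consumer of the simplified hmm_range_fault() API. */
static int example_fault_window(struct mmu_interval_notifier *notifier,
                                unsigned long addr)
{
        unsigned long pfns[64];
        struct hmm_range range = {
                .notifier = notifier,
                .start = addr,
                .end = addr + ARRAY_SIZE(pfns) * PAGE_SIZE,
                .hmm_pfns = pfns,
                .default_flags = HMM_PFN_REQ_FAULT,
        };
        struct page *page;
        bool writable;
        unsigned long i;
        int ret;

        range.notifier_seq = mmu_interval_read_begin(notifier);
        mmap_read_lock(notifier->mm);
        ret = hmm_range_fault(&range); /* 0 or -errno, no pfn count anymore */
        mmap_read_unlock(notifier->mm);
        if (ret)
                return ret; /* -EBUSY: the range changed, caller just retries */

        for (i = 0; i < ARRAY_SIZE(pfns); i++) {
                if (!(pfns[i] & HMM_PFN_VALID))
                        continue;
                /* The PFN and the write permission live in the same word. */
                page = hmm_pfn_to_page(pfns[i]);
                writable = pfns[i] & HMM_PFN_WRITE;
                /* ... program the device page table from page / writable ... */
        }
        return 0;
}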
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
...DPT_SHIFT PAGE_SHIFT
-#define DPT_VALID (1UL << 0)
-#define DPT_WRITE (1UL << 1)
-
 #define DPT_XA_TAG_WRITE 3UL

-static const uint64_t dmirror_hmm_flags[HMM_PFN_FLAG_MAX] = {
- [HMM_PFN_VALID] = DPT_VALID,
- [HMM_PFN_WRITE] = DPT_WRITE,
-};
-
-static const uint64_t dmirror_hmm_values[HMM_PFN_VALUE_MAX] = {
- [HMM_PFN_NONE] = 0,
- [HMM_PFN_ERROR] = 0x10,
- [HMM_PFN_SPECIAL] = 0x10,
-};
-
 /*
  * Data structure to track address ranges and register for mmu interval
  * notifier updates.
@@ -175,7 +160,7 @@ static inline struct dmirror_device *dmirror_page_to_device(struct page *page)
 static...
2019 Oct 29
0
[PATCH v2 14/15] drm/amdgpu: Use mmu_range_notifier instead of hmm_mirror
...internal, not related to CPU/GPU PTE flags */
> -static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
> - (1 << 0), /* HMM_PFN_VALID */
> - (1 << 1), /* HMM_PFN_WRITE */
> - 0 /* HMM_PFN_DEVICE_PRIVATE */
> -};
> -
> -static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
> - 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
> - 0, /* HMM_PFN_NONE */
> - 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
> -};
> -
> -void amdgpu_hmm_init_range(struct hmm_range *range)
> -{
> - if (range) {
> - range->flags = hmm_range_flags;
> - range->...
2019 Oct 28
1
[PATCH v2 14/15] drm/amdgpu: Use mmu_range_notifier instead of hmm_mirror
...= NULL;
 }
-
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
-void amdgpu_hmm_init_range(struct hmm_range *range)
-{
- if (range) {
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift...
2019 Nov 12
0
[PATCH v3 12/14] drm/amdgpu: Use mmu_interval_notifier instead of hmm_mirror
...= NULL;
 }
-
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
-void amdgpu_hmm_init_range(struct hmm_range *range)
-{
- if (range) {
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift...
2020 Mar 16
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
.../drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -776,7 +776,6 @@ struct amdgpu_ttm_tt {
 static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
 (1 << 0), /* HMM_PFN_VALID */
 (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
 };

 static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 0e36345d395c..edfd0805fba4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -28,6 +28,7 @@
 #include <nvif/class.h>
 #include <nvif/o...
2020 Mar 16
4
ensure device private pages have an owner
When acting on device private mappings, a driver needs to know whether the device (or another entity, in the case of kvmppc) actually owns this private mapping. This series adds an owner field and converts the migrate_vma code over to check it. I looked into doing the same for hmm_range_fault, but as far as I can tell that code has never been wired up to actually work for device private memory, so instead of
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ttm_tt {
 };

 #ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
-};
-
 /**
  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
  * memory and start HMM tracking CPU page table update
@@ -815,18 +804,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, stru...
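With the per-driver flag/value tables gone, as in the hunk above, the output format is fixed and a driver only has to translate the HMM_PFN_* bits of each entry into its own PTE encoding. A small sketch of that translation, assuming nothing beyond the fixed format; the MY_PTE_* bits and the function name are invented for illustration:

#include <linux/hmm.h>
#include <linux/mm.h>

/* Hypothetical device PTE bits, for illustration only. */
#define MY_PTE_VALID (1ULL << 0)
#define MY_PTE_WRITE (1ULL << 1)

/* Translate one fixed-format hmm_pfns[] entry into a device PTE. */
static u64 example_hmm_pfn_to_dev_pte(unsigned long hmm_pfn)
{
        u64 pte;

        /* Errors and holes: nothing to map for this page. */
        if ((hmm_pfn & HMM_PFN_ERROR) || !(hmm_pfn & HMM_PFN_VALID))
                return 0;

        pte = (u64)page_to_pfn(hmm_pfn_to_page(hmm_pfn)) << PAGE_SHIFT;
        pte |= MY_PTE_VALID;
        if (hmm_pfn & HMM_PFN_WRITE)
                pte |= MY_PTE_WRITE;
        return pte;
}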
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
.../drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -776,7 +776,6 @@ struct amdgpu_ttm_tt {
 static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
 (1 << 0), /* HMM_PFN_VALID */
 (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
 };

 static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 7605c4c48985..42808efceaf2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -671,40 +671,3 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ttm_tt {
 };

 #ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
-};
-
 /**
  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
  * memory and start HMM tracking CPU page table update
@@ -815,18 +804,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, stru...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...MDGPU_USERPTR
> -/* flags used by HMM internal, not related to CPU/GPU PTE flags */
> -static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
> - (1 << 0), /* HMM_PFN_VALID */
> - (1 << 1), /* HMM_PFN_WRITE */
> -};
> -
> -static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
> - 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
> - 0, /* HMM_PFN_NONE */
> -};
> -
> /**
> * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
> * memory and start HMM tracking CPU page table update
> @@ -815,18 +804,15 @@ int amdgpu_ttm_tt_g...
2019 Oct 29
4
[PATCH v2 14/15] drm/amdgpu: Use mmu_range_notifier instead of hmm_mirror
On Tue, Oct 29, 2019 at 07:22:37PM +0000, Yang, Philip wrote:
> Hi Jason,
>
> I did quick test after merging amd-staging-drm-next with the
> mmu_notifier branch, which includes this set changes. The test result
> has different failures, app stuck intermittently, GUI no display etc. I
> am understanding the changes and will try to figure out the cause.

Thanks! I'm not
2019 Nov 01
2
[PATCH v2 14/15] drm/amdgpu: Use mmu_range_notifier instead of hmm_mirror
On 2019-11-01 1:42 p.m., Jason Gunthorpe wrote:
> On Fri, Nov 01, 2019 at 03:59:26PM +0000, Yang, Philip wrote:
>>> This test for range_blockable should be before mutex_lock, I can move
>>> it up
>>>
>> yes, thanks.
>
> Okay, I wrote it like this:
>
> if (mmu_notifier_range_blockable(range))
> mutex_lock(&adev->notifier_lock);
>
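The fragment quoted above is the core of the locking rule being settled in this thread: in the invalidate callback, sleep on the notifier lock only when the invalidation is allowed to block, otherwise trylock and report failure so the core retries. A sketch of the whole pattern, with my_lock standing in for adev->notifier_lock and the callback name invented:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static bool example_invalidate(struct mmu_interval_notifier *mni,
                               const struct mmu_notifier_range *range,
                               unsigned long cur_seq)
{
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&my_lock);
        else if (!mutex_trylock(&my_lock))
                return false; /* non-blocking context and the lock is busy */

        mmu_interval_set_seq(mni, cur_seq);
        /* ... stop device access to the invalidated range here ... */
        mutex_unlock(&my_lock);
        return true;
}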
2020 Mar 16
4
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...@@ -776,7 +776,6 @@ struct amdgpu_ttm_tt {
> static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
> (1 << 0), /* HMM_PFN_VALID */
> (1 << 1), /* HMM_PFN_WRITE */
> - 0 /* HMM_PFN_DEVICE_PRIVATE */
> };
>
> static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
> diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> index 0e36345d395c..edfd0805fba4 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -28,6 +28,7 @@
>
> #include <...
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings, a driver needs to know whether the device (or another entity, in the case of kvmppc) actually owns this private mapping. This series adds an owner field and converts the migrate_vma code over to check it. I looked into doing the same for hmm_range_fault, but as far as I can tell that code has never been wired up to actually work for device private memory, so instead of
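As a rough illustration of the owner check described here, the sketch below is written against the migrate_vma API as it looks in current kernels, where the field ended up being called pgmap_owner and device private pages are selected via MIGRATE_VMA_SELECT_DEVICE_PRIVATE; the field and flag names may differ in the version posted in this series, and the function and owner pointer are hypothetical:

#include <linux/migrate.h>
#include <linux/mm.h>

static int example_migrate_to_ram(struct vm_area_struct *vma,
                                  unsigned long start, void *my_device_owner)
{
        unsigned long src_pfns[32] = {};
        unsigned long dst_pfns[32] = {};
        struct migrate_vma args = {
                .vma = vma,
                .start = start,
                .end = start + ARRAY_SIZE(src_pfns) * PAGE_SIZE,
                .src = src_pfns,
                .dst = dst_pfns,
                .pgmap_owner = my_device_owner,
                .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };
        int ret;

        /* Only device private pages owned by my_device_owner are collected. */
        ret = migrate_vma_setup(&args);
        if (ret)
                return ret;

        /* ... allocate system pages, copy the data, fill dst_pfns ... */

        migrate_vma_pages(&args);
        migrate_vma_finalize(&args);
        return 0;
}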
2020 Mar 16
6
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...@@ -776,7 +776,6 @@ struct amdgpu_ttm_tt {
> static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
> (1 << 0), /* HMM_PFN_VALID */
> (1 << 1), /* HMM_PFN_WRITE */
> - 0 /* HMM_PFN_DEVICE_PRIVATE */
> };
>
> static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
> diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> index 7605c4c48985..42808efceaf2 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -671,40 +671,3 @@ nouveau_dmem_migrate_vma(...
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1, scif_dma, vhost, gntdev, hmm) are using a common pattern where they only use invalidate_range_start/end and immediately check the invalidating range against some driver data structure to tell if the driver is interested. Half of them use an interval_tree, the others
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1, scif_dma, vhost, gntdev, hmm) are using a common pattern where they only use invalidate_range_start/end and immediately check the invalidating range against some driver data structure to tell if the driver is interested. Half of them use an interval_tree, the others
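For reference, the consolidated pattern this cover letter describes boils down to roughly the following: register an interval subscription for the range the driver cares about, then wrap the work that samples the CPU page tables in a read_begin/read_retry loop so a racing invalidation forces a redo. All names are illustrative, the invalidate callback is the one sketched earlier in these results, and the driver lock normally held across the retry check is elided:

#include <linux/mmu_notifier.h>

/* Invalidate callback, as in the earlier sketch. */
static bool example_invalidate(struct mmu_interval_notifier *mni,
                               const struct mmu_notifier_range *range,
                               unsigned long cur_seq);

static const struct mmu_interval_notifier_ops example_ops = {
        .invalidate = example_invalidate,
};

static int example_track(struct mmu_interval_notifier *mni, struct mm_struct *mm,
                         unsigned long start, unsigned long length)
{
        unsigned long seq;
        int ret;

        ret = mmu_interval_notifier_insert(mni, mm, start, length, &example_ops);
        if (ret)
                return ret;

        do {
                seq = mmu_interval_read_begin(mni);
                /* ... fault/sample the pages and program the device ... */
        } while (mmu_interval_read_retry(mni, seq));

        return 0;
}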