Displaying 19 results from an estimated 19 matches for "svm_notifier".
2019 Oct 15
0
[PATCH hmm 11/15] nouveau: use mmu_range_notifier instead of hmm_mirror
...(&svmm->mirror);
out_mm_unlock:
up_write(&current->mm->mmap_sem);
out_free:
@@ -503,43 +482,89 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-static inline bool
-nouveau_range_done(struct hmm_range *range)
+struct svm_notifier {
+ struct mmu_range_notifier notifier;
+ struct nouveau_svmm *svmm;
+};
+
+static bool nouveau_svm_range_invalidate(struct mmu_range_notifier *mrn,
+ const struct mmu_notifier_range *range)
{
- bool ret = hmm_range_valid(range);
+ struct svm_notifier *sn =
+ container_of(mrn, struct svm_not...
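For reference, the invalidate callback this hunk is building reads roughly as
follows in the version that eventually landed upstream, where
mmu_range_notifier was renamed mmu_interval_notifier and the callback gained
a cur_seq argument (a sketch from mainline nouveau_svm.c, not the exact hunk
above):

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
					 const struct mmu_notifier_range *range,
					 unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	/* serialize against the fault path before publishing the new
	 * sequence number */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}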
2020 Jan 13
0
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...ownership of svmm transfers to mmu_notifier */
+
+ mmgrab(current->mm);
+ svmm->mm = current->mm;
cli->svm.svmm = svmm;
cli->svm.cli = cli;
@@ -482,65 +459,212 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-struct svm_notifier {
- struct mmu_interval_notifier notifier;
- struct nouveau_svmm *svmm;
-};
+static struct svmm_interval *nouveau_svmm_new_interval(
+ struct nouveau_svmm *svmm,
+ unsigned long start,
+ unsigned long last)
+{
+ struct svmm_interval *smi;
+ int ret;
+
+ smi = kmalloc(sizeof(*smi), GFP_A...
2020 Jan 14
2
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...algorithm only works if it is protected by the read side of
the interval tree lock. Deserves at least a comment if not an
assertion too.
> static int nouveau_range_fault(struct nouveau_svmm *svmm,
> struct nouveau_drm *drm, void *data, u32 size,
> - u64 *pfns, struct svm_notifier *notifier)
> + u64 *pfns, u64 start, u64 end)
> {
> unsigned long timeout =
> jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
> /* Have HMM fault pages within the fault window to the GPU. */
> struct hmm_range range = {
.notifier = &notifier->...
2020 Jan 15
0
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...ver page table lock so the struct mmu_interval_notifier and
the interval tree can't change.
I will add comments for v7.
>> static int nouveau_range_fault(struct nouveau_svmm *svmm,
>> struct nouveau_drm *drm, void *data, u32 size,
>> - u64 *pfns, struct svm_notifier *notifier)
>> + u64 *pfns, u64 start, u64 end)
>> {
>> unsigned long timeout =
>> jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
>> /* Have HMM fault pages within the fault window to the GPU. */
>> struct hmm_range range = {
>> ...
2020 Jun 19
0
[PATCH 08/16] nouveau/hmm: fault one page at a time
...PFN_WRITE)
+ ioctl_addr[0] |= NVIF_VMM_PFNMAP_V0_W;
}
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- unsigned long hmm_pfns[], u64 *ioctl_addr,
+ u64 *ioctl_addr, unsigned long hmm_flags,
struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
/* Have HMM fault pages within the fault window to the GPU. */
+ unsigned long hmm_pfns[1];
struct hmm_range range = {
.notifier = &notifier->notifier,
.start = notifier->notifier.i...
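The initializer is cut off above; in the one-page-at-a-time version it
continues roughly like this (a reconstruction based on the mainline
nouveau_svm.c of that era, so treat the fields past the visible ones as
approximate):

	unsigned long hmm_pfns[1];
	struct hmm_range range = {
		.notifier = &notifier->notifier,
		.start = notifier->notifier.interval_tree.start,
		.end = notifier->notifier.interval_tree.last + 1,
		.default_flags = hmm_flags,
		.hmm_pfns = hmm_pfns,
	};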
2020 Jul 01
0
[PATCH v3 1/5] nouveau/hmm: fault one page at a time
...PFN_WRITE)
+ ioctl_addr[0] |= NVIF_VMM_PFNMAP_V0_W;
}
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- unsigned long hmm_pfns[], u64 *ioctl_addr,
+ u64 *ioctl_addr, unsigned long hmm_flags,
struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
/* Have HMM fault pages within the fault window to the GPU. */
+ unsigned long hmm_pfns[1];
struct hmm_range range = {
.notifier = &notifier->notifier,
.start = notifier->notifier.i...
2020 Jun 19
0
[PATCH 10/16] nouveau/hmm: support mapping large sysmem pages
...veau_range_fault(struct nouveau_svmm *svmm,
- struct nouveau_drm *drm, void *data, u32 size,
- u64 *ioctl_addr, unsigned long hmm_flags,
+ struct nouveau_drm *drm,
+ struct nouveau_pfnmap_args *args, u32 size,
+ unsigned long hmm_flags,
struct svm_notifier *notifier)
{
unsigned long timeout =
@@ -585,10 +598,10 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
break;
}
- nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
+ nouveau_hmm_convert_pfn(drm, &range, args);
svmm->vmm->vmm.object.client->super = true;...
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD sized or PUD sized CPU page table entry and therefore the device
driver can safely map system memory using larger device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for
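A sketch of how a driver might consume the proposed flags. Note that
HMM_PFN_PMD and HMM_PFN_PUD are the names proposed by this series and did not
land in this form (mainline later gained hmm_pfn_to_map_order() instead), and
nouveau_map_size() is a hypothetical helper:

static unsigned long nouveau_map_size(unsigned long hmm_pfn)
{
	/* pick the largest device PTE size the CPU mapping allows */
	if (hmm_pfn & HMM_PFN_PUD)
		return PUD_SIZE;
	if (hmm_pfn & HMM_PFN_PMD)
		return PMD_SIZE;
	return PAGE_SIZE;
}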
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...tatic void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -520,7 +508,8 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- u64 *pfns, struct svm_notifier *notifier)
+ unsigned long hmm_pfns[], u64 *ioctl_addr,
+ struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -529,10 +518,8 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.notifier = &notifi...
2020 Jan 13
9
[PATCH v6 0/6] mm/hmm/test: add self tests for HMM
This series adds new functions to the mmu interval notifier API to
allow device drivers with MMUs to dynamically mirror a process' page
tables based on device faults and invalidation callbacks. The Nouveau
driver is updated to use the extended API and a set of stand alone self
tests is added to help validate and maintain correctness.
The patches are based on linux-5.5.0-rc6 and are for
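A minimal sketch of the fault-side pattern the extended API enables; the
mmu_interval_* calls are the upstream interval notifier API, while mm, start,
length and ops stand in for driver-specific values:

	struct mmu_interval_notifier mni;
	unsigned long seq;
	int ret;

	ret = mmu_interval_notifier_insert(&mni, mm, start, length, &ops);
	if (ret)
		return ret;

	do {
		seq = mmu_interval_read_begin(&mni);
		/* fault/mirror the CPU pages (e.g. via hmm_range_fault())
		 * and program the device page tables under the driver lock;
		 * mmu_interval_read_retry() is checked under that same lock */
	} while (mmu_interval_read_retry(&mni, seq));

	mmu_interval_notifier_remove(&mni);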
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...IF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_HOST;
+ if (range->hmm_pfns[i] & HMM_PFN_WRITE)
+ ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+ }
+}
+
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- u64 *pfns, struct svm_notifier *notifier)
+ unsigned long hmm_pfns[], u64 *ioctl_addr,
+ struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -529,10 +553,8 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.notifier = &notifi...
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...ault_replay(struct nouveau_svm *svm)
> @@ -520,7 +508,8 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
>
> static int nouveau_range_fault(struct nouveau_svmm *svmm,
> struct nouveau_drm *drm, void *data, u32 size,
> - u64 *pfns, struct svm_notifier *notifier)
> + unsigned long hmm_pfns[], u64 *ioctl_addr,
> + struct svm_notifier *notifier)
> {
> unsigned long timeout =
> jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
> @@ -529,10 +518,8 @@ static int nouveau_range_fault(struct nouveau_svmm *sv...
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order()
function. This allows a device driver to know that a given 4K PFN is
actually mapped by the CPU using a larger sized CPU page table entry and
therefore the device driver can safely map system memory using larger
device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's
hmm tree. These were
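In its final form the check is a single helper call; a sketch, assuming range
has already been filled in by hmm_range_fault():

	unsigned long hmm_pfn = range.hmm_pfns[i];
	struct page *page = hmm_pfn_to_page(hmm_pfn);
	unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

	/* the CPU maps this PFN with a 2^order * PAGE_SIZE page table
	 * entry, so the device can safely use an MMU PTE up to that size */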
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate Tested-by's for the two
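After the series, a caller sketch looks roughly like this (NPAGES, mni, addr
and mm are illustrative names; HMM_PFN_REQ_* and the 0-or-negative-errno
return are the simplified upstream conventions):

	unsigned long hmm_pfns[NPAGES];
	struct hmm_range range = {
		.notifier = &mni,
		.start = addr,
		.end = addr + NPAGES * PAGE_SIZE,
		.hmm_pfns = hmm_pfns,
		.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
	};
	int ret;

	range.notifier_seq = mmu_interval_read_begin(&mni);
	mmap_read_lock(mm);	/* down_read(&mm->mmap_sem) on older kernels */
	ret = hmm_range_fault(&range);	/* 0 on success, -errno on failure */
	mmap_read_unlock(mm);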
2020 May 08
11
[PATCH 0/6] nouveau/hmm: add support for mapping large pages
hmm_range_fault() returns an array of page frame numbers and flags for
how the pages are mapped in the requested process' page tables. The PFN
can be used to get the struct page with hmm_pfn_to_page() and the page size
order can be determined with compound_order(page), but if the page is larger
than order 0 (PAGE_SIZE), there is no indication that the page is mapped
using a larger page size. To
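A sketch of the status quo the cover letter describes (compound_head() added
here for correctness on tail pages):

	struct page *page = hmm_pfn_to_page(range.hmm_pfns[i]);
	unsigned int order = compound_order(compound_head(page));

	/* order tells us how big the backing allocation is, but not
	 * whether the CPU mapping actually uses a larger page size */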
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
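A sketch of the open-coded pattern being consolidated; drv_mirror, mn and
itree are illustrative driver-private names:

static int drv_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	struct drv_mirror *m = container_of(mn, struct drv_mirror, mn);

	/* the filtering step every driver re-implements: does the
	 * invalidated range overlap anything this driver mirrors? */
	if (!interval_tree_iter_first(&m->itree, range->start,
				      range->end - 1))
		return 0;

	/* ... invalidate the overlapping device mappings ... */
	return 0;
}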
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate Tested-by's for the two
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split