Displaying 16 results from an estimated 16 matches for "mm_has_notifiers".
2019 Nov 05
1
[PATCH v2 01/15] mm/mmu_notifier: define the header pre-processor parts even if disabled
...,7 +8,6 @@
#include <linux/srcu.h>
#include <linux/interval_tree.h>
-struct mmu_notifier_mm;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_range_notifier;
@@ -263,10 +262,7 @@ struct mmu_notifier_range {
enum mmu_notifier_event event;
};
-static inline int mm_has_notifiers(struct mm_struct *mm)
-{
- return unlikely(mm->mmu_notifier_mm);
-}
+int mm_has_notifiers(struct mm_struct *mm);
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
struct mm_struct *mm);
@@ -477,10 +473,7 @@ stat...
2020 Jul 28
1
[PATCH v4 3/6] mm/notifier: add migration invalidation type
On Thu, Jul 23, 2020 at 03:30:01PM -0700, Ralph Campbell wrote:
> static inline int mm_has_notifiers(struct mm_struct *mm)
> @@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
> range->start = start;
> range->end = end;
> range->flags = flags;
> + range->migrate_pgmap_owner = NULL;
> }
Since this function is common...
2020 Jul 20
1
[PATCH v2 3/5] mm/notifier: add migration invalidation type
...FY_MIGRATE,
> };
>
> #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
> @@ -264,6 +269,7 @@ struct mmu_notifier_range {
> unsigned long end;
> unsigned flags;
> enum mmu_notifier_event event;
> + void *migrate_pgmap_owner;
> };
>
> static inline int mm_has_notifiers(struct mm_struct *mm)
> @@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
> range->start = start;
> range->end = end;
> range->flags = flags;
> + range->migrate_pgmap_owner = NULL;
> }
>
> #define ptep_clea...
2019 Nov 06
0
[PATCH v2 01/15] mm/mmu_notifier: define the header pre-processor parts even if disabled
...rd <jhubbard at nvidia.com>
>
Thanks
> diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
> index 2b7485919ecf..107f9406a92d 100644
> +++ b/mm/mmu_notifier.c
> @@ -47,6 +47,16 @@ struct mmu_notifier_mm {
> struct hlist_head deferred_list;
> };
>
> +int mm_has_notifiers(struct mm_struct *mm)
> +{
> + return unlikely(mm->mmu_notifier_mm);
> +}
This inline is performance sensitive; it needs to stay inlined.
Jason
2020 Jul 13
0
[PATCH v2 3/5] mm/notifier: add migration invalidation type
...MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -264,6 +269,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
+ void *migrate_pgmap_owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->migrate_pgmap_owner = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, _...
2020 Jul 06
0
[PATCH 3/5] mm/notifier: add migration invalidation type
...ECTION_PAGE,
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -264,6 +269,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
+ void *data;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->data = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
diff...
2019 Oct 28
0
[PATCH v2 01/15] mm/mmu_notifier: define the header pre-processor parts even if disabled
...f CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
+struct mmu_notifier_range {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ unsigned flags;
+ enum mmu_notifier_event event;
+};
+
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return unlikely(mm->mmu_notifier_mm);
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 7fde88695f35d6..367670cfd02b7b 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -27,6 +27,19 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
};...
2019 Nov 12
0
[PATCH v3 01/14] mm/mmu_notifier: define the header pre-processor parts even if disabled
...f CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
+struct mmu_notifier_range {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ unsigned flags;
+ enum mmu_notifier_event event;
+};
+
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return unlikely(mm->mmu_notifier_mm);
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 7fde88695f35d6..367670cfd02b7b 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -27,6 +27,19 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
};...
2020 Jul 21
0
[PATCH v3 3/5] mm/notifier: add migration invalidation type
...MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -264,6 +269,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
+ void *migrate_pgmap_owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->migrate_pgmap_owner = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, _...
2020 Jul 23
0
[PATCH v4 3/6] mm/notifier: add migration invalidation type
...MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -264,6 +269,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
+ void *migrate_pgmap_owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->migrate_pgmap_owner = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, _...
2020 Jul 13
9
[PATCH v2 0/5] mm/migrate: avoid device private invalidations
The goal of this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory where some of those pages have
already been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
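On the driver side, that skip would sit in the mmu notifier callback. Below is a
minimal sketch, not code from the series: struct my_device, its embedded notifier
and my_device_flush_tlb() are hypothetical placeholders, while MMU_NOTIFY_MIGRATE
and migrate_pgmap_owner are the fields added by the quoted patches.

#include <linux/mmu_notifier.h>

struct my_device {
	struct mmu_notifier notifier;	/* registered against the client mm */
	/* ... device page tables, TLB state, dev_pagemap, ... */
};

/* Hypothetical helper: drop device TLB entries for [start, end). */
static void my_device_flush_tlb(struct my_device *mydev,
				unsigned long start, unsigned long end)
{
	/* hardware-specific flush would go here */
}

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct my_device *mydev = container_of(mn, struct my_device, notifier);

	/*
	 * An invalidation raised by our own migrate_vma_setup() call carries
	 * the new event type and the pgmap owner we passed in; those pages
	 * are moving into device private memory, so no device TLB flush is
	 * needed.
	 */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == mydev)
		return 0;

	my_device_flush_tlb(mydev, range->start, range->end);
	return 0;
}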
2020 Jul 23
9
[PATCH v4 0/6] mm/migrate: avoid device private invalidations
The goal of this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory where some of those pages have
already been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
2020 Jul 06
8
[PATCH 0/5] mm/migrate: avoid device private invalidations
The goal of this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory where some of those pages have
already been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
2020 Jul 21
6
[PATCH v3 0/5] mm/migrate: avoid device private invalidations
The goal of this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory where some of those pages have
already been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
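To make the duplication concrete, a rough sketch of that per-driver pattern is
shown below; struct my_mn, its objects tree and my_invalidate_object() are
illustrative placeholders rather than code from any of the eight drivers, and the
series moves exactly this filtering plus its locking into the mmu notifier core.

#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>

struct my_mn {
	struct mmu_notifier mn;		/* one registration per mm */
	spinlock_t lock;
	struct rb_root_cached objects;	/* driver-tracked VA ranges */
};

/* Hypothetical per-object teardown (unmap device mappings, etc.). */
static void my_invalidate_object(struct interval_tree_node *node)
{
	/* driver-specific: unmap the object from the device */
}

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct my_mn *mymn = container_of(mn, struct my_mn, mn);
	struct interval_tree_node *node;

	/* Open-coded filtering: only ranges overlapping a tracked object matter. */
	spin_lock(&mymn->lock);
	for (node = interval_tree_iter_first(&mymn->objects,
					     range->start, range->end - 1);
	     node;
	     node = interval_tree_iter_next(node, range->start,
					    range->end - 1))
		my_invalidate_object(node);
	spin_unlock(&mymn->lock);

	return 0;
}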