Displaying 20 results from an estimated 113 matches for "interval_tree".
2020 Jan 13
0
[PATCH v6 3/6] mm/notifier: add mmu_interval_notifier_update()
.../**
* mmu_interval_set_seq - Save the invalidation sequence
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 40c837ae8d90..47ad9cc89aab 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -157,7 +157,14 @@ static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
else {
interval_tree_remove(&mni->interval_tree,
&mmn_mm->itree);
- if (mni->ops->release)
+ if (mni->updated_last) {
+ mni->interval_tree.start = mni->updated_start;
+ mni->interval_tree.last = mni->updated_last;
+ mni->updated_start = 0;
+ mni->updat...
2020 Jan 14
2
[PATCH v6 4/6] mm/mmu_notifier: add mmu_interval_notifier_find()
...tifier_update);
>
> +struct mmu_interval_notifier *mmu_interval_notifier_find(struct mm_struct *mm,
> + const struct mmu_interval_notifier_ops *ops,
> + unsigned long start, unsigned long last)
> +{
> + struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
> + struct interval_tree_node *node;
> + struct mmu_interval_notifier *mni;
> + struct mmu_interval_notifier *res = NULL;
> +
> + spin_lock(&mmn_mm->lock);
> + node = interval_tree_iter_first(&mmn_mm->itree, start, last);
> + if (node) {
> + mni = container_of(node, struct mmu_interval_n...
2019 Oct 28
0
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
.../mmu_notifier.h b/include/linux/mmu_notifier.h
index 12bd603d318ce7..51b92ba013ddce 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -6,10 +6,12 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
+#include <linux/interval_tree.h>
struct mmu_notifier_mm;
struct mmu_notifier;
struct mmu_notifier_range;
+struct mmu_range_notifier;
/**
* enum mmu_notifier_event - reason for the mmu notifier callback
@@ -32,6 +34,9 @@ struct mmu_notifier_range;
* access flags). User should soft dirty the page in the end callbac...
2019 Nov 12
0
[PATCH v3 02/14] mm/mmu_notifier: add an interval tree notifier
.../mmu_notifier.h b/include/linux/mmu_notifier.h
index 12bd603d318ce7..9e6caa8ecd1938 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -6,10 +6,12 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
+#include <linux/interval_tree.h>
struct mmu_notifier_mm;
struct mmu_notifier;
struct mmu_notifier_range;
+struct mmu_interval_notifier;
/**
* enum mmu_notifier_event - reason for the mmu notifier callback
@@ -32,6 +34,9 @@ struct mmu_notifier_range;
* access flags). User should soft dirty the page in the end call...
2020 Jan 13
0
[PATCH v6 4/6] mm/mmu_notifier: add mmu_interval_notifier_find()
...);
+struct mmu_interval_notifier *mmu_interval_notifier_find(struct mm_struct *mm,
+ const struct mmu_interval_notifier_ops *ops,
+ unsigned long start, unsigned long last);
+
+static inline unsigned long mmu_interval_notifier_start(
+ struct mmu_interval_notifier *mni)
+{
+ return mni->interval_tree.start;
+}
+
+static inline unsigned long mmu_interval_notifier_last(
+ struct mmu_interval_notifier *mni)
+{
+ return mni->interval_tree.last;
+}
/**
* mmu_interval_set_seq - Save the invalidation sequence
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 47ad9cc89aab..4efecc0f13c...
2019 Nov 01
0
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
...em, radeon_mn, umem_odp, hfi1,
> scif_dma, vhost, gntdev, hmm) drivers are using a common pattern where
> they only use invalidate_range_start/end and immediately check the
> invalidating range against some driver data structure to tell if the
> driver is interested. Half of them use an interval_tree, the others are
> simple linear search lists.
>
> Of the ones I checked they largely seem to have various kinds of races,
> bugs and poor implementation. This is a result of the complexity in how
> the notifier interacts with get_user_pages(). It is extremely difficult to
> use i...
2020 Jan 15
0
[PATCH v6 4/6] mm/mmu_notifier: add mmu_interval_notifier_find()
...>> +struct mmu_interval_notifier *mmu_interval_notifier_find(struct mm_struct *mm,
>> + const struct mmu_interval_notifier_ops *ops,
>> + unsigned long start, unsigned long last)
>> +{
>> + struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
>> + struct interval_tree_node *node;
>> + struct mmu_interval_notifier *mni;
>> + struct mmu_interval_notifier *res = NULL;
>> +
>> + spin_lock(&mmn_mm->lock);
>> + node = interval_tree_iter_first(&mmn_mm->itree, start, last);
>> + if (node) {
>> + mni = container_of...
2019 Nov 07
5
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...fier.h
> index 12bd603d318ce7..51b92ba013ddce 100644
> --- a/include/linux/mmu_notifier.h
> +++ b/include/linux/mmu_notifier.h
> @@ -6,10 +6,12 @@
> #include <linux/spinlock.h>
> #include <linux/mm_types.h>
> #include <linux/srcu.h>
> +#include <linux/interval_tree.h>
>
> struct mmu_notifier_mm;
> struct mmu_notifier;
> struct mmu_notifier_range;
> +struct mmu_range_notifier;
Hi Jason,
Nice design, I love the seq foundation! So far, I'm not able to spot anything
actually wrong with the implementation, sorry about that.
Generally...
2019 Nov 07
0
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...y.
[...]
> > +struct mmu_range_notifier_ops {
> > + bool (*invalidate)(struct mmu_range_notifier *mrn,
> > + const struct mmu_notifier_range *range,
> > + unsigned long cur_seq);
> > +};
> > +
> > +struct mmu_range_notifier {
> > + struct interval_tree_node interval_tree;
> > + const struct mmu_range_notifier_ops *ops;
> > + struct hlist_node deferred_item;
> > + unsigned long invalidate_seq;
> > + struct mm_struct *mm;
> > +};
> > +
>
> Again, now we have the new struct mmu_range_notifier, and the old...
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
...sing drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) drivers are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others are
simple linear search lists.
Of the ones I checked they largely seem to have various kinds of races,
bugs and poor implementation. This is a result of the complexity in how
the notifier interacts with get_user_pages(). It is extremely difficult to
use it correctly.
Consolidate all...
2020 Jan 13
9
[PATCH v6 0/6] mm/hmm/test: add self tests for HMM
This series adds new functions to the mmu interval notifier API to
allow device drivers with MMUs to dynamically mirror a process' page
tables based on device faults and invalidation callbacks. The Nouveau
driver is updated to use the extended API and a set of stand alone self
tests is added to help validate and maintain correctness.
The patches are based on linux-5.5.0-rc6 and are for
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
...sing drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) drivers are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others are
simple linear search lists.
Of the ones I checked they largely seem to have various kinds of races,
bugs and poor implementation. This is a result of the complexity in how
the notifier interacts with get_user_pages(). It is extremely difficult to
use it correctly.
Consolidate all...
2020 Jan 13
0
[PATCH v6 2/6] mm/mmu_notifier: add mmu_interval_notifier_put()
...lidate_range_start.
+ * avoid using a blocking lock while walking the interval tree.
*/
+ INIT_HLIST_HEAD(&removed_list);
hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
deferred_item) {
+ hlist_del(&mni->deferred_item);
if (RB_EMPTY_NODE(&mni->interval_tree.rb))
interval_tree_insert(&mni->interval_tree,
&mmn_mm->itree);
- else
+ else {
interval_tree_remove(&mni->interval_tree,
&mmn_mm->itree);
- hlist_del(&mni->deferred_item);
+ if (mni->ops->release)
+ hlist_add_head(&...
2020 Jan 14
2
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...4 end)
> {
> unsigned long timeout =
> jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
> /* Have HMM fault pages within the fault window to the GPU. */
> struct hmm_range range = {
> - .notifier = &notifier->notifier,
> - .start = notifier->notifier.interval_tree.start,
> - .end = notifier->notifier.interval_tree.last + 1,
> + .start = start,
> + .end = end,
> .pfns = pfns,
> .flags = nouveau_svm_pfn_flags,
> .values = nouveau_svm_pfn_values,
> + .default_flags = 0,
> + .pfn_flags_mask = ~0UL,
> .pfn_shift = NV...
2019 Nov 12
0
[PATCH v3 06/14] RDMA/hfi1: Use mmu_interval_notifier_insert for user_exp_rcv
...+ fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+
hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
- node->mmu.addr, node->phys, phys);
+ node->notifier.interval_tree.start, node->phys,
+ phys);
return 0;
+
+out_unmap:
+ hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
+ node->rcventry, node->notifier.interval_tree.start,
+ node->phys, ret);
+ pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
+ PCI_D...
2019 Oct 28
1
[PATCH v2 06/15] RDMA/hfi1: Use mmu_range_notifier_inset for user_exp_rcv
...+ fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+
hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
- node->mmu.addr, node->phys, phys);
+ node->notifier.interval_tree.start, node->phys,
+ phys);
return 0;
+
+out_unmap:
+ hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
+ node->rcventry, node->notifier.interval_tree.start,
+ node->phys, ret);
+ pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
+ PCI_D...
2020 Jun 26
1
[PATCH v2] nouveau: fix page fault on device private memory
...nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index ba9f9359c30e..6586d9d39874 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.end = notifier->notifier.interval_tree.last + 1,
.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
.hmm_pfns = hmm_pfns,
+ .dev_private_owner = drm->dev,
};
struct mm_struct *mm = notifier->notifier.mm;
int ret;
--
2.20.1
2020 Jun 26
2
[PATCH] nouveau: fix page fault on device private memory
...nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index ba9f9359c30e..6586d9d39874 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.end = notifier->notifier.interval_tree.last + 1,
.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
.hmm_pfns = hmm_pfns,
+ .dev_private_owner = drm->dev,
};
struct mm_struct *mm = notifier->notifier.mm;
int ret;
--
2.20.1
2020 Feb 14
2
[PATCH 3/3] iommu/virtio: Enable x86 support
...API
> + select IOMMU_DMA
Can that have an "if X86" for clarity? AIUI it's not necessary for
virtio-iommu itself (and really shouldn't be), but is merely to satisfy
the x86 arch code's expectation that IOMMU drivers bring their own DMA
ops, right?
Robin.
> select INTERVAL_TREE
> help
> Para-virtualised IOMMU driver with virtio.
>
2020 Feb 14
2
[PATCH 3/3] iommu/virtio: Enable x86 support
...API
> + select IOMMU_DMA
Can that have an "if X86" for clarity? AIUI it's not necessary for
virtio-iommu itself (and really shouldn't be), but is merely to satisfy
the x86 arch code's expectation that IOMMU drivers bring their own DMA
ops, right?
Robin.
> select INTERVAL_TREE
> help
> Para-virtualised IOMMU driver with virtio.
>