Displaying 16 results from an estimated 16 matches for "deferred_list".
2012 Mar 09
2
[PATCH] linux-2.6.18/gnttab: add deferred freeing logic
..._ref(ref))
+		return 1;
+	printk(KERN_DEBUG "WARNING: g.e. %#x still in use!\n", ref);
+	return 0;
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+struct deferred_entry {
+	struct list_head list;
+	grant_ref_t ref;
+	uint16_t warn_delay;
+	struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+	unsigned int nr = 10;
+	struct deferred_entry *first = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&g...
2012 Apr 05
0
[PATCH] xen/gnttab: add deferred freeing logic
...eadonly))
+		return 1;
+	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
+	return 0;
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+struct deferred_entry {
+	struct list_head list;
+	grant_ref_t ref;
+	bool ro;
+	uint16_t warn_delay;
+	struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+	unsigned int nr = 10;
+	struct deferred_entry *first = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&g...
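Both excerpts above (the linux-2.6.18 variant and the mainline one) implement the same scheme: a grant reference the peer still holds cannot be freed immediately, so it is queued on deferred_list and a timer retries later. A minimal sketch of the queueing side, modeled on the merged patch; it assumes the driver's existing gnttab_list_lock spinlock and the pre-4.15 timer API the excerpts use:

static void gnttab_add_deferred(grant_ref_t ref, bool ro, struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	unsigned long flags;

	if (!entry)
		return;	/* the real patch logs that the ref is leaked */

	entry->ref = ref;
	entry->ro = ro;
	entry->page = page;
	entry->warn_delay = 60;	/* complain roughly once a minute */

	spin_lock_irqsave(&gnttab_list_lock, flags);
	list_add_tail(&entry->list, &deferred_list);
	/* arm the retry timer if it is not already pending */
	if (!timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

gnttab_handle_deferred() then processes a bounded batch per tick (nr = 10 above), retrying the end-foreign-access check and re-queueing entries that are still busy.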
2020 Jan 13
0
[PATCH v6 3/6] mm/notifier: add mmu_interval_notifier_update()
...nding point as contained in the interval.
@@ -1038,8 +1047,12 @@ static unsigned long __mmu_interval_notifier_put(
		if (RB_EMPTY_NODE(&mni->interval_tree.rb)) {
			hlist_del(&mni->deferred_item);
		} else {
-			hlist_add_head(&mni->deferred_item,
-				       &mmn_mm->deferred_list);
+			if (mni->updated_last) {
+				mni->updated_start = 0;
+				mni->updated_last = 0;
+			} else
+				hlist_add_head(&mni->deferred_item,
+					       &mmn_mm->deferred_list);
			seq = mmn_mm->invalidate_seq;
		}
	} else {
@@ -1108,6 +1121,56 @@ void mmu_interval_noti...
2019 Oct 28
0
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...count.
This approach avoids having to intersect the interval tree twice (as
umem_odp does) at the potential cost of a longer device page fault.
- kvm/umem_odp use a sequence counter to drive the collision retry,
via invalidate_seq (a read-side sketch follows this excerpt)
- a deferred-work todo list processed on unlock, RTNL-style, via deferred_list.
This makes adding/removing interval tree members more deterministic
- seqlock, except this version makes the seqlock idea multi-holder on the
write side by protecting it with active_invalidate_ranges and a spinlock
To minimize MM overhead when only the interval tree is being used, the
entire...
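The three borrowed ideas combine into a simple driver-side loop. A sketch of the read side under the mainline names mmu_interval_read_begin()/mmu_interval_read_retry() (this v2 posting calls them mmu_range_read_*); driver_fault_range() and driver_map_range() are hypothetical driver pieces, not part of the series:

static DEFINE_MUTEX(driver_lock);	/* serializes device page-table updates */

static int driver_handle_device_fault(struct mmu_interval_notifier *mni)
{
	unsigned long seq;

	do {
		/* sleeps while an invalidation is in progress */
		seq = mmu_interval_read_begin(mni);

		driver_fault_range(mni);	/* read the CPU page tables */

		mutex_lock(&driver_lock);
		if (!mmu_interval_read_retry(mni, seq))
			break;			/* snapshot still valid */
		/* an invalidation raced us: drop the lock and redo */
		mutex_unlock(&driver_lock);
	} while (true);

	driver_map_range(mni);			/* program the device MMU */
	mutex_unlock(&driver_lock);
	return 0;
}

Because the invalidate side also takes driver_lock, a successful retry check under that lock guarantees the snapshot is still current when the device mapping is programmed.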
2019 Nov 12
0
[PATCH v3 02/14] mm/mmu_notifier: add an interval tree notifier
...count.
This approach avoids having to intersect the interval tree twice (as
umem_odp does) at the potential cost of a longer device page fault.
- kvm/umem_odp use a sequence counter to drive the collision retry,
via invalidate_seq
- a deferred-work todo list processed on unlock, RTNL-style, via deferred_list.
This makes adding/removing interval tree members more deterministic
- seqlock, except this version makes the seqlock idea multi-holder on the
write side by protecting it with active_invalidate_ranges and a spinlock
To minimize MM overhead when only the interval tree is being used, the
entire...
2019 Nov 05
1
[PATCH v2 01/15] mm/mmu_notifier: define the header pre-processor parts even if disabled
...uct mm_struct *mm);
static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2b7485919ecf..107f9406a92d 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -47,6 +47,16 @@ struct mmu_notifier_mm {
	struct hlist_head deferred_list;
};
+int mm_has_notifiers(struct mm_struct *mm)
+{
+	return unlikely(mm->mmu_notifier_mm);
+}
+
+void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+	mm->mmu_notifier_mm = NULL;
+}
+
/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcoun...
2019 Nov 06
0
[PATCH v2 01/15] mm/mmu_notifier: define the header pre-processor parts even if disabled
...n add:
>
> Reviewed-by: John Hubbard <jhubbard at nvidia.com>
>
Thanks
> diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
> index 2b7485919ecf..107f9406a92d 100644
> +++ b/mm/mmu_notifier.c
> @@ -47,6 +47,16 @@ struct mmu_notifier_mm {
> struct hlist_head deferred_list;
> };
>
> +int mm_has_notifiers(struct mm_struct *mm)
> +{
> +	return unlikely(mm->mmu_notifier_mm);
> +}
This inline is performance sensitive; it needs to stay inlined.
Jason
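The objection made concrete: keeping the check as a static inline in the header means the common no-notifiers case compiles to a single inlined pointer test on every caller's fast path, e.g.:

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

Moving it out of line into mm/mmu_notifier.c would add a function call to hot paths that test for notifiers on every invalidation.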
2020 Jan 13
0
[PATCH v6 2/6] mm/mmu_notifier: add mmu_interval_notifier_put()
...* they are progressed. This arrangement for tree updates is used to
- * avoid using a blocking lock during invalidate_range_start.
+ * avoid using a blocking lock while walking the interval tree.
*/
+	INIT_HLIST_HEAD(&removed_list);
	hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
				  deferred_item) {
+		hlist_del(&mni->deferred_item);
		if (RB_EMPTY_NODE(&mni->interval_tree.rb))
			interval_tree_insert(&mni->interval_tree,
					     &mmn_mm->itree);
-		else
+		else {
			interval_tree_remove(&mni->interval_tree,
					     &mmn_...
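The reshaped loop follows a common kernel pattern: while holding the spinlock, move nodes that need further (possibly sleeping) work onto a local list, then finish them after unlocking. A sketch of that pattern with the elided tail filled in by assumption, not the literal patch:

	struct hlist_head removed_list;

	INIT_HLIST_HEAD(&removed_list);
	spin_lock(&mmn_mm->lock);
	hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
				  deferred_item) {
		hlist_del(&mni->deferred_item);
		if (RB_EMPTY_NODE(&mni->interval_tree.rb))
			interval_tree_insert(&mni->interval_tree,
					     &mmn_mm->itree);
		else {
			interval_tree_remove(&mni->interval_tree,
					     &mmn_mm->itree);
			/* finish (wake/free) after the lock is dropped */
			hlist_add_head(&mni->deferred_item, &removed_list);
		}
	}
	spin_unlock(&mmn_mm->lock);
	/* entries on removed_list can now be completed; sleeping is fine */

This matches the comment change in the hunk: the blocking work no longer happens while walking the interval tree.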
2019 Oct 29
1
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...having to intersect the interval tree twice (as
> umem_odp does) at the potential cost of a longer device page fault.
>
> - kvm/umem_odp use a sequence counter to drive the collision retry,
> via invalidate_seq
>
> - a deferred-work todo list processed on unlock, RTNL-style, via deferred_list.
> This makes adding/removing interval tree members more deterministic
>
> - seqlock, except this version makes the seqlock idea multi-holder on the
> write side by protecting it with active_invalidate_ranges and a spinlock
>
> To minimize MM overhead when only the interval...
2019 Nov 07
5
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...d list;
> +	bool has_interval;
> 	/* to serialize the list modifications and hlist_unhashed */
> 	spinlock_t lock;
> +	unsigned long invalidate_seq;
> +	unsigned long active_invalidate_ranges;
> +	struct rb_root_cached itree;
> +	wait_queue_head_t wq;
> +	struct hlist_head deferred_list;
> };
>
> +/*
> + * This is a collision-retry read-side/write-side 'lock', a lot like a
> + * seqcount, however this allows multiple write-sides to hold it at
> + * once. Conceptually the write side is protecting the values of the PTEs in
> + * this mm, such that PTE...
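The quoted comment is the heart of the design: the low bit of invalidate_seq signals "invalidation in progress", and active_invalidate_ranges makes the seqlock idea multi-holder. A sketch of the resulting write side (field names follow the quoted struct; the deferred-list processing of the real function is elided here):

static void mn_itree_inv_start(struct mmu_notifier_mm *mmn_mm)
{
	spin_lock(&mmn_mm->lock);
	if (!mmn_mm->active_invalidate_ranges++)
		mmn_mm->invalidate_seq |= 1;	/* odd: readers must retry */
	spin_unlock(&mmn_mm->lock);
}

static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
{
	spin_lock(&mmn_mm->lock);
	if (!--mmn_mm->active_invalidate_ranges) {
		/* ... apply deferred_list inserts/removes here ... */
		mmn_mm->invalidate_seq++;	/* even again: itree stable */
		wake_up_all(&mmn_mm->wq);	/* release waiting readers */
	}
	spin_unlock(&mmn_mm->lock);
}

Unlike a plain seqlock, any number of invalidations may overlap; only the transition between zero and non-zero holders changes the parity.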
2019 Nov 07
2
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...ven in the idle state.
*/
> > > +	spin_lock(&mmn_mm->lock);
> > > +	if (mmn_mm->active_invalidate_ranges) {
> > > +		if (mn_itree_is_invalidating(mmn_mm))
> > > +			hlist_add_head(&mrn->deferred_item,
> > > +				       &mmn_mm->deferred_list);
> > > +		else {
> > > +			mmn_mm->invalidate_seq |= 1;
> > > +			interval_tree_insert(&mrn->interval_tree,
> > > +					     &mmn_mm->itree);
> > > +		}
> > > +		mrn->invalidate_seq = mmn_mm->invalidate_seq;
> > >...
2019 Nov 07
0
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...+ * odd, see mmu_range_read_begin()
> > + */
> > +	spin_lock(&mmn_mm->lock);
> > +	if (mmn_mm->active_invalidate_ranges) {
> > +		if (mn_itree_is_invalidating(mmn_mm))
> > +			hlist_add_head(&mrn->deferred_item,
> > +				       &mmn_mm->deferred_list);
> > +		else {
> > +			mmn_mm->invalidate_seq |= 1;
> > +			interval_tree_insert(&mrn->interval_tree,
> > +					     &mmn_mm->itree);
> > +		}
> > +		mrn->invalidate_seq = mmn_mm->invalidate_seq;
> > +	} else {
> > +		WARN_ON(m...
2019 Nov 07
1
[PATCH v2 02/15] mm/mmu_notifier: add an interval tree notifier
...his is really why we have the even/odd thing at all.
> > +	spin_lock(&mmn_mm->lock);
> > +	if (mmn_mm->active_invalidate_ranges) {
> > +		if (mn_itree_is_invalidating(mmn_mm))
> > +			hlist_add_head(&mrn->deferred_item,
> > +				       &mmn_mm->deferred_list);
> > +		else {
> > +			mmn_mm->invalidate_seq |= 1;
> > +			interval_tree_insert(&mrn->interval_tree,
> > +					     &mmn_mm->itree);
> > +		}
> > +		mrn->invalidate_seq = mmn_mm->invalidate_seq;
> > +	} else {
> > +		WARN_ON(m...
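Pulling the three quoted fragments together, the full locked insert path reads roughly as below. The branch endings the excerpts cut off are filled in from the version that was eventually merged, with the v2 names (mrn, mmn_mm) kept to match the quotes:

	spin_lock(&mmn_mm->lock);
	if (mmn_mm->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(mmn_mm))
			/* tree is being walked: let inv_end do the insert */
			hlist_add_head(&mrn->deferred_item,
				       &mmn_mm->deferred_list);
		else {
			/* between walks: insert now, but make readers retry */
			mmn_mm->invalidate_seq |= 1;
			interval_tree_insert(&mrn->interval_tree,
					     &mmn_mm->itree);
		}
		mrn->invalidate_seq = mmn_mm->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(mmn_mm));
		/*
		 * Odd, and unequal to the current (even) seq: marks this
		 * range as not under invalidation.
		 */
		mrn->invalidate_seq = mmn_mm->invalidate_seq - 1;
		interval_tree_insert(&mrn->interval_tree, &mmn_mm->itree);
	}
	spin_unlock(&mmn_mm->lock);

This is what the even/odd discussion in the thread is about: an insert during an active walk must not change the tree mid-walk, and forcing the sequence odd makes any concurrent reader re-snapshot.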
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
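The pattern the cover letter describes, reduced to a sketch: the driver keeps an interval tree of the ranges it mirrors and checks every invalidation against it. my_tree, my_lock and my_flag_range_stale() are hypothetical driver pieces, not code from the series:

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	spin_lock(&my_lock);
	/* range->end is exclusive; interval_tree takes an inclusive last */
	for (node = interval_tree_iter_first(&my_tree, range->start,
					     range->end - 1);
	     node;
	     node = interval_tree_iter_next(node, range->start,
					    range->end - 1))
		my_flag_range_stale(node);	/* driver is interested */
	spin_unlock(&my_lock);
	return 0;
}

The series consolidates this boilerplate (tree, lock, overlap walk, retry sequencing) into the mmu_notifier core so each driver registers a per-range notifier instead.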
2020 Jan 13
9
[PATCH v6 0/6] mm/hmm/test: add self tests for HMM
This series adds new functions to the mmu interval notifier API to
allow device drivers with MMUs to dynamically mirror a process' page
tables based on device faults and invalidation callbacks. The Nouveau
driver is updated to use the extended API and a set of stand alone self
tests is added to help validate and maintain correctness.
The patches are based on linux-5.5.0-rc6 and are for
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others