Displaying 9 results from an estimated 9 matches for "hash_del".
2019 Oct 29
0
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
> -        struct amdgpu_device *adev = amn->adev;
> -        struct amdgpu_mn_node *node, *next_node;
> -        struct amdgpu_bo *bo, *next_bo;
> -
> -        mutex_lock(&adev->mn_lock);
> -        down_write(&amn->lock);
> -        hash_del(&amn->node);
> -        rbtree_postorder_for_each_entry_safe(node, next_node,
> -                                             &amn->objects.rb_root, it.rb) {
> -                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
> -                        bo->mn = NULL;
> -                        list_del_init(&bo->mn_list);
> -                }
>...
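The quoted hunk unlinks and clears every BO on each node's list while walking it; list_for_each_entry_safe() is what makes deleting during the walk legal, because it caches the next element before the loop body runs. A minimal, self-contained sketch of just that inner pattern, detached from amdgpu (struct demo_item, demo_list and demo_list_teardown are invented for the example):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
        struct list_head link;          /* chained on demo_list */
        int payload;
};

static LIST_HEAD(demo_list);

/* Unlink and free every entry; safe because the iterator caches the next node. */
static void demo_list_teardown(void)
{
        struct demo_item *item, *next;

        list_for_each_entry_safe(item, next, &demo_list, link) {
                list_del_init(&item->link);     /* like list_del_init(&bo->mn_list) */
                kfree(item);
        }
}

list_del_init() rather than plain list_del() leaves the removed entry pointing at itself, so a later list_empty() test on that entry stays well defined, which matters when, as above, other code may still hold the object briefly.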
2019 Oct 28
2
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...troy(struct work_struct *work)
-{
-        struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
-        struct amdgpu_device *adev = amn->adev;
-        struct amdgpu_mn_node *node, *next_node;
-        struct amdgpu_bo *bo, *next_bo;
-
-        mutex_lock(&adev->mn_lock);
-        down_write(&amn->lock);
-        hash_del(&amn->node);
-        rbtree_postorder_for_each_entry_safe(node, next_node,
-                                             &amn->objects.rb_root, it.rb) {
-                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
-                        bo->mn = NULL;
-                        list_del_init(&bo->mn_list);
-                }
-                kfree(node);
-        }
-        up_write(&...
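The hash_del(&amn->node) call in this hunk drops the amdgpu_mn from a driver-private hashtable before the object tree is torn down. A hedged sketch of the underlying <linux/hashtable.h> add/remove lifecycle, using a hypothetical table, key and struct rather than amdgpu's actual layout:

#include <linux/hashtable.h>
#include <linux/slab.h>

struct tracked {
        struct hlist_node node;         /* hashtable linkage, like amn->node */
        unsigned long key;
};

static DEFINE_HASHTABLE(tracked_table, 4);      /* 2^4 buckets */

static struct tracked *tracked_add(unsigned long key)
{
        struct tracked *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return NULL;
        t->key = key;
        hash_add(tracked_table, &t->node, key);
        return t;
}

static void tracked_remove(struct tracked *t)
{
        hash_del(&t->node);     /* same call as hash_del(&amn->node) above */
        kfree(t);
}

hash_del() only unlinks the hlist_node; freeing the containing object and serializing access to the table are the caller's job, which is why the hunk above takes adev->mn_lock and amn->lock first.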
2019 Oct 21
0
[PATCH RFC 1/3] kcov: remote coverage support
...k(&kcov_remote_lock);
> +        hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
> +                if (remote->kcov != kcov)
> +                        continue;
> +                kcov_debug("removing handle %llx\n", remote->handle);
> +                hash_del(&remote->hnode);
> +                kfree(remote);
> +        }
> +        /* Do reset before unlock to prevent races with kcov_remote_start(). */
> +        kcov_reset(kcov);
> +        spin_unlock(&kcov_remote_lock);
> +}
> +
> static void kcov_get(struct kcov *kcov...
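The kcov hunk deletes matching entries while scanning the whole table; hash_for_each_safe() keeps a temporary pointer to the next node so hash_del() plus kfree() on the current one is safe mid-walk. A self-contained sketch of that shape with invented names (pending_table, struct pending, owner), not the kcov code:

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct pending {
        struct hlist_node hnode;
        void *owner;
        u64 handle;
};

static DEFINE_HASHTABLE(pending_table, 5);
static DEFINE_SPINLOCK(pending_lock);

/* Drop every entry that belongs to @owner, freeing as we go. */
static void pending_remove_owner(void *owner)
{
        struct pending *p;
        struct hlist_node *tmp;
        int bkt;

        spin_lock(&pending_lock);
        hash_for_each_safe(pending_table, bkt, tmp, p, hnode) {
                if (p->owner != owner)
                        continue;
                hash_del(&p->hnode);
                kfree(p);
        }
        spin_unlock(&pending_lock);
}

The spinlock plays the role of kcov_remote_lock above; hash_del() itself performs no locking.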
2019 Aug 09
6
[RFC PATCH v6 71/92] mm: add support for remote mapping
...16lx\n", __func__, (unsigned long)tdb->mm);
+
+        target_db_free(tdb);
+}
+
+static void target_db_put(struct target_db *tdb)
+{
+        if (refcount_dec_and_test(&tdb->refcnt)) {
+                pr_debug("%s: mm %016lx\n", __func__, (unsigned long)tdb->mm);
+
+                spin_lock(&tdb_lock);
+                hash_del(&tdb->db_link);
+                spin_unlock(&tdb_lock);
+
+                mm_remote_db_target_release(tdb);
+
+                ASSERT(target_db_empty(tdb));
+
+                mmu_notifier_call_srcu(&tdb->rcu, target_db_free_delayed);
+        }
+}
+
+static struct target_db *target_db_lookup(const struct mm_struct *mm)
+{
+        struct target_d...
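target_db_put() above combines refcount_dec_and_test() with unhashing under a spinlock and a deferred free. A hedged sketch of that put-side pattern follows; all names are hypothetical, and the deferred free uses plain kfree_rcu() rather than the SRCU callback the patch relies on:

#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct tdb_like {
        struct hlist_node link;         /* entry in tdb_table */
        refcount_t refcnt;
        struct rcu_head rcu;
};

static DEFINE_HASHTABLE(tdb_table, 6);
static DEFINE_SPINLOCK(tdb_table_lock);

static void tdb_like_put(struct tdb_like *t)
{
        if (!refcount_dec_and_test(&t->refcnt))
                return;

        /* Last reference: make the object unreachable, then free it. */
        spin_lock(&tdb_table_lock);
        hash_del(&t->link);
        spin_unlock(&tdb_table_lock);

        kfree_rcu(t, rcu);      /* lockless readers under RCU finish first */
}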
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where they only use
invalidate_range_start/end and immediately check the invalidating range
against some driver data structure to tell whether the driver is
interested. Half of them use an interval_tree, the others
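The cover letter is describing the per-driver pattern being consolidated: an invalidate_range_start callback that checks the invalidated range against a driver-owned interval tree. A schematic, heavily simplified sketch of that pattern, in which demo_mirror and its locking are invented and none of the listed drivers look exactly like this:

#include <linux/interval_tree.h>
#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/rwsem.h>

struct demo_mirror {
        struct mmu_notifier mn;
        struct rb_root_cached objects;  /* one interval_tree_node per tracked range */
        struct rw_semaphore lock;
};

static int demo_invalidate_range_start(struct mmu_notifier *mn,
                                       const struct mmu_notifier_range *range)
{
        struct demo_mirror *mirror = container_of(mn, struct demo_mirror, mn);
        struct interval_tree_node *node;

        down_read(&mirror->lock);
        /* interval_tree takes an inclusive 'last' address, hence end - 1. */
        node = interval_tree_iter_first(&mirror->objects,
                                        range->start, range->end - 1);
        while (node) {
                /* A real driver would stop using this range here. */
                node = interval_tree_iter_next(node, range->start,
                                               range->end - 1);
        }
        up_read(&mirror->lock);
        return 0;
}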
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where they only use
invalidate_range_start/end and immediately check the invalidating range
against some driver data structure to tell whether the driver is
interested. Half of them use an interval_tree, the others
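The v3 posting repeats the same description; for completeness, here is a hedged sketch of the other half of the classic pattern, attaching such a notifier to an address space with mmu_notifier_register(). struct demo_watch and its callback are invented for the example:

#include <linux/err.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

struct demo_watch {
        struct mmu_notifier mn;
};

static int demo_watch_invalidate(struct mmu_notifier *mn,
                                 const struct mmu_notifier_range *range)
{
        /* A real driver would check its interval tree here (see the sketch above). */
        return 0;
}

static const struct mmu_notifier_ops demo_watch_ops = {
        .invalidate_range_start = demo_watch_invalidate,
};

/* Attach a notifier to @mm; callbacks fire until it is unregistered. */
static struct demo_watch *demo_watch_attach(struct mm_struct *mm)
{
        struct demo_watch *w = kzalloc(sizeof(*w), GFP_KERNEL);
        int ret;

        if (!w)
                return ERR_PTR(-ENOMEM);

        w->mn.ops = &demo_watch_ops;
        ret = mmu_notifier_register(&w->mn, mm);
        if (ret) {
                kfree(w);
                return ERR_PTR(ret);
        }
        return w;
}

Teardown is the mirror image: mmu_notifier_unregister(&w->mn, mm), after which no further callbacks arrive for this watcher.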
2019 Aug 09
117
[RFC PATCH v6 00/92] VM introspection
The KVM introspection subsystem provides a facility for applications running
on the host or in a separate VM to control the execution of other VMs
(pause, resume, shutdown), query the state of the vCPUs (GPRs, MSRs, etc.),
alter the page access bits in the shadow page tables (only for the
hardware-backed ones, e.g. Intel's EPT), and receive notifications when
events of interest have taken place