search for: lru_lock

Displaying 20 results from an estimated 120 matches for "lru_lock".

2010 Jan 18
1
[PATCH] drm/ttm: Fix race condition in ttm_bo_delayed_delete (v2)
..._all) static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { struct ttm_bo_global *glob = bdev->glob; - struct ttm_buffer_object *entry, *nentry; - struct list_head *list, *next; - int ret; + struct ttm_buffer_object *entry; + int ret = 0; spin_lock(&glob->lru_lock); - list_for_each_safe(list, next, &bdev->ddestroy) { - entry = list_entry(list, struct ttm_buffer_object, ddestroy); - nentry = NULL; + if (list_empty(&bdev->ddestroy)) { + spin_unlock(&glob->lru_lock); + return 0; + } - /* - * Protect the next list entry from destruc...
2010 Jan 18
2
[PATCH] drm/ttm: Fix race condition in ttm_bo_delayed_delete
..._all) static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { struct ttm_bo_global *glob = bdev->glob; - struct ttm_buffer_object *entry, *nentry; - struct list_head *list, *next; - int ret; + struct ttm_buffer_object *entry; + int ret = 0; spin_lock(&glob->lru_lock); - list_for_each_safe(list, next, &bdev->ddestroy) { - entry = list_entry(list, struct ttm_buffer_object, ddestroy); - nentry = NULL; + if (list_empty(&bdev->ddestroy)) { + spin_unlock(&glob->lru_lock); + return 0; + } - /* - * Protect the next list entry from destruc...
2014 May 14
0
[RFC PATCH v1 02/16] drm/ttm: kill off some members to ttm_validate_buffer
...qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -350,7 +350,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) ttm_bo_add_to_lru(bo); __ttm_bo_unreserve(bo); - entry->reserved = false; } spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 39a11bbd2bac..6db47a72667e 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,20 +32,12 @@ #include <linux/sched.h> #include <linux/mo...
2010 Jan 20
0
[PATCH] drm/ttm: Fix race condition in ttm_bo_delayed_delete (v3, final)
...static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { struct ttm_bo_global *glob = bdev->glob; - struct ttm_buffer_object *entry, *nentry; - struct list_head *list, *next; - int ret; + struct ttm_buffer_object *entry = NULL; + int ret = 0; spin_lock(&glob->lru_lock); - list_for_each_safe(list, next, &bdev->ddestroy) { - entry = list_entry(list, struct ttm_buffer_object, ddestroy); - nentry = NULL; + if (list_empty(&bdev->ddestroy)) + goto out_unlock; - /* - * Protect the next list entry from destruction while we - * unlock the lru_lock...
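The snippet above cuts off mid-hunk. As a hedged, hypothetical sketch of the pattern the v3 fix is built around -- take lru_lock, bail out early when ddestroy is empty, and pin the next entry with its list_kref before dropping the lock for cleanup -- assuming the TTM structures of that era and the ttm_bo_release_list() destructor, with the actual ttm_bo_cleanup_refs() call elided to a comment:

/*
 * Hypothetical, simplified sketch -- not the patch itself.  Walk
 * bdev->ddestroy under lru_lock; before dropping the lock to clean up an
 * entry, take a reference on the following entry so a concurrent delete
 * cannot free it while the lock is released.
 */
static void delayed_delete_sketch(struct ttm_bo_device *bdev)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy)) {
		spin_unlock(&glob->lru_lock);
		return;
	}

	entry = list_first_entry(&bdev->ddestroy,
				 struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *next = NULL;

		/* Pin the next entry (if any) before releasing lru_lock. */
		if (!list_empty(&entry->ddestroy) &&
		    entry->ddestroy.next != &bdev->ddestroy) {
			next = list_entry(entry->ddestroy.next,
					  struct ttm_buffer_object, ddestroy);
			kref_get(&next->list_kref);
		}
		spin_unlock(&glob->lru_lock);

		/* The real code calls ttm_bo_cleanup_refs(entry, ...) here. */
		kref_put(&entry->list_kref, ttm_bo_release_list);

		entry = next;
		if (!entry)
			return;

		spin_lock(&glob->lru_lock);
		/* A concurrent cleanup may have removed the entry already. */
		if (list_empty(&entry->ddestroy))
			break;
	}

	spin_unlock(&glob->lru_lock);
	kref_put(&entry->list_kref, ttm_bo_release_list);
}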
2014 May 14
0
[RFC PATCH v1 16/16] drm/ttm: use rcu in core ttm
...= 0, i; - int ret = 0; - - fobj = reservation_object_get_list(bo->resv); - if (fobj && fobj->shared_count) { - shared = kmalloc(sizeof(*shared) * fobj->shared_count, - GFP_KERNEL); - - if (!shared) { - ret = -ENOMEM; - __ttm_bo_unreserve(bo); - spin_unlock(&glob->lru_lock); - return ret; - } - - for (i = 0; i < fobj->shared_count; ++i) { - if (!fence_is_signaled(fobj->shared[i])) { - fence_get(fobj->shared[i]); - shared[shared_count++] = fobj->shared[i]; - } - } - if (!shared_count) { - kfree(shared); - shared = NULL; - } - } - -...
2014 May 14
0
[RFC PATCH v1 06/16] drm/ttm: kill fence_lock
...ease.c b/drivers/gpu/drm/qxl/qxl_release.c index e85c4d274dc0..4045ba873ab8 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -337,7 +337,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) glob = bo->glob; spin_lock(&glob->lru_lock); - spin_lock(&bdev->fence_lock); list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; @@ -351,7 +350,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) ttm_bo_add_to_lru(bo); __ttm_bo_unreserve(bo); } - spin_unlock(&bdev->fen...
2008 May 20
4
[PATCH O/4] BIO tracking take2
Hi all, With this series of patches, you can determine the owners of any type of I/O. I ported the previous version to linux-2.6.26-rc2-mm1. This makes dm-ioband -- an I/O bandwidth controller -- able to control block I/O bandwidth even when it accepts delayed write requests. Dm-ioband can find the owner cgroup of each request. It is also possible that OpenVz team and NEC Uchida-san team
2019 Jun 26
2
[PATCH 04/25] mm: remove MEMORY_DEVICE_PUBLIC support
...) > continue; > > - /* Device public page can not be huge page */ > - if (is_device_public_page(page)) { > - if (locked_pgdat) { > - spin_unlock_irqrestore(&locked_pgdat->lru_lock, > - flags); > - locked_pgdat = NULL; > - } > - put_devmap_managed_page(page); > - continue; > - } > - This collid...
2008 Sep 19
2
[PATCH 0/5] bio-cgroup: Introduction
Hi everyone, Here are new releases of bio-cgroup. Changes from the previous version are as follows: - Accurate dirty-page tracking Support migrating pages between bio-cgroups with minimum overhead, but I think such a situation is quite rare. - Fix a bug in swapcache page handling Sometimes, a "bad page state" error occurs since the memory controller has temporarily changed the
2014 Jul 09
0
[PATCH 05/17] drm/ttm: call ttm_bo_wait while inside a reservation
...bo_wait, the wait should be - * finished, and no new wait object should have been added. - */ - spin_lock(&bdev->fence_lock); - ret = ttm_bo_wait(bo, false, false, true); - WARN_ON(ret); - spin_unlock(&bdev->fence_lock); - if (ret) - return ret; - spin_lock(&glob->lru_lock); ret = __ttm_bo_reserve(bo, false, true, false, 0); @@ -528,8 +517,16 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, spin_unlock(&glob->lru_lock); return 0; } - } else - spin_unlock(&bdev->fence_lock); + + /* + * remove sync_obj with tt...
2014 May 14
17
[RFC PATCH v1 00/16] Convert all ttm drivers to use the new reservation interface
This series depends on the previously posted reservation API patches. Two of them are not yet in the for-next-fences branch of git://git.linaro.org/people/sumit.semwal/linux-3.x.git The missing patches are still in my vmwgfx_wip branch at git://people.freedesktop.org/~mlankhorst/linux All ttm drivers are converted to the fence API, fence_lock is removed, and RCU is used in its place. qxl is the first
2014 Jul 31
19
[PATCH 01/19] fence: add debugging lines to fence_is_signaled for the callback
fence_is_signaled callback should support being run in atomic context, but not in irq context. Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com> --- include/linux/fence.h | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/include/linux/fence.h b/include/linux/fence.h index d174585b874b..c1a4519ba2f5 100644 ---
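The description above states the constraint: the ->signaled() callback may be called from atomic context, but never from hard-irq context. As a hedged illustration of how a debug build could check such a rule (this is not the actual patch; fence_is_signaled_checked() is a hypothetical name built on the 2014-era struct fence API from include/linux/fence.h):

#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/fence.h>	/* 2014-era API, later renamed to dma_fence */

/*
 * Hypothetical helper, not the patch itself: query the ->signaled()
 * callback while warning once if we are ever invoked from hard-irq
 * context.  Atomic (spinlocked or preempt-disabled) callers stay legal.
 */
static inline bool fence_is_signaled_checked(struct fence *fence)
{
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	WARN_ON_ONCE(in_irq());		/* irq context is not supported */

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		fence_signal(fence);
		return true;
	}
	return false;
}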
2014 Jul 09
22
[PATCH 00/17] Convert TTM to the new fence interface.
This series applies on top of the driver-core-next branch of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git Before converting ttm to the new fence interface I had to fix some drivers to require a reservation before poking with fence_obj. After flipping the switch RCU becomes available instead, and the extra reservations can be dropped again. :-) I've done at least basic
2019 Sep 05
1
[PATCH 1/8] drm/ttm: turn ttm_bo_device.vma_manager into a pointer
...@@ extern struct ttm_bo_global { * * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. * @man: An array of mem_type_managers. - * @vma_manager: Address space manager + * @vma_manager: Address space manager (pointer) + * @_vma_manager: Address space manager (embedded) * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. * @dev_mapping: A pointer to the struct address_space representing the @@ -464,7 +465,8 @@ struct ttm_bo_device { /* * Protected by internal locks. */ - struct drm_vma_offset_manager vma_manager; + struct drm_vma_...
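The kernel-doc hunk above summarizes the new layout: the device carries an embedded manager as the default, while TTM itself only dereferences the pointer, which a driver may redirect to a manager it already owns. A rough sketch of that shape (the struct and function names below are illustrative, and the page-offset range passed to drm_vma_offset_manager_init() is a placeholder, not the real TTM constants):

#include <drm/drm_vma_manager.h>

/* Illustrative stand-in for the relevant part of struct ttm_bo_device. */
struct bo_device_sketch {
	struct drm_vma_offset_manager *vma_manager;	/* what TTM uses     */
	struct drm_vma_offset_manager _vma_manager;	/* embedded default  */
};

/*
 * Hypothetical init helper: a driver that already owns a manager (for
 * example one shared with GEM) passes it in; otherwise the embedded
 * instance is initialized and used as the fallback.
 */
static void bo_device_sketch_init(struct bo_device_sketch *bdev,
				  struct drm_vma_offset_manager *vma_manager)
{
	if (!vma_manager) {
		/* Placeholder offset/size, not the real TTM values. */
		drm_vma_offset_manager_init(&bdev->_vma_manager,
					    0x100000UL, 0x10000000UL);
		vma_manager = &bdev->_vma_manager;
	}
	bdev->vma_manager = vma_manager;
}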
2014 May 14
0
[RFC PATCH v1 12/16] drm/ttm: flip the switch, and convert to dma_fence
...ueue(struct ttm_buffer_object *bo) ret = __ttm_bo_reserve(bo, false, true, false, 0); if (!ret) { - (void) ttm_bo_wait(bo, false, false, true); - - if (!bo->sync_obj) { + if (!ttm_bo_wait(bo, false, false, true)) { put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); @@ -426,8 +442,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_list_ref_sub(bo, put_count, true); return; - } - sync_obj = driver->sync_obj_ref(bo->sync_obj); + } else + ttm_bo_flush_all_fences(bo); /* * Make NO_EVICT bos immediatel...