search for: spin_unlock

Displaying 17 results from an estimated 1254 matches for "spin_unlock".

2010 Aug 04
6
[PATCH -v2 0/3] jbd2 scalability patches
This version fixes three bugs in the 2nd patch of this series that caused a kernel BUG when the system was under stress. We weren't accounting for t_outstanding_credits correctly, and there were race conditions caused by my having overlooked that __jbd2_log_wait_for_space() and jbd2_get_transaction() require j_state_lock to be write locked. Theodore Ts'o (3): jbd2: Use
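A note on the invariant named above: helpers that may install or retire a transaction must hold j_state_lock for writing, not merely for reading. A minimal sketch of that rwlock discipline, using invented names (state_lock, running_transaction) rather than the real jbd2 code:

#include <linux/spinlock.h>	/* rwlock_t, DEFINE_RWLOCK */

static DEFINE_RWLOCK(state_lock);	/* stand-in for journal->j_state_lock */
static int running_transaction;		/* stand-in for journal state */

/* Caller must hold state_lock for writing: we may install a new
 * transaction, which readers must never observe half-built. */
static void get_transaction_locked(void)
{
	if (!running_transaction)
		running_transaction = 1;
}

static void start_handle(void)
{
	write_lock(&state_lock);	/* read_lock() here would be the bug */
	get_transaction_locked();
	write_unlock(&state_lock);
}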
2014 May 14
0
[RFC PATCH v1 06/16] drm/ttm: kill fence_lock
...u/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1196,9 +1196,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}
 	/* Fallback to software copy. */
-	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
-	spin_unlock(&bo->bdev->fence_lock);
 	if (ret == 0)
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
@@ -1425,26 +1423,19 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+	nouveau_fence_unref((st...
2010 Jun 19
3
[PATCH 1/1] ocfs2 fix o2dlm dlm run purgelist
...struct dlm_ctxt *dlm,
 	int master;
 	int ret = 0;
-	spin_lock(&res->spinlock);
-	if (!__dlm_lockres_unused(res)) {
-		mlog(0, "%s:%.*s: tried to purge but not unused\n",
-		     dlm->name, res->lockname.len, res->lockname.name);
-		__dlm_print_one_lock_resource(res);
-		spin_unlock(&res->spinlock);
-		BUG();
-	}
-	if (res->state & DLM_LOCK_RES_MIGRATING) {
 		mlog(0, "%s:%.*s: Delay dropref as this lockres is "
 		     "being remastered\n", dlm->name, res->lockname.len,
@@ -184,13 +175,13 @@ static int dlm_purge_lockres(struct dlm_ctx...
2019 Oct 15
7
[PATCH 0/5] virtiofs: Fix couple of deadlocks
Hi, We have a couple of places which can result in a deadlock. This patch series fixes them. We can be called with fc->bg_lock held (for background requests) while submitting a request. This leads to two constraints. - We can't end requests in the submitter's context and call fuse_end_request(), as it tries to take fc->bg_lock as well. So queue these requests on a list and use a worker to
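The deferral pattern this series relies on, parking work that cannot be completed inline because the caller may hold the same lock and finishing it from a worker, looks roughly like this. A self-contained sketch with invented names (struct req, pending_reqs, end_work); it is not the virtiofs code:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct req {
	struct list_head list;
};

static LIST_HEAD(pending_reqs);		/* requests we could not end inline */
static DEFINE_SPINLOCK(pending_lock);

static void end_requests_work(struct work_struct *work)
{
	struct req *r;

	spin_lock(&pending_lock);
	while ((r = list_first_entry_or_null(&pending_reqs,
					     struct req, list))) {
		list_del(&r->list);
		spin_unlock(&pending_lock);
		/* Complete the request here: this context holds no
		 * caller locks, so taking fc->bg_lock would be safe. */
		kfree(r);
		spin_lock(&pending_lock);
	}
	spin_unlock(&pending_lock);
}

static DECLARE_WORK(end_work, end_requests_work);

/* Submitter side: may run with fc->bg_lock held, so never
 * complete the request inline; park it and kick the worker. */
static void defer_end_request(struct req *r)
{
	spin_lock(&pending_lock);
	list_add_tail(&r->list, &pending_reqs);
	spin_unlock(&pending_lock);
	schedule_work(&end_work);
}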
2023 Mar 02
1
[PATCH v2 7/8] vdpa_sim: replace the spinlock with a mutex to protect the state
...eady)
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 	bool old_ready;
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	old_ready = vq->ready;
 	vq->ready = ready;
 	if (vq->ready && !old_ready) {
 		vdpasim_queue_ready(vdpasim, idx);
 	}
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 }
 static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
@@ -299,9 +299,9 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 	struct vring...
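One practical consequence of moving from a spinlock to a mutex is that the critical section may now sleep, for example allocate with GFP_KERNEL, which is forbidden under a spinlock. A minimal illustrative sketch (state_mutex and resize_state are invented names, not the vdpa_sim code):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(state_mutex);	/* stand-in for vdpasim->mutex */
static void *state_buf;

static int resize_state(size_t n)
{
	void *p;

	mutex_lock(&state_mutex);	/* may sleep: not callable from atomic context */
	p = krealloc(state_buf, n, GFP_KERNEL);	/* sleeping alloc is fine under a mutex */
	if (!p) {
		mutex_unlock(&state_mutex);
		return -ENOMEM;
	}
	state_buf = p;
	mutex_unlock(&state_mutex);
	return 0;
}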
2012 May 25
0
[PATCH 3/3] gnttab: cleanup
...+                op->flags & GNTMAP_readonly, 1, shah, act, status) ) != GNTST_okay )
         goto unlock_out;
@@ -606,7 +608,7 @@ __gnttab_map_grant_ref(
     cache_flags = (shah->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );
-    spin_unlock(&rd->grant_table->lock);
+    spin_unlock(&rgt->lock);
     /* pg may be set, with a refcount included, from __get_paged_frame */
     if ( !pg )
@@ -679,7 +681,7 @@ __gnttab_map_grant_ref(
         goto undo_out;
     }
-    double_gt_lock(ld->grant_table, rd->grant_tabl...
2010 Feb 20
2
[PATCH] drm/nouveau: fix missing spin_unlock in failure path
...index 03d8935..d7ace31 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -557,11 +557,11 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		spin_lock(&nvbo->bo.lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
 		}
-		spin_unlock(&nvbo->bo.lock);
 		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
--
1.6.6.1.476.g01ddb
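The bug class fixed here is a critical section with an exit path that skips the unlock. A small generic sketch of the corrected shape (obj_lock, do_wait() and apply_reloc() are placeholders, not the nouveau code):

#include <linux/printk.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_lock);

static int do_wait(void)
{
	return 0;	/* stand-in for the real wait, e.g. ttm_bo_wait() */
}

static int apply_reloc(void)
{
	int ret;

	spin_lock(&obj_lock);
	ret = do_wait();
	spin_unlock(&obj_lock);	/* release before *any* exit path */
	if (ret) {
		pr_err("wait failed: %d\n", ret);
		return ret;	/* error path no longer leaks the lock */
	}
	return 0;
}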
2016 Apr 18
2
[PATCH v3 11/16] zsmalloc: separate free_zspage from putback_zspage
...well, if we want to VM_BUG_ON_PAGE() at all. there haven't been any problems with compaction, is there any specific reason these macros were added?
> +		if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
> 			pool->stats.pages_compacted += class->pages_per_zspage;
> -			spin_unlock(&class->lock);
> +			spin_unlock(&class->lock);
> +			free_zspage(pool, class, src_page);
do we really need to free_zspage() out of class->lock? wouldn't something like this
	if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
		pool->stats.pages_compacted +=...
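For context, the tradeoff being debated is a common one: do only the cheap unlinking under class->lock, then drop the lock before the expensive free so other CPUs are not held up. Schematically, with invented names, and neither of the two variants quoted above:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(class_lock);	/* stand-in for class->lock */

static int detach_if_empty(void)
{
	/* cheap bookkeeping done under the lock; pretend it emptied */
	return 1;
}

static void free_detached(void)
{
	/* expensive teardown, safe to run without class_lock held */
}

static void compact_one(void)
{
	int empty;

	spin_lock(&class_lock);
	empty = detach_if_empty();	/* unlink under the lock */
	spin_unlock(&class_lock);
	if (empty)
		free_detached();	/* heavy work outside the lock */
}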
2020 Jul 31
0
[PATCH] vdpasim: protect concurrent access to iommu iotlb
...iommu_lock;
 };
 static struct vdpasim *vdpasim_dev;
@@ -118,7 +120,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
 	for (i = 0; i < VDPASIM_VQ_NUM; i++)
 		vdpasim_vq_reset(&vdpasim->vqs[i]);
+	spin_lock(&vdpasim->iommu_lock);
 	vhost_iotlb_reset(vdpasim->iommu);
+	spin_unlock(&vdpasim->iommu_lock);
 	vdpasim->features = 0;
 	vdpasim->status = 0;
@@ -236,8 +240,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
 	/* For simplicity, use identical mapping to avoid e.g iova
 	 * allocator.
 	 */
+	spin_lock(&vdpasim->iommu_...
2014 Sep 11
1
Possible deadlock due to wrong locking order, patch review requested, thanks
...lmmaster.c	2014-09-11 12:45:45.821657634 +0800
--- ocfs2-ko-3.16_compared/dlm/dlmmaster.c	2014-09-11 18:54:34.970243238 +0800
*************** way_up_top:
*** 1506,1512 ****
--- 1506,1515 ----
  	}
  	// mlog(0, "lockres is in progress...\n");
+ 	spin_unlock(&res->spinlock);
+ 	spin_lock(&dlm->master_lock);
+ 	spin_lock(&res->spinlock);
  	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
  	if (!found) {
  		mlog(ML_ERROR, "no mle found for this lock!\n"...
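The fix follows the usual cure for lock-order inversion: drop the lock you hold, then take both locks in the documented order, revalidating any state that may have changed in between. A generic sketch with placeholder locks (not the ocfs2 code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* ordered first, like dlm->master_lock */
static DEFINE_SPINLOCK(inner_lock);	/* ordered second, like res->spinlock */

static void take_both(void)
{
	spin_lock(&inner_lock);
	/* ... we discover we also need outer_lock; taking it now would
	 * invert the order and can deadlock against a thread holding
	 * outer_lock and waiting for inner_lock, so back off first ... */
	spin_unlock(&inner_lock);
	spin_lock(&outer_lock);
	spin_lock(&inner_lock);
	/* ... state may have changed while unlocked: revalidate it ... */
	spin_unlock(&inner_lock);
	spin_unlock(&outer_lock);
}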
2012 Dec 18
0
[PATCH] [RFC] Btrfs: Subpagesize blocksize (WIP).
...ost)->io_tree;
 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
-	struct extent_buffer *eb, *prev_eb = NULL;
+	struct extent_buffer *eb, *next, *prev_eb = NULL;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = tree,
@@ -3326,17 +3379,41 @@ retry:
 			spin_unlock(&mapping->private_lock);
 			continue;
 		}
+		prev_eb = eb;
+
+next_eb:
+		next = eb->next;
 		ret = atomic_inc_not_zero(&eb->refs);
-		spin_unlock(&mapping->private_lock);
-		if (!ret)
-			continue;
+		if (eb->len >= PAGE_SIZE) {
+			spin_unlock(&mapp...
2019 Oct 15
0
[PATCH 4/5] virtiofs: Count pending forgets as in_flight forgets
...uct virtio_fs_vq *fsvq)
-{
-	struct virtio_fs_forget *forget;
-
-	spin_lock(&fsvq->lock);
-	while (1) {
-		forget = list_first_entry_or_null(&fsvq->queued_reqs,
-					struct virtio_fs_forget, list);
-		if (!forget)
-			break;
-		list_del(&forget->list);
-		kfree(forget);
-	}
-	spin_unlock(&fsvq->lock);
-}
-
 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 {
 	struct virtio_fs_vq *fsvq;
@@ -133,9 +130,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 	for (i = 0; i < fs->nvqs; i++) {
 		fsvq = &fs->vqs[i];
-		if (i == VQ_HIPRIO)
-...
2019 Sep 05
0
[PATCH 08/18] virtiofs: Drain all pending requests during ->remove time
...t virtqueue *vq)
 	return &vq_to_fsvq(vq)->fud->pq;
 }
+static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
+{
+	WARN_ON(fsvq->in_flight < 0);
+
+	/* Wait for in flight requests to finish.*/
+	while (1) {
+		spin_lock(&fsvq->lock);
+		if (!fsvq->in_flight) {
+			spin_unlock(&fsvq->lock);
+			break;
+		}
+		spin_unlock(&fsvq->lock);
+		usleep_range(1000, 2000);
+	}
+
+	flush_work(&fsvq->done_work);
+	flush_delayed_work(&fsvq->dispatch_work);
+}
+
+static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
+{
+	struct virtio_fs_fo...
2011 Jul 26
0
[PATCH] Btrfs: use bytes_may_use for all ENOSPC reservations
...-3162,6 +3179,14 @@ static int should_alloc_chunk(struct btrfs_root *root,
 		return 1;
 	/*
+	 * We need to take into account the global rsv because for all intents
+	 * and purposes it's used space.
+	 */
+	spin_lock(&global_rsv->lock);
+	num_allocated += global_rsv->size;
+	spin_unlock(&global_rsv->lock);
+
+	/*
 	 * in limited mode, we want to have some free space up to
 	 * about 1% of the FS size.
 	 */
@@ -3304,7 +3329,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	space_info = block_rsv->space_info;
 	smp_mb();
-	reserved = space_info->byt...
2017 Mar 01
2
[PATCH] drm: virtio: use kmem_cache
...gpu_vbuffer *vbuf;
-	int i, count = 0;
-
-	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
-	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
-
-	spin_lock(&vgdev->free_vbufs_lock);
-	for (i = 0; i < count; i++) {
-		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
-			spin_unlock(&vgdev->free_vbufs_lock);
-			return;
-		}
-		vbuf = list_first_entry(&vgdev->free_vbufs,
-					struct virtio_gpu_vbuffer, list);
-		list_del(&vbuf->list);
-	}
-	spin_unlock(&vgdev->free_vbufs_lock);
-	kfree(vgdev->vbufs);
+	kmem_cache_destroy(vgdev->vbufs);
+	vgde...
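The patch replaces a hand-rolled, spinlock-guarded free list with a slab cache, which keeps the per-object bookkeeping internally. The basic kmem_cache lifecycle looks like this; a minimal sketch with placeholder names (my_buf, buf_cache), not the virtio-gpu code:

#include <linux/errno.h>
#include <linux/slab.h>

struct my_buf {
	int id;	/* ... per-object fields ... */
};

static struct kmem_cache *buf_cache;

static int buf_cache_init(void)
{
	buf_cache = kmem_cache_create("my_buf", sizeof(struct my_buf),
				      0, 0, NULL);
	return buf_cache ? 0 : -ENOMEM;
}

static struct my_buf *buf_get(void)
{
	return kmem_cache_zalloc(buf_cache, GFP_KERNEL);	/* zeroed object */
}

static void buf_put(struct my_buf *buf)
{
	kmem_cache_free(buf_cache, buf);
}

static void buf_cache_exit(void)
{
	kmem_cache_destroy(buf_cache);	/* every object must be freed first */
}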
2019 Oct 30
0
[PATCH 1/3] virtiofs: Use a common function to send forget
...+		} else {
+			pr_debug("virtio-fs: Could not queue FORGET: err=%d."
+				 " Dropping it.\n", ret);
+			kfree(forget);
+			if (in_flight)
+				dec_in_flight_req(fsvq);
+		}
+		goto out;
+	}
+
+	if (!in_flight)
+		inc_in_flight_req(fsvq);
+	notify = virtqueue_kick_prepare(vq);
+	spin_unlock(&fsvq->lock);
+
+	if (notify)
+		virtqueue_notify(vq);
+	return ret;
+out:
+	spin_unlock(&fsvq->lock);
+	return ret;
+}
+
 static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 {
 	struct virtio_fs_forget *forget;
 	struct virtio_fs_vq *fsvq = container_of(work, struct...
2011 Jun 29
14
[PATCH v4 0/6] btrfs: generic readahead interface
This series introduces a generic readahead interface for btrfs trees. The intention is to use it to speed up scrub in a first run, but balance is another hot candidate. In general, every tree walk could be accompanied by a readahead. Deletion of large files comes to mind, where the fetching of the csums takes most of the time. Also the initial build-ups of free-space-caches and