Displaying 20 results from an estimated 266 matches for "list_del_init".
2006 May 11
1
Re: [PATCH] smbfs: Fix slab corruption in samba error path
...* On timeout or on interrupt we want to try and remove the
> * request from the recvq/xmitq.
> */
> smb_lock_server(server);
> if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
> list_del_init(&req->rq_queue);
> smb_rput(req);
> }
> smb_unlock_server(server);
> }
> [...]
> if (signal_pending(current))
> req->rq_errno = -ERESTARTSYS;
>
> I guess that some codepath...
2012 Sep 17
0
[PATCH] Btrfs: do not hold the write_lock on the extent tree while logging V2
...ndex 8d1364d..b8cbc8d 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -407,7 +407,8 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
rb_erase(&em->rb_node, &tree->map);
- list_del_init(&em->list);
+ if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+ list_del_init(&em->list);
em->in_tree = 0;
return ret;
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 8e6294b..6792255 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@...
2019 Oct 15
7
[PATCH 0/5] virtiofs: Fix couple of deadlocks
Hi,
We have a couple of places which can result in deadlock. This patch series
fixes these.
We can be called with fc->bg_lock (for background requests) while
submitting a request. This leads to two constraints.
- We can't end requests in submitter's context and call fuse_end_request()
as it tries to take fc->bg_lock as well. So queue these requests on a
list and use a worker to
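
A rough sketch of the "park requests on a list, end them from a worker" approach described above; the req_queue container, its field names, and completion_work_fn are illustrative placeholders, not the actual virtiofs code — only fuse_request_end(), struct fuse_req and list_del_init() are taken from the snippets in this thread:

/* Sketch only: requests that cannot be ended in the submitter's context
 * (because fc->bg_lock may be held there) are parked on a list and a
 * worker ends them later, where taking bg_lock is safe.
 * Assumes kernel context: <linux/list.h>, <linux/spinlock.h>,
 * <linux/workqueue.h> and the fuse-internal definitions of
 * struct fuse_req / fuse_request_end(). */
struct req_queue {                              /* hypothetical container */
        spinlock_t lock;
        struct list_head pending_reqs;          /* requests deferred for completion */
        struct work_struct completion_work;
        struct fuse_conn *fc;
};

/* May be called with fc->bg_lock held: just queue and kick the worker. */
static void defer_request_end(struct req_queue *q, struct fuse_req *req)
{
        spin_lock(&q->lock);
        list_add_tail(&req->list, &q->pending_reqs);
        spin_unlock(&q->lock);
        schedule_work(&q->completion_work);
}

/* Worker context: no bg_lock held, so ending the request is safe here. */
static void completion_work_fn(struct work_struct *work)
{
        struct req_queue *q = container_of(work, struct req_queue, completion_work);
        struct fuse_req *req;

        spin_lock(&q->lock);
        while (!list_empty(&q->pending_reqs)) {
                req = list_first_entry(&q->pending_reqs, struct fuse_req, list);
                list_del_init(&req->list);
                spin_unlock(&q->lock);
                fuse_request_end(q->fc, req);
                spin_lock(&q->lock);
        }
        spin_unlock(&q->lock);
}
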
2007 May 17
1
[PATCH] ocfs: use list_for_each_entry where beneficial
...) {
tot++;
}
mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
- list_for_each_safe(iter, iter2, &tmp_list) {
- item = list_entry(iter, struct dlm_work_item, list);
+ list_for_each_entry_safe(item, next, &tmp_list, list) {
workfunc = item->func;
list_del_init(&item->list);
@@ -549,7 +547,6 @@ static int dlm_remaster_locks(struct dlm
{
int status = 0;
struct dlm_reco_node_data *ndata;
- struct list_head *iter;
int all_nodes_done;
int destroy = 0;
int pass = 0;
@@ -567,8 +564,7 @@ static int dlm_remaster_locks(struct dlm
/* safe to...
2020 May 08
0
[RFC v4 04/12] drm/vblank: Add vblank works
...void queue_vbl_work(struct drm_vblank_work *work)
+{
+ struct kthread_flush_work *fwork, *tmp;
+ bool busy, reinit = false;
+
+ busy = kthread_queue_work(work->vblank->worker, &work->base);
+ list_for_each_entry_safe(fwork, tmp, &work->flush_work, work.node) {
+ if (busy) {
+ list_del_init(&fwork->work.node);
+ busy = kthread_queue_flush_work(&work->base, fwork);
+ if (!busy)
+ complete(&fwork->done);
+ } else {
+ complete(&fwork->done);
+ reinit = true;
+ }
+ }
+
+ if (reinit)
+ INIT_LIST_HEAD(&work->flush_work);
+}
+
+static void drm...
2012 Feb 15
7
[PATCH v3] arm: support fewer LR registers than virtual irqs
...GICH[GICH_LR + i] = 0;
+ clear_bit(i, &gic.lr_mask);
+
+ if ( !list_empty(gic.lr_pending.next) ) {
+ p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
+ gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+ list_del_init(&p->lr_link);
+ set_bit(i, &gic.lr_mask);
+ } else {
+ gic_inject_irq_stop();
+ }
+ spin_unlock(&gic.lock);
spin_lock(&current->arch.vgic.lock);
p = irq_to_pending(current, virq);
@@ -44...
2019 Jul 31
1
[PATCH 02/13] amdgpu: don't initialize range->list in amdgpu_hmm_init_range
...ge to another list as an entry in the
> core hmm code, so there is no need to initialize it in a driver.
I've seen code that uses list_empty to check whether a list head has
been added to a list or not. For that to work, the list head needs to be
initialized, and it has to be removed with list_del_init. If HMM doesn't
ever do that with range->list, then this patch is Reviewed-by: Felix
Kuehling <Felix.Kuehling at amd.com>
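
For reference, a minimal sketch of the idiom Felix describes; the struct and helpers below are invented for illustration (not amdgpu or HMM code). list_empty() only works as an "is this entry on a list?" test if the list head starts out initialized and is always removed with list_del_init(), which re-initializes it, rather than plain list_del():

#include <linux/list.h>

struct tracked_range {
        struct list_head list;                  /* link into some tracking list */
};

static void tracked_range_init(struct tracked_range *r)
{
        INIT_LIST_HEAD(&r->list);               /* list_empty(&r->list) is now true */
}

static void tracked_range_unlink(struct tracked_range *r)
{
        /* Plain list_del() poisons the entry's pointers, so a later
         * list_empty() check would be unreliable; list_del_init()
         * keeps the "not on any list" state observable. */
        if (!list_empty(&r->list))
                list_del_init(&r->list);
}
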
>
> Signed-off-by: Christoph Hellwig <hch at lst.de>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 1 -
> 1 file changed, 1 deletion(...
2019 Oct 21
0
[PATCH 5/5] virtiofs: Retry request submission from worker context
...list);
> > if (!req) {
> > spin_unlock(&fsvq->lock);
> > - return;
> > + break;
> > }
> >
> > list_del_init(&req->list);
> > spin_unlock(&fsvq->lock);
> > fuse_request_end(fc, req);
> > }
> > +
> > + /* Dispatch pending requests */
> > + while (1) {
> > + spin_lock(&fsvq->lock)...
2013 Mar 18
0
[PATCH] Btrfs-progs: fix memory leaks on cleanup
...d *next;
struct btrfs_device *device;
- return 0;
-
list = &fs_info->fs_devices->devices;
- list_for_each(next, list) {
- device = list_entry(next, struct btrfs_device, dev_list);
+ while (!list_empty(list)) {
+ device = list_entry(list->next, struct btrfs_device, dev_list);
+ list_del_init(&device->dev_list);
if (device->fd) {
fsync(device->fd);
posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED);
}
close(device->fd);
+ kfree(device->name);
+ kfree(device->label);
+ kfree(device);
}
+ kfree(fs_info->fs_devices);
return 0;
}
+sta...
2008 Feb 26
2
Patch to add debugfs interface to o2net
This is a forward port for net_proc.c from 1.2.
2009 Feb 03
10
Convert mle list to a hash
These patches convert the mle list to a hash. The same patches apply on
ocfs2 1.4 too.
Currently, we use the same number of hash pages for mles and lockres'.
This will be addressed in a future patch that will make both of them
configurable.
Sunil
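
As a rough illustration of the list-to-hash conversion (the bucket count, hash function, and names below are placeholders, not the actual ocfs2/dlm code), lookups and inserts then touch a single short hlist bucket instead of one long global list:

#include <linux/jhash.h>
#include <linux/list.h>

#define MLE_HASH_BUCKETS 128                    /* illustrative size */

static struct hlist_head mle_hash[MLE_HASH_BUCKETS];

static struct hlist_head *mle_hash_bucket(const void *name, unsigned int len)
{
        return &mle_hash[jhash(name, len, 0) % MLE_HASH_BUCKETS];
}

/* Insertion (and, symmetrically, lookup) only walks one bucket. */
static void mle_hash_insert(struct hlist_node *node,
                            const void *name, unsigned int len)
{
        hlist_add_head(node, mle_hash_bucket(name, len));
}
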
2013 May 06
2
[PATCH v2] xen/gic: EOI irqs on the right pcpu
...p->desc->status &= ~IRQ_INPROGRESS;
- GICC[GICC_DIR] = virq;
+ /* Assume only one pcpu needs to EOI the irq */
+ cpu = cpumask_first(&p->eoimask);
+ cpumask_clear(&p->eoimask);
+ eoi = 1;
}
list_del_init(&p->inflight);
spin_unlock_irq(&v->arch.vgic.lock);
+ if ( eoi ) {
+ /* this is not racy because we can't receive another irq of the
+ * same type until we EOI it. */
+ if ( cpu == smp_processor_id() )
+ gic_...
2020 Jun 08
2
[PATCH RFC v5 12/13] vhost/vsock: switch to the buf API
..._t iov_len, payload_len;
> - int head;
> + struct vhost_buf buf;
> + int ret;
>
> spin_lock_bh(&vsock->send_pkt_list_lock);
> if (list_empty(&vsock->send_pkt_list)) {
> @@ -117,16 +118,17 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
> list_del_init(&pkt->list);
> spin_unlock_bh(&vsock->send_pkt_list_lock);
>
> - head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
> - &out, &in, NULL, NULL);
> - if (head < 0) {
> + ret = vhost_get_avail_buf(vq, &buf,
> + vq->i...
2011 Apr 20
4
[PATCH 1/5] Btrfs: fix bh leak on __btrfs_open_devices path
'bh' is not released if no error is detected
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
fs/btrfs/volumes.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8b9fb8c..69fc902 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -631,6 +631,7 @@ static int
2020 Jan 24
4
TTM/Nouveau cleanups
Hi guys,
I've already sent this out in September last year, but only got a response from Daniel.
Could you guys please test this and tell me what you think about it?
Basically I'm trying to remove all driver specific features from TTM which don't need to be inside the framework.
Thanks,
Christian.
2016 Mar 11
0
[PATCH v1 05/19] zsmalloc: use first_page rather than page
...5,11 @@ static void remove_zspage(struct page *page, struct size_class *class,
BUG_ON(!*head);
if (list_empty(&(*head)->lru))
*head = NULL;
- else if (*head == page)
+ else if (*head == first_page)
*head = (struct page *)list_entry((*head)->lru.next,
struct page, lru);
- list_del_init(&page->lru);
+ list_del_init(&first_page->lru);
zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}
@@ -712,21 +714,21 @@ static void remove_zspage(struct page *page, struct size_class *class,
* fullness group.
*/
static enum full...
2016 Apr 26
2
[PATCH 1/2] vhost: simplify work flushing
...seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
if (kthread_should_stop()) {
spin_unlock_irq(&dev->work_lock);
@@ -336,7 +326,6 @@ static int vhost_worker(void *data)
work = list_first_entry(&dev->work_list,
struct vhost_work, node);
list_del_init(&work->node);
- seq = work->queue_seq;
} else
work = NULL;
spin_unlock_irq(&dev->work_lock);
--
1.8.3.1
2019 Sep 30
3
[PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver
...ttm.io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_del_init(&nvbo->io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -674,8 +696,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
}
man->func = &...