Displaying 20 results from an estimated 76 matches for "list_first_entry_or_nul".
Did you mean:
list_first_entry_or_null
2019 Sep 25
2
[PATCH v2 03/27] drm/dp_mst: Destroy MSTBs asynchronously
...ector lock before destroying the connector, to avoid AB->BA
> + * connector lock before destroying the mstb/port, to avoid AB->BA
> * ordering between this lock and the config mutex.
> */
> - for (;;) {
> - mutex_lock(&mgr->destroy_connector_lock);
> - port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
> - if (!port) {
> - mutex_unlock(&mgr->destroy_connector_lock);
> - break;
> + do {
> + go_again = false;
> +
> + for (;;) {
> + struct drm_dp_mst_branch *mstb;
> +
> + mutex_lock(&...
2018 Jul 10
4
[PATCH v35 1/5] mm: support to get hints of free page blocks
NAK.
On Tue, Jul 10, 2018 at 2:56 AM Wei Wang <wei.w.wang at intel.com> wrote:
>
> +
> + buf_page = list_first_entry_or_null(pages, struct page, lru);
> + if (!buf_page)
> + return -EINVAL;
> + buf = (__le64 *)page_address(buf_page);
Stop this garbage.
Why the hell would you pass in some crazy "list of pages" that uses
that lru list?
That's just insane shit.
Just pas...
2018 Jul 10
4
[PATCH v35 1/5] mm: support to get hints of free page blocks
NAK.
On Tue, Jul 10, 2018 at 2:56 AM Wei Wang <wei.w.wang at intel.com> wrote:
>
> +
> + buf_page = list_first_entry_or_null(pages, struct page, lru);
> + if (!buf_page)
> + return -EINVAL;
> + buf = (__le64 *)page_address(buf_page);
Stop this garbage.
Why the hell would you pass in some crazy "list of pages" that uses
that lru list?
That's just insane shit.
Just pas...
2019 Sep 27
1
[PATCH v2 03/27] drm/dp_mst: Destroy MSTBs asynchronously
...BA
> > > + * connector lock before destroying the mstb/port, to avoid AB->BA
> > > * ordering between this lock and the config mutex.
> > > */
> > > - for (;;) {
> > > - mutex_lock(&mgr->destroy_connector_lock);
> > > - port = list_first_entry_or_null(&mgr->destroy_connector_list,
> > > struct drm_dp_mst_port, next);
> > > - if (!port) {
> > > - mutex_unlock(&mgr->destroy_connector_lock);
> > > - break;
> > > + do {
> > > + go_again = false;
> > > +
> > >...
2019 Sep 05
0
[PATCH 08/18] virtiofs: Drain all pending requests during ->remove time
...usleep_range(1000, 2000);
+ }
+
+ flush_work(&fsvq->done_work);
+ flush_delayed_work(&fsvq->dispatch_work);
+}
+
+static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
+{
+ struct virtio_fs_forget *forget;
+
+ spin_lock(&fsvq->lock);
+ while (1) {
+ forget = list_first_entry_or_null(&fsvq->queued_reqs,
+ struct virtio_fs_forget, list);
+ if (!forget)
+ break;
+ list_del(&forget->list);
+ kfree(forget);
+ }
+ spin_unlock(&fsvq->lock);
+}
+
+static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ f...
2019 Sep 03
0
[PATCH v2 03/27] drm/dp_mst: Destroy MSTBs asynchronously
...to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list...
2019 Oct 22
0
[PATCH v5 01/14] drm/dp_mst: Destroy MSTBs asynchronously
...to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list...
2019 Sep 25
0
[PATCH v2 03/27] drm/dp_mst: Destroy MSTBs asynchronously
...he connector, to avoid AB->BA
> > + * connector lock before destroying the mstb/port, to avoid AB->BA
> > * ordering between this lock and the config mutex.
> > */
> > - for (;;) {
> > - mutex_lock(&mgr->destroy_connector_lock);
> > - port = list_first_entry_or_null(&mgr->destroy_connector_list,
> > struct drm_dp_mst_port, next);
> > - if (!port) {
> > - mutex_unlock(&mgr->destroy_connector_lock);
> > - break;
> > + do {
> > + go_again = false;
> > +
> > + for (;;) {
> > + struct drm...
2019 Oct 15
7
[PATCH 0/5] virtiofs: Fix couple of deadlocks
Hi,
We have couple of places which can result in deadlock. This patch series
fixes these.
We can be called with fc->bg_lock (for background requests) while
submitting a request. This leads to two constraints.
- We can't end requests in submitter's context and call fuse_end_request()
as it tries to take fc->bg_lock as well. So queue these requests on a
list and use a worker to
2020 Apr 21
2
[PATCH] nouveau/hmm: fix nouveau_dmem_chunk allocations
...chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
struct nouveau_dmem_chunk *chunk;
+ struct resource *res;
+ struct page *page;
+ void *ptr;
+ unsigned long i, pfn_first;
int ret;
- if (drm->dmem == NULL)
- return -EINVAL;
-
- mutex_lock(&drm->dmem->mutex);
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- list_del(&chunk->list);
- mutex_un...
2018 Feb 20
0
[PATCH 1/4] iommu: Add virtio-iommu driver
...te:
[...]
>> +static int viommu_receive_resp(struct viommu_dev *viommu, int nr_sent,
>> + struct list_head *sent)
>> +{
>> +
>> + unsigned int len;
>> + int nr_received = 0;
>> + struct viommu_request *req, *pending;
>> +
>> + pending = list_first_entry_or_null(sent, struct viommu_request, list);
>> + if (WARN_ON(!pending))
>> + return 0;
>> +
>> + while ((req = virtqueue_get_buf(viommu->vq, &len)) != NULL) {
>> + if (req != pending) {
>> + dev_warn(viommu->dev, "discarding stale request\n");
&g...
2018 Jul 11
0
[PATCH v35 1/5] mm: support to get hints of free page blocks
On 07/11/2018 01:33 AM, Linus Torvalds wrote:
> NAK.
>
> On Tue, Jul 10, 2018 at 2:56 AM Wei Wang <wei.w.wang at intel.com> wrote:
>> +
>> + buf_page = list_first_entry_or_null(pages, struct page, lru);
>> + if (!buf_page)
>> + return -EINVAL;
>> + buf = (__le64 *)page_address(buf_page);
> Stop this garbage.
>
> Why the hell would you pass in some crazy "list of pages" that uses
> that lru list?
>
>...
2018 Jul 11
0
[PATCH v35 1/5] mm: support to get hints of free page blocks
On Tue, Jul 10, 2018 at 10:33:08AM -0700, Linus Torvalds wrote:
> NAK.
>
> On Tue, Jul 10, 2018 at 2:56 AM Wei Wang <wei.w.wang at intel.com> wrote:
> >
> > +
> > + buf_page = list_first_entry_or_null(pages, struct page, lru);
> > + if (!buf_page)
> > + return -EINVAL;
> > + buf = (__le64 *)page_address(buf_page);
>
> Stop this garbage.
>
> Why the hell would you pass in some crazy "list of pages" that uses
> that lru list?...
2019 Oct 21
0
[PATCH 5/5] virtiofs: Retry request submission from worker context
...spin_unlock(&fsvq->lock);
> > fuse_request_end(fc, req);
> > }
> > +
> > + /* Dispatch pending requests */
> > + while (1) {
> > + spin_lock(&fsvq->lock);
> > + req = list_first_entry_or_null(&fsvq->queued_reqs,
> > + struct fuse_req, list);
> > + if (!req) {
> > + spin_unlock(&fsvq->lock);
> > + return;
> > + }
> > +...
2017 Nov 08
2
[PATCH v3] virtio_balloon: fix deadlock on OOM
..., pages);
+}
+
+/*
+ * balloon_page_pop - remove a page from a page list.
+ * @head : pointer to list
+ * @page : page to be added
+ *
+ * Caller must ensure the page is private and protect the list.
+ */
+static inline struct page *balloon_page_pop(struct list_head *pages)
+{
+ struct page *page = list_first_entry_or_null(pages, struct page, lru);
+
+ if (!page)
+ return NULL;
+
+ list_del(&page->lru);
+ return page;
+}
+
+/*
* balloon_page_insert - insert a page into the balloon's page list and make
* the page->private assignment accordingly.
* @balloon : pointer to balloon device
diff --gi...
2017 Nov 08
2
[PATCH v3] virtio_balloon: fix deadlock on OOM
..., pages);
+}
+
+/*
+ * balloon_page_pop - remove a page from a page list.
+ * @head : pointer to list
+ * @page : page to be added
+ *
+ * Caller must ensure the page is private and protect the list.
+ */
+static inline struct page *balloon_page_pop(struct list_head *pages)
+{
+ struct page *page = list_first_entry_or_null(pages, struct page, lru);
+
+ if (!page)
+ return NULL;
+
+ list_del(&page->lru);
+ return page;
+}
+
+/*
* balloon_page_insert - insert a page into the balloon's page list and make
* the page->private assignment accordingly.
* @balloon : pointer to balloon device
diff --gi...
2019 Sep 05
0
[PATCH v4 15/16] virtio-fs: add virtiofs filesystem
...+ struct scatterlist sg;
> > + struct scatterlist *sgs[] = {&sg};
> > + bool notify;
> > + int ret;
> > +
> > + pr_debug("virtio-fs: worker %s called.\n", __func__);
> > + while (1) {
> > + spin_lock(&fsvq->lock);
> > + forget = list_first_entry_or_null(&fsvq->queued_reqs,
> > + struct virtio_fs_forget, list);
> > + if (!forget) {
> > + spin_unlock(&fsvq->lock);
> > + return;
> > + }
> > +
> > + list_del(&forget->list);
> > + if (!fsvq->connected) {
> > +...
2019 Aug 09
0
[RFC PATCH v6 13/92] kvm: introspection: make the vCPU wait even when its jobs list is empty
...--- a/virt/kvm/kvmi.c
+++ b/virt/kvm/kvmi.c
@@ -135,6 +135,19 @@ static void kvmi_free_job(struct kvmi_job *job)
kmem_cache_free(job_cache, job);
}
+static struct kvmi_job *kvmi_pull_job(struct kvmi_vcpu *ivcpu)
+{
+ struct kvmi_job *job = NULL;
+
+ spin_lock(&ivcpu->job_lock);
+ job = list_first_entry_or_null(&ivcpu->job_list, typeof(*job), link);
+ if (job)
+ list_del(&job->link);
+ spin_unlock(&ivcpu->job_lock);
+
+ return job;
+}
+
static bool alloc_ivcpu(struct kvm_vcpu *vcpu)
{
struct kvmi_vcpu *ivcpu;
@@ -496,6 +509,73 @@ void kvmi_destroy_vm(struct kvm *kvm)
wait_for_...
2019 Oct 15
0
[PATCH 4/5] virtiofs: Count pending forgets as in_flight forgets
...2 +123,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
flush_delayed_work(&fsvq->dispatch_work);
}
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
- struct virtio_fs_forget *forget;
-
- spin_lock(&fsvq->lock);
- while (1) {
- forget = list_first_entry_or_null(&fsvq->queued_reqs,
- struct virtio_fs_forget, list);
- if (!forget)
- break;
- list_del(&forget->list);
- kfree(forget);
- }
- spin_unlock(&fsvq->lock);
-}
-
static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
struct virtio_fs_vq *fsvq;
@@ -133,9 +130...
2019 Sep 06
1
[PATCH v4 15/16] virtio-fs: add virtiofs filesystem
...+ struct scatterlist *sgs[] = {&sg};
> > > + bool notify;
> > > + int ret;
> > > +
> > > + pr_debug("virtio-fs: worker %s called.\n", __func__);
> > > + while (1) {
> > > + spin_lock(&fsvq->lock);
> > > + forget = list_first_entry_or_null(&fsvq->queued_reqs,
> > > + struct virtio_fs_forget, list);
> > > + if (!forget) {
> > > + spin_unlock(&fsvq->lock);
> > > + return;
> > > + }
> > > +
> > > + list_del(&forget->list);
> > > + i...