Displaying 20 results from an estimated 34 matches for "list_head_init".
2024 Jul 30
1
[PATCH 2/2] [v6] drm/nouveau: expose GSP-RM logging buffers via debugfs
...> > + * backing resources, such as logging buffers.
> > + */
> > +struct nvif_log {
> > + struct list_head entry;
> > + void (*shutdown)(struct nvif_log *log);
> > +};
> > +
> > +#define NVIF_LOGS_DECLARE(_log) \
> > + struct nvif_log _log = { LIST_HEAD_INIT(_log.entry) }
>
> If you declare this as
>
> #define NVIF_LOGS_DECLARE(_log) \
> struct nvif_log _log = { LIST_HEAD_INIT(_log.entry), nvif_log_shutdown }
>
> and change the signature of nvif_log_shutdown() to
>
> static inline void nvif_log_shutdown(struct nvif_lo...
2012 Feb 06
1
[PATCH] ia64: fix build (next instance)
...efine __XEN_LIST_H__
#include <xen/lib.h>
-#include <xen/prefetch.h>
#include <asm/system.h>
/* These are non-NULL pointers that will result in page faults
@@ -40,6 +39,9 @@ struct list_head {
#define LIST_HEAD_READ_MOSTLY(name) \
struct list_head __read_mostly name = LIST_HEAD_INIT(name)
+/* Do not move this ahead of the struct list_head definition! */
+#include <xen/prefetch.h>
+
static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -106,6 +106,7 @@ struct xsm_operations {...
2024 Jul 31
1
[PATCH 2/2] [v6] drm/nouveau: expose GSP-RM logging buffers via debugfs
...as logging buffers.
> > > + */
> > > +struct nvif_log {
> > > + struct list_head entry;
> > > + void (*shutdown)(struct nvif_log *log);
> > > +};
> > > +
> > > +#define NVIF_LOGS_DECLARE(_log) \
> > > + struct nvif_log _log = { LIST_HEAD_INIT(_log.entry) }
> >
> > If you declare this as
> >
> > #define NVIF_LOGS_DECLARE(_log) \
> > struct nvif_log _log = { LIST_HEAD_INIT(_log.entry), nvif_log_shutdown }
> >
> > and change the signature of nvif_log_shutdown() to
> >
> > static...
2024 Jul 30
1
[PATCH 2/2] [v6] drm/nouveau: expose GSP-RM logging buffers via debugfs
...driver exits. It should free all
Same here.
> + * backing resources, such as logging buffers.
> + */
> +struct nvif_log {
> + struct list_head entry;
> + void (*shutdown)(struct nvif_log *log);
> +};
> +
> +#define NVIF_LOGS_DECLARE(_log) \
> + struct nvif_log _log = { LIST_HEAD_INIT(_log.entry) }
If you declare this as
#define NVIF_LOGS_DECLARE(_log) \
struct nvif_log _log = { LIST_HEAD_INIT(_log.entry), nvif_log_shutdown }
and change the signature of nvif_log_shutdown() to
static inline void nvif_log_shutdown(struct nvif_log *logs)
you can just call
gsp_logs.shutdo...
2013 Oct 24
1
[Bug 867] New: Null pointer segfault in parser
...OS/Version: Ubuntu
Status: NEW
Severity: normal
Priority: P5
Component: nft
AssignedTo: pablo at netfilter.org
ReportedBy: john at sager.me.uk
Estimated Hours: 0.0
parser_init (parser.y) should initialise the symbols list in top_scope:
list_head_init(&state->top_scope.symbols)
Bug found by trying to use examples/sets_and_maps
--
Configure bugmail: https://bugzilla.netfilter.org/userprefs.cgi?tab=email
------- You are receiving this mail because: -------
You are watching all bug changes.
2008 Feb 26
2
Patch to add debugfs interface to o2net
This is a forward port for net_proc.c from 1.2.
2019 Apr 25
6
[PATCH v4 0/4] vmw_balloon: Compaction and shrinker support
VMware balloon enhancements: adding support for memory compaction,
memory shrinker (to prevent OOM) and splitting of refused pages to
prevent recurring inflations.
Patches 1-2: Support for compaction
Patch 3: Support for memory shrinker - disabled by default
Patch 4: Split refused pages to improve performance
v3->v4:
* "get around to" comment [Michael]
* Put list_add under page lock
2019 Apr 25
6
[PATCH v4 0/4] vmw_balloon: Compaction and shrinker support
VMware balloon enhancements: adding support for memory compaction,
memory shrinker (to prevent OOM) and splitting of refused pages to
prevent recurring inflations.
Patches 1-2: Support for compaction
Patch 3: Support for memory shrinker - disabled by default
Patch 4: Split refused pages to improve performance
v3->v4:
* "get around to" comment [Michael]
* Put list_add under page lock
2007 Apr 18
1
[patch 8/9] Guest page hinting: discarded page list.
...ruct list_head *cpu_list = &__get_cpu_var(page_discard_list);
+
+ if (list_empty(cpu_list))
+ return;
+ spin_lock(&splice_lock);
+ list_splice_init(cpu_list, discard_list);
+ spin_unlock(&splice_lock);
+}
+
+unsigned long page_shrink_discards(void)
+{
+ struct list_head pages_to_free = LIST_HEAD_INIT(pages_to_free);
+ struct page *page, *next;
+ unsigned long freed = 0;
+
+ if (!page_host_discards())
+ return 0;
+
+ on_each_cpu(__page_shrink_discards, &pages_to_free, 0, 1);
+
+ list_for_each_entry_safe(page, next, &pages_to_free, lru) {
+ ClearPageDiscarded(page);
+ free_cold_page(pa...
2007 Apr 18
1
[patch 8/9] Guest page hinting: discarded page list.
...ruct list_head *cpu_list = &__get_cpu_var(page_discard_list);
+
+ if (list_empty(cpu_list))
+ return;
+ spin_lock(&splice_lock);
+ list_splice_init(cpu_list, discard_list);
+ spin_unlock(&splice_lock);
+}
+
+unsigned long page_shrink_discards(void)
+{
+ struct list_head pages_to_free = LIST_HEAD_INIT(pages_to_free);
+ struct page *page, *next;
+ unsigned long freed = 0;
+
+ if (!page_host_discards())
+ return 0;
+
+ on_each_cpu(__page_shrink_discards, &pages_to_free, 0, 1);
+
+ list_for_each_entry_safe(page, next, &pages_to_free, lru) {
+ ClearPageDiscarded(page);
+ free_cold_page(pa...
2019 Apr 23
5
[PATCH v3 0/4] vmw_balloon: compaction and shrinker support
VMware balloon enhancements: adding support for memory compaction,
memory shrinker (to prevent OOM) and splitting of refused pages to
prevent recurring inflations.
Patches 1-2: Support for compaction
Patch 3: Support for memory shrinker - disabled by default
Patch 4: Split refused pages to improve performance
v2->v3:
* Fixing wrong argument type (int->size_t) [Michael]
* Fixing a comment
2020 May 08
0
[RFC v4 01/12] kthread: Add kthread_queue_flush_work()
...nux/kthread.h
@@ -105,6 +105,11 @@ struct kthread_delayed_work {
struct timer_list timer;
};
+struct kthread_flush_work {
+ struct kthread_work work;
+ struct completion done;
+};
+
#define KTHREAD_WORKER_INIT(worker) { \
.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
@@ -122,6 +127,11 @@ struct kthread_delayed_work {
TIMER_IRQSAFE), \
}
+#define KTHREAD_FLUSH_WORK_INIT(fwork) { \
+ KTHREAD_WORK_INIT((fwork).work, __kthread_flush_work_fn), \
+ COMPLETION_INITIALIZER_ONSTACK((fwork).done), \
+ }
+
#define DEFINE_KTHREAD_WO...
2024 Jul 29
1
[PATCH 2/2] [v6] drm/nouveau: expose GSP-RM logging buffers via debugfs
...r exits.
+ *
+ * The @shutdown function is called when the driver exits. It should free all
+ * backing resources, such as logging buffers.
+ */
+struct nvif_log {
+ struct list_head entry;
+ void (*shutdown)(struct nvif_log *log);
+};
+
+#define NVIF_LOGS_DECLARE(_log) \
+ struct nvif_log _log = { LIST_HEAD_INIT(_log.entry) }
+
+static inline void nvif_log_shutdown(struct list_head *logs)
+{
+ if (!list_empty(logs)) {
+ struct nvif_log *log, *n;
+
+ list_for_each_entry_safe(log, n, logs, entry) {
+ /* shutdown() should also delete the log entry */
+ log->shutdown(log);
+ }
+ }
+}
+
+#ifdef CONFIG...
2015 Jun 07
21
[PATCH RFC 00/20] expose global performance counters
Hello,
This series exposes global performance counters (PCOUNTER) to the userspace
through the nvif interface by reworking most of the code related to the PM
engine.
This interface will allow the userspace to control and monitor complex hardware
events like the proprietary driver already does, for example with CUPTI and
PerfKit.
For now, this series only exposes performance counters on NV50,
2024 Jun 18
1
[PATCH 2/2] [v5] drm/nouveau: expose GSP-RM logging buffers via debugfs
On Mon, 2024-06-17 at 21:54 +0200, Danilo Krummrich wrote:
Hi Timur,
thanks for the follow-up on this patch series.
On Wed, Jun 12, 2024 at 06:52:53PM -0500, Timur Tabi wrote:
The LOGINIT, LOGINTR, LOGRM, and LOGPMU buffers are circular buffers
that have printf-like logs from GSP-RM and PMU encoded in them.
LOGINIT, LOGINTR, and LOGRM are allocated by Nouveau and their DMA
addresses are
2024 Oct 30
1
[PATCH 2/2] [v9] drm/nouveau: expose GSP-RM logging buffers via debugfs
...ces, such as logging buffers.
+ */
+struct nvif_log {
+ struct list_head entry;
+ void (*shutdown)(struct nvif_log *log);
+};
+
+/**
+ * nvif_logs - linked list of nvif_log objects
+ */
+struct nvif_logs {
+ struct list_head head;
+};
+
+#define NVIF_LOGS_DECLARE(logs) \
+ struct nvif_logs logs = { LIST_HEAD_INIT(logs.head) }
+
+static inline void nvif_log_shutdown(struct nvif_logs *logs)
+{
+ if (!list_empty(&logs->head)) {
+ struct nvif_log *log, *n;
+
+ list_for_each_entry_safe(log, n, &logs->head, entry) {
+ /* shutdown() should also delete the log entry */
+ log->shutdown(log);
+...
2012 Jun 12
6
[RFC] net/sched/em_canid: Ematch rule to match CAN frames according to their CAN IDs
...d_dump(struct sk_buff *skb, struct tcf_ematch *m)
+{
+ return 0;
+}
+
+static struct tcf_ematch_ops em_canid_ops = {
+ .kind = TCF_EM_CANID,
+ .change = em_canid_change,
+ .match = em_canid_match,
+ .destroy = em_canid_destroy,
+ .dump = em_canid_dump,
+ .owner = THIS_MODULE,
+ .link = LIST_HEAD_INIT(em_canid_ops.link)
+};
+
+static int __init init_em_canid(void)
+{
+ return tcf_em_register(&em_canid_ops);
+}
+
+static void __exit exit_em_canid(void)
+{
+ tcf_em_unregister(&em_canid_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_em_canid);
+module_exit(exit_em_canid);...
2019 Oct 21
0
[PATCH RFC 1/3] kcov: remote coverage support
...e {
> + u64 handle;
> + struct kcov *kcov;
> + struct hlist_node hnode;
> };
>
> +static DEFINE_SPINLOCK(kcov_remote_lock);
> +static DEFINE_HASHTABLE(kcov_remote_map, 4);
> +static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
> +
> +static struct kcov_remote *kcov_remote_find(u64 handle)
> +{
> + struct kcov_remote *remote;
> +
> + hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
> + if (remote->handle == handle)
> +...
2024 Aug 02
1
[PATCH 1/2] [v2] drm/nouveau: retain device pointer in nvkm_gsp_mem object
Store the struct device pointer used to allocate the DMA buffer in
the nvkm_gsp_mem object. This allows nvkm_gsp_mem_dtor() to release
the buffer without needing the nvkm_gsp. This is needed so that
we can retain DMA buffers even after the nvkm_gsp object is deleted.
Signed-off-by: Timur Tabi <ttabi at nvidia.com>
---
Notes:
v2:
added get/put_device calls
2011 Jul 21
10
[PATCH v5 0/8] Btrfs scrub: print path to corrupted files and trigger nodatasum fixup
While testing raid-auto-repair patches I'm going to send out later, I just found
the very last bug in my current scrub patch series:
Changelog v4->v5:
- fixed a deadlock when fixup is taking longer while scrub is about to end
Original message follows:
------------------------
This patch set introduces two new features for scrub. They share the backref
iteration code which is the