Boris Brezillon
2025-Jan-22 15:51 UTC
[PATCH] drm/sched: Use struct for drm_sched_init() params
On Wed, 22 Jan 2025 15:08:20 +0100
Philipp Stanner <phasta at kernel.org> wrote:

> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -3272,6 +3272,7 @@ group_create_queue(struct panthor_group *group,
>  		   const struct drm_panthor_queue_create *args)
>  {
>  	struct drm_gpu_scheduler *drm_sched;
> +	struct drm_sched_init_params sched_params;

nit: Could we use a struct initializer instead of a
memset(0)+field-assignment?

	struct drm_sched_init_params sched_params = {
		.ops = &panthor_queue_sched_ops,
		.submit_wq = group->ptdev->scheduler->wq,
		.num_rqs = 1,
		.credit_limit = args->ringbuf_size / sizeof(u64),
		.hang_limit = 0,
		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
		.timeout_wq = group->ptdev->reset.wq,
		.name = "panthor-queue",
		.dev = group->ptdev->base.dev,
	};

The same comment applies to the panfrost changes BTW.

>  	struct panthor_queue *queue;
>  	int ret;
> 
> @@ -3289,6 +3290,8 @@ group_create_queue(struct panthor_group *group,
>  	if (!queue)
>  		return ERR_PTR(-ENOMEM);
> 
> +	memset(&sched_params, 0, sizeof(struct drm_sched_init_params));
> +
>  	queue->fence_ctx.id = dma_fence_context_alloc(1);
>  	spin_lock_init(&queue->fence_ctx.lock);
>  	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
> @@ -3341,17 +3344,23 @@ group_create_queue(struct panthor_group *group,
>  	if (ret)
>  		goto err_free_queue;
> 
> +	sched_params.ops = &panthor_queue_sched_ops;
> +	sched_params.submit_wq = group->ptdev->scheduler->wq;
> +	sched_params.num_rqs = 1;
>  	/*
> -	 * Credit limit argument tells us the total number of instructions
> +	 * The credit limit argument tells us the total number of instructions
>  	 * across all CS slots in the ringbuffer, with some jobs requiring
>  	 * twice as many as others, depending on their profiling status.
>  	 */
> -	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
> -			     group->ptdev->scheduler->wq, 1,
> -			     args->ringbuf_size / sizeof(u64),
> -			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
> -			     group->ptdev->reset.wq,
> -			     NULL, "panthor-queue", group->ptdev->base.dev);
> +	sched_params.credit_limit = args->ringbuf_size / sizeof(u64);
> +	sched_params.hang_limit = 0;
> +	sched_params.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
> +	sched_params.timeout_wq = group->ptdev->reset.wq;
> +	sched_params.score = NULL;
> +	sched_params.name = "panthor-queue";
> +	sched_params.dev = group->ptdev->base.dev;
> +
> +	ret = drm_sched_init(&queue->scheduler, &sched_params);
>  	if (ret)
>  		goto err_free_queue;
Tvrtko Ursulin
2025-Jan-22 16:15 UTC
[PATCH] drm/sched: Use struct for drm_sched_init() params
On 22/01/2025 15:51, Boris Brezillon wrote:
> On Wed, 22 Jan 2025 15:08:20 +0100
> Philipp Stanner <phasta at kernel.org> wrote:
> 
>> --- a/drivers/gpu/drm/panthor/panthor_sched.c
>> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
>> @@ -3272,6 +3272,7 @@ group_create_queue(struct panthor_group *group,
>>  		   const struct drm_panthor_queue_create *args)
>>  {
>>  	struct drm_gpu_scheduler *drm_sched;
>> +	struct drm_sched_init_params sched_params;
> 
> nit: Could we use a struct initializer instead of a
> memset(0)+field-assignment?
> 
> 	struct drm_sched_init_params sched_params = {
> 		.ops = &panthor_queue_sched_ops,
> 		.submit_wq = group->ptdev->scheduler->wq,
> 		.num_rqs = 1,
> 		.credit_limit = args->ringbuf_size / sizeof(u64),
> 		.hang_limit = 0,
> 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
> 		.timeout_wq = group->ptdev->reset.wq,
> 		.name = "panthor-queue",
> 		.dev = group->ptdev->base.dev,
> 	};

+1 on this as a general approach for the whole series. And I'd drop the
explicit zeros and NULLs. Memsets could then go too.

Regards,

Tvrtko

> 
> The same comment applies to the panfrost changes BTW.
> 
>>  	struct panthor_queue *queue;
>>  	int ret;
>> 
>> @@ -3289,6 +3290,8 @@ group_create_queue(struct panthor_group *group,
>>  	if (!queue)
>>  		return ERR_PTR(-ENOMEM);
>> 
>> +	memset(&sched_params, 0, sizeof(struct drm_sched_init_params));
>> +
>>  	queue->fence_ctx.id = dma_fence_context_alloc(1);
>>  	spin_lock_init(&queue->fence_ctx.lock);
>>  	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
>> @@ -3341,17 +3344,23 @@ group_create_queue(struct panthor_group *group,
>>  	if (ret)
>>  		goto err_free_queue;
>> 
>> +	sched_params.ops = &panthor_queue_sched_ops;
>> +	sched_params.submit_wq = group->ptdev->scheduler->wq;
>> +	sched_params.num_rqs = 1;
>>  	/*
>> -	 * Credit limit argument tells us the total number of instructions
>> +	 * The credit limit argument tells us the total number of instructions
>>  	 * across all CS slots in the ringbuffer, with some jobs requiring
>>  	 * twice as many as others, depending on their profiling status.
>>  	 */
>> -	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
>> -			     group->ptdev->scheduler->wq, 1,
>> -			     args->ringbuf_size / sizeof(u64),
>> -			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
>> -			     group->ptdev->reset.wq,
>> -			     NULL, "panthor-queue", group->ptdev->base.dev);
>> +	sched_params.credit_limit = args->ringbuf_size / sizeof(u64);
>> +	sched_params.hang_limit = 0;
>> +	sched_params.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
>> +	sched_params.timeout_wq = group->ptdev->reset.wq;
>> +	sched_params.score = NULL;
>> +	sched_params.name = "panthor-queue";
>> +	sched_params.dev = group->ptdev->base.dev;
>> +
>> +	ret = drm_sched_init(&queue->scheduler, &sched_params);
>>  	if (ret)
>>  		goto err_free_queue;
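Concretely, dropping the explicit zeros and NULLs relies on C's rule that
members omitted from a designated initializer are zero-initialized, which
is also what makes the memset() redundant. A sketch of the panthor call
site along those lines, using only the fields from the quoted patch, could
look like this:

	/*
	 * Members not named here (hang_limit, score) are implicitly
	 * zero-initialized, so no memset() is needed.
	 */
	struct drm_sched_init_params sched_params = {
		.ops = &panthor_queue_sched_ops,
		.submit_wq = group->ptdev->scheduler->wq,
		.num_rqs = 1,
		.credit_limit = args->ringbuf_size / sizeof(u64),
		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
		.timeout_wq = group->ptdev->reset.wq,
		.name = "panthor-queue",
		.dev = group->ptdev->base.dev,
	};

	ret = drm_sched_init(&queue->scheduler, &sched_params);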
Boris Brezillon
2025-Jan-22 17:04 UTC
[PATCH] drm/sched: Use struct for drm_sched_init() params
On Wed, 22 Jan 2025 16:14:59 +0000
Tvrtko Ursulin <tursulin at ursulin.net> wrote:

> On 22/01/2025 15:51, Boris Brezillon wrote:
> > On Wed, 22 Jan 2025 15:08:20 +0100
> > Philipp Stanner <phasta at kernel.org> wrote:
> > 
> >> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> >> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> >> @@ -3272,6 +3272,7 @@ group_create_queue(struct panthor_group *group,
> >>  		   const struct drm_panthor_queue_create *args)
> >>  {
> >>  	struct drm_gpu_scheduler *drm_sched;
> >> +	struct drm_sched_init_params sched_params;
> > 
> > nit: Could we use a struct initializer instead of a
> > memset(0)+field-assignment?
> > 
> > 	struct drm_sched_init_params sched_params = {

Actually, you can even make it const if it's not modified after the
declaration.

> > 		.ops = &panthor_queue_sched_ops,
> > 		.submit_wq = group->ptdev->scheduler->wq,
> > 		.num_rqs = 1,
> > 		.credit_limit = args->ringbuf_size / sizeof(u64),
> > 		.hang_limit = 0,
> > 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
> > 		.timeout_wq = group->ptdev->reset.wq,
> > 		.name = "panthor-queue",
> > 		.dev = group->ptdev->base.dev,
> > 	};
> 
> +1 on this as a general approach for the whole series. And I'd drop the
> explicit zeros and NULLs. Memsets could then go too.
> 
> Regards,
> 
> Tvrtko
> 
> > 
> > The same comment applies to the panfrost changes BTW.
> > 
> >>  	struct panthor_queue *queue;
> >>  	int ret;
> >> 
> >> @@ -3289,6 +3290,8 @@ group_create_queue(struct panthor_group *group,
> >>  	if (!queue)
> >>  		return ERR_PTR(-ENOMEM);
> >> 
> >> +	memset(&sched_params, 0, sizeof(struct drm_sched_init_params));
> >> +
> >>  	queue->fence_ctx.id = dma_fence_context_alloc(1);
> >>  	spin_lock_init(&queue->fence_ctx.lock);
> >>  	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
> >> @@ -3341,17 +3344,23 @@ group_create_queue(struct panthor_group *group,
> >>  	if (ret)
> >>  		goto err_free_queue;
> >> 
> >> +	sched_params.ops = &panthor_queue_sched_ops;
> >> +	sched_params.submit_wq = group->ptdev->scheduler->wq;
> >> +	sched_params.num_rqs = 1;
> >>  	/*
> >> -	 * Credit limit argument tells us the total number of instructions
> >> +	 * The credit limit argument tells us the total number of instructions
> >>  	 * across all CS slots in the ringbuffer, with some jobs requiring
> >>  	 * twice as many as others, depending on their profiling status.
> >>  	 */
> >> -	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
> >> -			     group->ptdev->scheduler->wq, 1,
> >> -			     args->ringbuf_size / sizeof(u64),
> >> -			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
> >> -			     group->ptdev->reset.wq,
> >> -			     NULL, "panthor-queue", group->ptdev->base.dev);
> >> +	sched_params.credit_limit = args->ringbuf_size / sizeof(u64);
> >> +	sched_params.hang_limit = 0;
> >> +	sched_params.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
> >> +	sched_params.timeout_wq = group->ptdev->reset.wq;
> >> +	sched_params.score = NULL;
> >> +	sched_params.name = "panthor-queue";
> >> +	sched_params.dev = group->ptdev->base.dev;
> >> +
> >> +	ret = drm_sched_init(&queue->scheduler, &sched_params);
> >>  	if (ret)
> >>  		goto err_free_queue;
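Making the params const, as suggested above, only affects the declaration.
A minimal sketch, assuming drm_sched_init() accepts the params through a
pointer to const (otherwise its prototype would need to take
const struct drm_sched_init_params *):

	const struct drm_sched_init_params sched_params = {
		.ops = &panthor_queue_sched_ops,
		.name = "panthor-queue",
		/* ... remaining fields as in the earlier sketch ... */
	};

	/* Assumes drm_sched_init() takes a const pointer to the params. */
	ret = drm_sched_init(&queue->scheduler, &sched_params);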