Displaying 3 results from an estimated 3 matches for "first_changed_connector".
2023 Jun 20
[PATCH] drm/nouveau/disp: use drm_kms_helper_connector_hotplug_event()
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -465,7 +465,8 @@ nouveau_display_hpd_work(struct work_struct *work)
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	u32 pending;
-	bool changed = false;
+	int changed = 0;
+	struct drm_connector *first_changed_connector = NULL;
 
 	pm_runtime_get_sync(dev->dev);
@@ -509,7 +510,12 @@ nouveau_display_hpd_work(struct work_struct *work)
 		if (old_epoch_counter == connector->epoch_counter)
 			continue;
-		changed = true;
+		changed++;
+		if (!first_changed_connector) {
+			drm_connector_get(connector);
+...
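The snippet is cut off at "+...", but the review quoted in the next result shows where the patch ends up: count the connectors whose epoch counter moved, keep a reference on the first one, and then choose between the per-connector and the device-wide hotplug helpers. A minimal sketch of that final dispatch step, using only the DRM helpers named in the thread; the wrapper function itself is invented here for illustration and is not part of the patch:

#include <drm/drm_connector.h>
#include <drm/drm_probe_helper.h>

/* Illustrative wrapper (not in the patch): send the hotplug uevent once
 * the connector scan has finished.  'changed' is the number of connectors
 * whose epoch_counter moved; 'first' carries a reference taken with
 * drm_connector_get() on the first of them, or NULL. */
static void
hpd_send_hotplug_event(struct drm_device *dev, int changed,
		       struct drm_connector *first)
{
	if (changed == 1)
		/* Exactly one connector changed: send the fine-grained event
		 * so userspace only reprobes that connector. */
		drm_kms_helper_connector_hotplug_event(first);
	else if (changed > 0)
		/* Several connectors changed: fall back to the device-wide event. */
		drm_kms_helper_hotplug_event(dev);

	/* Drop the reference taken when 'first' was recorded. */
	if (first)
		drm_connector_put(first);
}

Holding the reference until after the event is sent keeps the connector alive between the list walk and the uevent, which is why the hunk above takes it with drm_connector_get() inside the loop.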
2023 Jun 21
[PATCH] drm/nouveau/disp: use drm_kms_helper_connector_hotplug_event()
Hi Lyude!
On Wednesday, June 21st, 2023 at 23:56, Lyude Paul <lyude at redhat.com> wrote:
> > - if (changed)
> > + if (changed == 1)
> > + drm_kms_helper_connector_hotplug_event(first_changed_connector);
> > + else if (changed > 0)
> > drm_kms_helper_hotplug_event(dev);
>
> I'm curious if you think there might be an advantage to doing this per-
> connector even with multiple connectors? Seems like we could do that if we
> stored changed connectors as a bitmask....
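Lyude's suggestion would let the driver send fine-grained events even when several connectors change in one pass. A sketch of that idea, assuming the probe loop records each changed connector in a u32 mask via the existing drm_connector_mask() helper; this illustrates the reviewer's proposal, not what the patch does:

#include <drm/drm_connector.h>
#include <drm/drm_probe_helper.h>

/* Sketch of the bitmask variant: walk the connector list a second time
 * and send one per-connector hotplug event for every bit the probe pass
 * set in the mask with drm_connector_mask(). */
static void
hpd_send_hotplug_events(struct drm_device *dev, u32 changed_mask)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (changed_mask & drm_connector_mask(connector))
			drm_kms_helper_connector_hotplug_event(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
}

One uevent per connector lets userspace limit reprobing to the connectors that actually changed, at the cost of emitting several uevents when many connectors flip at once.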
2024 May 28
[PATCH] drm/nouveau: don't attempt to schedule hpd_work on headless cards
> +	if (drm->headless)
> + return;
> +
> spin_lock_irq(&drm->hpd_lock);
> drm->hpd_pending = ~0;
> spin_unlock_irq(&drm->hpd_lock);
> @@ -468,6 +471,11 @@ nouveau_display_hpd_work(struct work_struct *work)
> int changed = 0;
> struct drm_connector *first_changed_connector = NULL;
>
> + WARN_ON_ONCE(drm->headless);
> +
> + if (drm->headless)
> + return;
> +
Same here.
> pm_runtime_get_sync(dev->dev);
>
> spin_lock_irq(&drm->hpd_lock);
> @@ -635,7 +643,7 @@ nouveau_display_fini(struct drm_device *dev, bool...
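The hunks quoted above guard both the path that schedules hpd_work and the work handler itself on headless cards. A condensed sketch of the handler-side check, folding the patch's separate WARN_ON_ONCE() and early return into one test (drm->headless is the flag the patch introduces; the body of the handler and the driver-internal nouveau_drv.h definitions are assumed and elided here):

/* Nothing to probe on a card without a display engine, so bail out
 * before touching any HPD state.  The work item should never have been
 * queued on a headless card; warn once if it was. */
static void
nouveau_display_hpd_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);

	if (WARN_ON_ONCE(drm->headless))
		return;

	/* ... connector re-probe and hotplug event dispatch as before ... */
}

Combining the warning and the bail-out is just one way to keep them together; the patch as quoted carries them as two separate statements, which is what the review comments above are reacting to.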