Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 00/16] MST refcounting/atomic helpers cleanup
This is the series I've been working on for a while now to get all of the
atomic DRM drivers in the tree to use the atomic MST helpers, and to make the
atomic MST helpers actually idempotent. Turns out it's a lot more difficult to
do that without also fixing how port and branch device refcounting works so
that it actually makes sense, since the current upstream implementation
requires a ton of magic in the atomic helpers to work around properly and in
many situations just plain doesn't work as intended. There's still more
cleanup that can be done, but I think this is a good place to start off for
now :).

Lyude Paul (16):
  drm/dp_mst: Rename drm_dp_mst_get_validated_(port|mstb)_ref and friends
  drm/dp_mst: Introduce new refcounting scheme for mstbs and ports
  drm/dp_mst: Restart last_connected_port_and_mstb() if topology ref fails
  drm/dp_mst: Stop releasing VCPI when removing ports from topology
  drm/dp_mst: Fix payload deallocation on hotplugs using malloc refs
  drm/i915: Keep malloc references to MST ports
  drm/amdgpu/display: Keep malloc ref to MST port
  drm/nouveau: Remove bogus cleanup in nv50_mstm_add_connector()
  drm/nouveau: Remove unnecessary VCPI checks in nv50_msto_cleanup()
  drm/nouveau: Keep malloc references to MST ports
  drm/nouveau: Stop unsetting mstc->port, use malloc refs
  drm/nouveau: Grab payload lock in nv50_msto_payload()
  drm/dp_mst: Add some atomic state iterator macros
  drm/dp_mst: Start tracking per-port VCPI allocations
  drm/dp_mst: Check payload count in drm_dp_mst_atomic_check()
  drm/nouveau: Use atomic VCPI helpers for MST

 .../gpu/dp-mst/topology-figure-1.dot          |  52 +
 .../gpu/dp-mst/topology-figure-2.dot          |  56 ++
 .../gpu/dp-mst/topology-figure-3.dot          |  59 ++
 Documentation/gpu/drm-kms-helpers.rst         |  26 +-
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  11 +-
 drivers/gpu/drm/drm_dp_mst_topology.c         | 937 ++++++++++++++----
 drivers/gpu/drm/i915/intel_connector.c        |   4 +
 drivers/gpu/drm/i915/intel_display.c          |   4 +
 drivers/gpu/drm/i915/intel_dp_mst.c           |  65 +-
 drivers/gpu/drm/nouveau/dispnv50/disp.c       |  94 +-
 include/drm/drm_dp_mst_helper.h               | 151 ++-
 11 files changed, 1207 insertions(+), 252 deletions(-)
 create mode 100644 Documentation/gpu/dp-mst/topology-figure-1.dot
 create mode 100644 Documentation/gpu/dp-mst/topology-figure-2.dot
 create mode 100644 Documentation/gpu/dp-mst/topology-figure-3.dot
--
2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 01/16] drm/dp_mst: Rename drm_dp_mst_get_validated_(port|mstb)_ref and friends
s/drm_dp_get_validated_port_ref/drm_dp_mst_topology_get_port_validated/ s/drm_dp_put_port/drm_dp_mst_topology_put_port/ s/drm_dp_get_validated_mstb_ref/drm_dp_mst_topology_get_mstb_validated/ s/drm_dp_put_mst_branch_device/drm_dp_mst_topology_put_mstb/ This is a much more consistent naming scheme, and will make even more sense once we redesign how the current refcounting scheme here works. Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 114 ++++++++++++++------------ 1 file changed, 62 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 2ab16c9e6243..6f9b211069a7 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -46,7 +46,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf); static int test_calc_pbn_mode(void); -static void drm_dp_put_port(struct drm_dp_mst_port *port); +static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, int id, @@ -888,7 +888,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) */ list_for_each_entry_safe(port, tmp, &mstb->ports, next) { list_del(&port->next); - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); } /* drop any tx slots msg */ @@ -911,7 +911,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) kref_put(kref, drm_dp_free_mst_branch_device); } -static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) +static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); } @@ -930,7 +930,7 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) case DP_PEER_DEVICE_MST_BRANCHING: mstb = port->mstb; port->mstb = NULL; - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); break; } } @@ -970,12 +970,14 @@ static void drm_dp_destroy_port(struct kref *kref) kfree(port); } -static void drm_dp_put_port(struct drm_dp_mst_port *port) +static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { kref_put(&port->kref, drm_dp_destroy_port); } -static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) +static struct drm_dp_mst_branch * +drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_branch *to_find) { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *rmstb; @@ -985,7 +987,8 @@ static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct } list_for_each_entry(port, &mstb->ports, next) { if (port->mstb) { - rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( + port->mstb, to_find); if (rmstb) return rmstb; } @@ -993,12 +996,15 @@ static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct return NULL; } -static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) +static struct drm_dp_mst_branch * +drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, + struct 
drm_dp_mst_branch *mstb) { struct drm_dp_mst_branch *rmstb = NULL; mutex_lock(&mgr->lock); if (mgr->mst_primary) - rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( + mgr->mst_primary, mstb); mutex_unlock(&mgr->lock); return rmstb; } @@ -1021,7 +1027,9 @@ static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_ return NULL; } -static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +static struct drm_dp_mst_port * +drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { struct drm_dp_mst_port *rport = NULL; mutex_lock(&mgr->lock); @@ -1210,7 +1218,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, list_del(&port->next); mutex_unlock(&mstb->mgr->lock); /* drop port list reference */ - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); goto out; } if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || @@ -1224,7 +1232,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, out: /* put reference to this port */ - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); } static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, @@ -1259,7 +1267,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, dowork = true; } - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); if (dowork) queue_work(system_long_wq, &mstb->mgr->work); @@ -1362,10 +1370,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m drm_dp_send_enum_path_resources(mgr, mstb, port); if (port->mstb) { - mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); + mstb_child = drm_dp_mst_topology_get_mstb_validated( + mgr, port->mstb); if (mstb_child) { drm_dp_check_and_send_link_address(mgr, mstb_child); - drm_dp_put_mst_branch_device(mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); } } } @@ -1384,7 +1393,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) mutex_unlock(&mgr->lock); if (mstb) { drm_dp_check_and_send_link_address(mgr, mstb); - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); } } @@ -1726,17 +1735,17 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, u8 sinks[DRM_DP_MAX_SDP_STREAMS]; int i; - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return -EINVAL; port_num = port->port_num; - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); if (!mstb) { mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); if (!mstb) { - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return -EINVAL; } } @@ -1766,8 +1775,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, } kfree(txmsg); fail_put: - drm_dp_put_mst_branch_device(mstb); - drm_dp_put_port(port); + drm_dp_mst_topology_put_mstb(mstb); + drm_dp_mst_topology_put_port(port); return ret; } @@ -1777,13 +1786,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg; int len, ret; - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return -EINVAL; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) { - drm_dp_put_port(port); + 
drm_dp_mst_topology_put_port(port); return -ENOMEM; } @@ -1799,7 +1808,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, ret = 0; } kfree(txmsg); - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return ret; } @@ -1888,7 +1897,8 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) if (vcpi) { port = container_of(vcpi, struct drm_dp_mst_port, vcpi); - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, + port); if (!port) { mutex_unlock(&mgr->payload_lock); return -EINVAL; @@ -1925,7 +1935,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) cur_slots += req_payload.num_slots; if (port) - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); } for (i = 0; i < mgr->max_payloads; i++) { @@ -2024,7 +2034,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb; - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); if (!mstb) return -EINVAL; @@ -2048,7 +2058,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, } kfree(txmsg); fail_put: - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); return ret; } @@ -2192,7 +2202,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms out_unlock: mutex_unlock(&mgr->lock); if (mstb) - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); return ret; } @@ -2357,7 +2367,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) mgr->down_rep_recv.initial_hdr.lct, mgr->down_rep_recv.initial_hdr.rad[0], mgr->down_rep_recv.msg[0]); - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; } @@ -2368,7 +2378,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) } memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; @@ -2441,7 +2451,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) } if (mstb) - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); } @@ -2501,7 +2511,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector enum drm_connector_status status = connector_status_disconnected; /* we need to search for the port in the mgr in case its gone */ - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return connector_status_disconnected; @@ -2526,7 +2536,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; } out: - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return status; } EXPORT_SYMBOL(drm_dp_mst_detect_port); @@ -2543,11 +2553,11 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, { bool ret = false; - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return ret; ret = port->has_audio; - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return ret; } EXPORT_SYMBOL(drm_dp_mst_port_has_audio); @@ -2567,7 +2577,7 
@@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ struct edid *edid = NULL; /* we need to search for the port in the mgr in case its gone */ - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return NULL; @@ -2578,7 +2588,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ drm_connector_set_tile_property(connector); } port->has_audio = drm_detect_monitor_audio(edid); - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return edid; } EXPORT_SYMBOL(drm_dp_mst_get_edid); @@ -2649,7 +2659,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, if (IS_ERR(topology_state)) return PTR_ERR(topology_state); - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (port == NULL) return -EINVAL; req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); @@ -2657,14 +2667,14 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, req_slots, topology_state->avail_slots); if (req_slots > topology_state->avail_slots) { - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return -ENOSPC; } topology_state->avail_slots -= req_slots; DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots); - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return req_slots; } EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); @@ -2715,7 +2725,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, { int ret; - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return false; @@ -2725,7 +2735,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, if (port->vcpi.vcpi > 0) { DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); if (pbn == port->vcpi.pbn) { - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return true; } } @@ -2739,7 +2749,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots); - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return true; out: return false; @@ -2749,12 +2759,12 @@ EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { int slots = 0; - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return slots; slots = port->vcpi.num_slots; - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); return slots; } EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); @@ -2768,11 +2778,11 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); */ void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return; port->vcpi.num_slots = 0; - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); } EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); @@ -2783,7 +2793,7 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); */ void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { - port = drm_dp_get_validated_port_ref(mgr, port); + port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return; @@ -2792,7 +2802,7 @@ void 
drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_ port->vcpi.pbn = 0; port->vcpi.aligned_pbn = 0; port->vcpi.vcpi = 0; - drm_dp_put_port(port); + drm_dp_mst_topology_put_port(port); } EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); @@ -3292,7 +3302,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs struct drm_dp_sideband_msg_tx *txmsg = NULL; int ret; - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); if (!mstb) return -EREMOTEIO; @@ -3342,7 +3352,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs } out: kfree(txmsg); - drm_dp_put_mst_branch_device(mstb); + drm_dp_mst_topology_put_mstb(mstb); return ret; } -- 2.19.2
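[Editor's sketch] The calling convention is unchanged by this rename; only the names now make the
get/put pairing and the "validated against the live topology" semantics explicit. The condensed C
sketch below mirrors drm_dp_mst_port_has_audio() from the diff above; example_port_query() is a
made-up name, and since both helpers are static inside drm_dp_mst_topology.c this is only a view of
the in-file pattern, not a driver-facing API, and is not compilable outside the kernel tree:

        static bool example_port_query(struct drm_dp_mst_topology_mgr *mgr,
                                       struct drm_dp_mst_port *port)
        {
                bool has_audio;

                /* Look the port up in the live topology before touching it;
                 * on success this takes a reference that must be dropped. */
                port = drm_dp_mst_topology_get_port_validated(mgr, port);
                if (!port)
                        return false;

                has_audio = port->has_audio;

                /* Balance the reference taken by the validation helper */
                drm_dp_mst_topology_put_port(port);
                return has_audio;
        }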
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 02/16] drm/dp_mst: Introduce new refcounting scheme for mstbs and ports
The current way of handling refcounting in the DP MST helpers is really
confusing and probably just plain wrong, because it has been hacked up many
times over the years without anyone actually going over the code and seeing
if things could be simplified.

To the best of my understanding, the current scheme works like this:
drm_dp_mst_port and drm_dp_mst_branch both have a single refcount. When this
refcount hits 0 for either of the two, they're removed from the topology
state, but not immediately freed. Both ports and branch devices will
reinitialize their kref once it has hit 0 before actually destroying
themselves. The intended purpose behind this is to avoid problems like not
being able to free a remote payload that might still be active, due to us
having removed all of the port/branch device structures in memory, as per:

91a25e463130 ("drm/dp/mst: deallocate payload on port destruction")

That may have worked, but it then caused use-after-free errors. Being new to
MST at the time, I tried fixing it:

263efde31f97 ("drm/dp/mst: Get validated port ref in drm_dp_update_payload_part1()")

But that was broken: both drm_dp_mst_port and drm_dp_mst_branch structs are
validated in almost every DP MST helper function. Simply put, this means we
go through the topology and try to see if the given drm_dp_mst_branch or
drm_dp_mst_port is still attached to something before trying to use it, in
order to avoid dereferencing freed memory (something that has happened a LOT
in the past with this library). Because of this, it doesn't actually matter
whether or not we keep the ports and branches around in memory: that alone
isn't enough, because any function that validates the branches and ports
passed to it will still reject them anyway, since they're no longer in the
topology structure. So, use-after-free errors were fixed, but payload
deallocation was completely broken.

Two years later, AMD informed me about this issue and I attempted to come up
with a temporary fix, pending a long-overdue cleanup of this library:

c54c7374ff44 ("drm/dp_mst: Skip validating ports during destruction, just ref")

But then that introduced use-after-free errors, so I quickly reverted it:

9765635b3075 ("Revert "drm/dp_mst: Skip validating ports during destruction, just ref"")

And in the process, I learned that there is just no simple fix for this: the
design is just broken. Unfortunately, the usage of these helpers is quite
broken as well. Some drivers like i915 have been smart enough to avoid
accessing any kind of information from MST port structures, but others like
nouveau have assumed, understandably so, that drm_dp_mst_port structures are
normal and can just be accessed at any time without worrying about
use-after-free errors.

After a lot of discussion, Daniel Vetter and I came up with a better idea to
replace all of this. To summarize (this is documented far more in depth in
the documentation this patch introduces), we make it so that drm_dp_mst_port
and drm_dp_mst_branch structures have two different classes of refcounts:
topology_kref and malloc_kref.

topology_kref corresponds to the lifetime of the given drm_dp_mst_port or
drm_dp_mst_branch in its given topology. Once it hits zero, any associated
connectors are removed and the branch or port can no longer be validated.

malloc_kref corresponds to the lifetime of the memory allocation for the
actual structure, and will always be non-zero so long as the topology_kref
is non-zero.
This gives us a way to allow callers to hold onto port and branch device
structures past their topology lifetime, and dramatically simplifies the
lifetimes of both structures. This also finally fixes the port deallocation
problem, properly.

Additionally, since this now means that we can keep ports and branch devices
allocated in memory for however long we need, we no longer need a significant
amount of the port validation that we currently do.

Additionally, there is one last scenario that this fixes, which couldn't have
been fixed properly beforehand:

- CPU1 unrefs port from topology (refcount 1->0)
- CPU2 refs port in topology (refcount 0->1)

Since we can now guarantee memory safety for ports and branches as needed, we
can also make our main reference counting functions fix this problem by using
kref_get_unless_zero() internally, so that topology refcounts can only ever
reach 0 once. (A brief driver-side sketch of the new malloc reference API
follows after this patch.)

Changes since v1:
* Remove forward declarations - danvet
* Move "Branch device and port refcounting" section from documentation into kernel-doc comments - danvet
* Export internal topology lifetime functions into their own section in the kernel-docs - danvet
* s/@/&/g for struct references in kernel-docs - danvet
* Drop the "when they are no longer being used" bits from the kernel docs - danvet
* Modify diagrams to show how the DRM driver interacts with the topology and payloads - danvet
* Make suggested documentation changes for drm_dp_mst_topology_get_mstb() and drm_dp_mst_topology_get_port() - danvet
* Better explain the relationship between malloc refs and topology krefs in the documentation for drm_dp_mst_topology_get_port() and drm_dp_mst_topology_get_mstb() - danvet
* Fix "See also" in drm_dp_mst_topology_get_mstb() - danvet
* Rename drm_dp_mst_topology_get_(port|mstb)() -> drm_dp_mst_topology_try_get_(port|mstb)() and drm_dp_mst_topology_ref_(port|mstb)() -> drm_dp_mst_topology_get_(port|mstb)() - danvet
* s/should/must in docs - danvet
* WARN_ON(refcount == 0) in topology_get_(mstb|port) - danvet
* Move kdocs for mstb/port structs inline - danvet
* Split drm_dp_get_last_connected_port_and_mstb() changes into their own commit - danvet

Signed-off-by: Lyude Paul <lyude at redhat.com>
Cc: Daniel Vetter <daniel at ffwll.ch>
Cc: David Airlie <airlied at redhat.com>
Cc: Jerry Zuo <Jerry.Zuo at amd.com>
Cc: Harry Wentland <harry.wentland at amd.com>
Cc: Juston Li <juston.li at intel.com>
---
 .../gpu/dp-mst/topology-figure-1.dot  |  52 ++
 .../gpu/dp-mst/topology-figure-2.dot  |  56 ++
 .../gpu/dp-mst/topology-figure-3.dot  |  59 +++
 Documentation/gpu/drm-kms-helpers.rst |  26 +-
 drivers/gpu/drm/drm_dp_mst_topology.c | 481 +++++++++++++++---
 include/drm/drm_dp_mst_helper.h       |  32 +-
 6 files changed, 628 insertions(+), 78 deletions(-)
 create mode 100644 Documentation/gpu/dp-mst/topology-figure-1.dot
 create mode 100644 Documentation/gpu/dp-mst/topology-figure-2.dot
 create mode 100644 Documentation/gpu/dp-mst/topology-figure-3.dot

diff --git a/Documentation/gpu/dp-mst/topology-figure-1.dot b/Documentation/gpu/dp-mst/topology-figure-1.dot new file mode 100644 index 000000000000..157e17c7e0b0 --- /dev/null +++ b/Documentation/gpu/dp-mst/topology-figure-1.dot @@ -0,0 +1,52 @@ +digraph T { + /* Make sure our payloads are always drawn below the driver node */ + subgraph cluster_driver { + fillcolor = grey; + style = filled; + driver -> {payload1, payload2} [dir=none]; + } + + /* Driver malloc references */ + edge [style=dashed]; + driver -> port1; + driver -> port2; + driver -> port3:e; + driver -> port4; + + payload1:s ->
port1:e; + payload2:s -> port3:e; + edge [style=""]; + + subgraph cluster_topology { + label="Topology Manager"; + labelloc=bottom; + + /* Topology references */ + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + + /* Malloc references */ + edge [style=dashed;dir=back]; + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + } + + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; + + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue]; + + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen;shape=oval]; + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen;shape=oval]; + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;shape=oval]; + mstb4 [label="MSTB #4";style=filled;fillcolor=palegreen;shape=oval]; + + port1 [label="Port #1";shape=oval]; + port2 [label="Port #2";shape=oval]; + port3 [label="Port #3";shape=oval]; + port4 [label="Port #4";shape=oval]; +} diff --git a/Documentation/gpu/dp-mst/topology-figure-2.dot b/Documentation/gpu/dp-mst/topology-figure-2.dot new file mode 100644 index 000000000000..4243dd1737cb --- /dev/null +++ b/Documentation/gpu/dp-mst/topology-figure-2.dot @@ -0,0 +1,56 @@ +digraph T { + /* Make sure our payloads are always drawn below the driver node */ + subgraph cluster_driver { + fillcolor = grey; + style = filled; + driver -> {payload1, payload2} [dir=none]; + } + + /* Driver malloc references */ + edge [style=dashed]; + driver -> port1; + driver -> port2; + driver -> port3:e; + driver -> port4 [color=red]; + + payload1:s -> port1:e; + payload2:s -> port3:e; + edge [style=""]; + + subgraph cluster_topology { + label="Topology Manager"; + labelloc=bottom; + + /* Topology references */ + mstb1 -> {port1, port2}; + port1 -> mstb2; + edge [color=red]; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + edge [color=""]; + + /* Malloc references */ + edge [style=dashed;dir=back]; + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 -> port3; + edge [color=red]; + mstb3 -> port4; + port3 -> mstb4; + } + + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen]; + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen]; + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen]; + mstb4 [label="MSTB #4";style=filled;fillcolor=grey]; + + port1 [label="Port #1"]; + port2 [label="Port #2"]; + port3 [label="Port #3"]; + port4 [label="Port #4";style=filled;fillcolor=grey]; + + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; + + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue]; +} diff --git a/Documentation/gpu/dp-mst/topology-figure-3.dot b/Documentation/gpu/dp-mst/topology-figure-3.dot new file mode 100644 index 000000000000..6cd78d06778b --- /dev/null +++ b/Documentation/gpu/dp-mst/topology-figure-3.dot @@ -0,0 +1,59 @@ +digraph T { + /* Make sure our payloads are always drawn below the driver node */ + subgraph cluster_driver { + fillcolor = grey; + style = filled; + edge [dir=none]; + driver -> payload1; + driver -> payload2 [penwidth=3]; + edge [dir=""]; + } + + /* Driver malloc references */ + edge [style=dashed]; + driver -> port1; + driver -> port2; + driver -> port3:e; + driver -> port4 [color=grey]; + payload1:s -> port1:e; + payload2:s -> port3:e [penwidth=3]; + edge [style=""]; + + subgraph 
cluster_topology { + label="Topology Manager"; + labelloc=bottom; + + /* Topology references */ + mstb1 -> {port1, port2}; + port1 -> mstb2; + edge [color=grey]; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + edge [color=""]; + + /* Malloc references */ + edge [style=dashed;dir=back]; + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 [penwidth=3]; + mstb3 -> port3 [penwidth=3]; + edge [color=grey]; + mstb3 -> port4; + port3 -> mstb4; + } + + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen]; + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen]; + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;penwidth=3]; + mstb4 [label="MSTB #4";style=filled;fillcolor=grey]; + + port1 [label="Port #1"]; + port2 [label="Port #2";penwidth=5]; + port3 [label="Port #3";penwidth=3]; + port4 [label="Port #4";style=filled;fillcolor=grey]; + + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; + + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue;penwidth=3]; +} diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index b422eb8edf16..7a3fc569bc68 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -208,18 +208,40 @@ Display Port Dual Mode Adaptor Helper Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c :export: -Display Port MST Helper Functions Reference -==========================================+Display Port MST Helpers +=======================+ +Overview +-------- .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c :doc: dp mst helper +.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c + :doc: Branch device and port refcounting + +Functions Reference +------------------- + .. kernel-doc:: include/drm/drm_dp_mst_helper.h :internal: .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c :export: +Topology Lifetime Internals +--------------------------- + +These functions aren't exported to drivers, but are documented here to help make +the MST topology helpers easier to understand + +.. 
kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c + :functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb + drm_dp_mst_topology_put_mstb + drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port + drm_dp_mst_topology_put_port + drm_dp_mst_get_mstb_malloc drm_dp_mst_put_mstb_malloc + MIPI DSI Helper Functions Reference ================================== diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6f9b211069a7..b380ada09e90 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -850,46 +850,211 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) if (lct > 1) memcpy(mstb->rad, rad, lct / 2); INIT_LIST_HEAD(&mstb->ports); - kref_init(&mstb->kref); + kref_init(&mstb->topology_kref); + kref_init(&mstb->malloc_kref); return mstb; } -static void drm_dp_free_mst_port(struct kref *kref); - static void drm_dp_free_mst_branch_device(struct kref *kref) { - struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); - if (mstb->port_parent) { - if (list_empty(&mstb->port_parent->next)) - kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); - } + struct drm_dp_mst_branch *mstb + container_of(kref, struct drm_dp_mst_branch, malloc_kref); + + if (mstb->port_parent) + drm_dp_mst_put_port_malloc(mstb->port_parent); + kfree(mstb); } +/** + * DOC: Branch device and port refcounting + * + * Topology refcount overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The refcounting schemes for &struct drm_dp_mst_branch and &struct + * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have + * two different kinds of refcounts: topology refcounts, and malloc refcounts. + * + * Topology refcounts are not exposed to drivers, and are handled internally + * by the DP MST helpers. The helpers use them in order to prevent the + * in-memory topology state from being changed in the middle of critical + * operations like changing the internal state of payload allocations. This + * means each branch and port will be considered to be connected to the rest + * of the topology until it's topology refcount reaches zero. Additionally, + * for ports this means that their associated &struct drm_connector will stay + * registered with userspace until the port's refcount reaches 0. + * + * Malloc refcount overview + * ~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Malloc references are used to keep a &struct drm_dp_mst_port or &struct + * drm_dp_mst_branch allocated even after all of its topology references have + * been dropped, so that the driver or MST helpers can safely access each + * branch's last known state before it was disconnected from the topology. + * When the malloc refcount of a port or branch reaches 0, the memory + * allocation containing the &struct drm_dp_mst_branch or &struct + * drm_dp_mst_port respectively will be freed. + * + * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed + * to drivers. As of writing this documentation, there are no drivers that + * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST + * helpers. Exposing this API to drivers in a race-free manner would take more + * tweaking of the refcounting scheme, however patches are welcome provided + * there is a legitimate driver usecase for this. 
+ * + * Refcount relationships in a topology + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Let's take a look at why the relationship between topology and malloc + * refcounts is designed the way it is. + * + * .. kernel-figure:: dp-mst/topology-figure-1.dot + * + * An example of topology and malloc refs in a DP MST topology with two + * active payloads. Topology refcount increments are indicated by solid + * lines, and malloc refcount increments are indicated by dashed lines. + * Each starts from the branch which incremented the refcount, and ends at + * the branch to which the refcount belongs to. + * + * As you can see in figure 1, every branch increments the topology refcount + * of it's children, and increments the malloc refcount of it's parent. + * Additionally, every payload increments the malloc refcount of it's assigned + * port by 1. + * + * So, what would happen if MSTB #3 from the above figure was unplugged from + * the system, but the driver hadn't yet removed payload #2 from port #3? The + * topology would start to look like figure 2. + * + * .. kernel-figure:: dp-mst/topology-figure-2.dot + * + * Ports and branch devices which have been released from memory are + * colored grey, and references which have been removed are colored red. + * + * Whenever a port or branch device's topology refcount reaches zero, it will + * decrement the topology refcounts of all its children, the malloc refcount + * of its parent, and finally its own malloc refcount. For MSTB #4 and port + * #4, this means they both have been disconnected from the topology and freed + * from memory. But, because payload #2 is still holding a reference to port + * #3, port #3 is removed from the topology but it's &struct drm_dp_mst_port + * is still accessible from memory. This also means port #3 has not yet + * decremented the malloc refcount of MSTB #3, so it's &struct + * drm_dp_mst_branch will also stay allocated in memory until port #3's + * malloc refcount reaches 0. + * + * This relationship is necessary because in order to release payload #2, we + * need to be able to figure out the last relative of port #3 that's still + * connected to the topology. In this case, we would travel up the topology as + * shown in figure 3. + * + * .. kernel-figure:: dp-mst/topology-figure-3.dot + * + * And finally, remove payload #2 by communicating with port #2 through + * sideband transactions. + */ + +/** + * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch + * device + * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of + * + * Increments &drm_dp_mst_branch.malloc_kref. When + * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb + * will be released and @mstb may no longer be used. + * + * See also: drm_dp_mst_put_mstb_malloc() + */ +static void +drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb) +{ + kref_get(&mstb->malloc_kref); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); +} + +/** + * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch + * device + * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of + * + * Decrements &drm_dp_mst_branch.malloc_kref. When + * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb + * will be released and @mstb may no longer be used. 
+ * + * See also: drm_dp_mst_get_mstb_malloc() + */ +static void +drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb) +{ + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)-1); + kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device); +} + +static void drm_dp_free_mst_port(struct kref *kref) +{ + struct drm_dp_mst_port *port + container_of(kref, struct drm_dp_mst_port, malloc_kref); + + drm_dp_mst_put_mstb_malloc(port->parent); + kfree(port); +} + +/** + * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port + * @port: The &struct drm_dp_mst_port to increment the malloc refcount of + * + * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref + * reaches 0, the memory allocation for @port will be released and @port may + * no longer be used. + * + * Because @port could potentially be freed at any time by the DP MST helpers + * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this + * function, drivers that which to make use of &struct drm_dp_mst_port should + * ensure that they grab at least one main malloc reference to their MST ports + * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before + * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0. + * + * See also: drm_dp_mst_put_port_malloc() + */ +void +drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port) +{ + kref_get(&port->malloc_kref); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref)); +} +EXPORT_SYMBOL(drm_dp_mst_get_port_malloc); + +/** + * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port + * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of + * + * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref + * reaches 0, the memory allocation for @port will be released and @port may + * no longer be used. + * + * See also: drm_dp_mst_get_port_malloc() + */ +void +drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) +{ + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref)-1); + kref_put(&port->malloc_kref, drm_dp_free_mst_port); +} +EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); + static void drm_dp_destroy_mst_branch_device(struct kref *kref) { - struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); + struct drm_dp_mst_branch *mstb + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - /* - * init kref again to be used by ports to remove mst branch when it is - * not needed anymore - */ - kref_init(kref); - - if (mstb->port_parent && list_empty(&mstb->port_parent->next)) - kref_get(&mstb->port_parent->kref); - - /* - * destroy all ports - don't need lock - * as there are no more references to the mst branch - * device at this point. 
- */ + mutex_lock(&mgr->lock); list_for_each_entry_safe(port, tmp, &mstb->ports, next) { list_del(&port->next); drm_dp_mst_topology_put_port(port); } + mutex_unlock(&mgr->lock); /* drop any tx slots msg */ mutex_lock(&mstb->mgr->qlock); @@ -908,14 +1073,82 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) if (wake_tx) wake_up_all(&mstb->mgr->tx_waitq); - kref_put(kref, drm_dp_free_mst_branch_device); + drm_dp_mst_put_mstb_malloc(mstb); +} + +/** + * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a + * branch device unless its zero + * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of + * + * Attempts to grab a topology reference to @mstb, if it hasn't yet been + * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has + * reached 0). Holding a topology reference implies that a malloc reference + * will be held to @mstb as long as the user holds the topology reference. + * + * Care should be taken to ensure that the user has at least one malloc + * reference to @mstb. If you already have a topology reference to @mstb, you + * should use drm_dp_mst_topology_get_mstb() instead. + * + * See also: + * drm_dp_mst_topology_get_mstb() + * drm_dp_mst_topology_put_mstb() + * + * Returns: + * * 1: A topology reference was grabbed successfully + * * 0: @port is no longer in the topology, no reference was grabbed + */ +static int __must_check +drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) +{ + int ret = kref_get_unless_zero(&mstb->topology_kref); + + if (ret) + DRM_DEBUG("mstb %p (%d)\n", mstb, + kref_read(&mstb->topology_kref)); + + return ret; } -static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) +/** + * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a + * branch device + * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of + * + * Increments &drm_dp_mst_branch.topology_refcount without checking whether or + * not it's already reached 0. This is only valid to use in scenarios where + * you are already guaranteed to have at least one active topology reference + * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used. + * + * See also: + * drm_dp_mst_topology_try_get_mstb() + * drm_dp_mst_topology_put_mstb() + */ +static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) { - kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); + WARN_ON(kref_read(&mstb->topology_kref) == 0); + kref_get(&mstb->topology_kref); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); } +/** + * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch + * device + * @mstb: The &struct drm_dp_mst_branch to release the topology reference from + * + * Releases a topology reference from @mstb by decrementing + * &drm_dp_mst_branch.topology_kref. 
+ * + * See also: + * drm_dp_mst_topology_try_get_mstb() + * drm_dp_mst_topology_get_mstb() + */ +static void +drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) +{ + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)-1); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); +} static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) { @@ -937,7 +1170,8 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) static void drm_dp_destroy_port(struct kref *kref) { - struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); + struct drm_dp_mst_port *port + container_of(kref, struct drm_dp_mst_port, topology_kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; if (!port->input) { @@ -956,7 +1190,6 @@ static void drm_dp_destroy_port(struct kref *kref) * from an EDID retrieval */ mutex_lock(&mgr->destroy_connector_lock); - kref_get(&port->parent->kref); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); @@ -967,12 +1200,78 @@ static void drm_dp_destroy_port(struct kref *kref) drm_dp_port_teardown_pdt(port, port->pdt); port->pdt = DP_PEER_DEVICE_NONE; } - kfree(port); + drm_dp_mst_put_port_malloc(port); } +/** + * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a + * port unless its zero + * @port: &struct drm_dp_mst_port to increment the topology refcount of + * + * Attempts to grab a topology reference to @port, if it hasn't yet been + * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached + * 0). Holding a topology reference implies that a malloc reference will be + * held to @port as long as the user holds the topology reference. + * + * Care should be taken to ensure that the user has at least one malloc + * reference to @port. If you already have a topology reference to @port, you + * should use drm_dp_mst_topology_get_port() instead. + * + * See also: + * drm_dp_mst_topology_get_port() + * drm_dp_mst_topology_put_port() + * + * Returns: + * * 1: A topology reference was grabbed successfully + * * 0: @port is no longer in the topology, no reference was grabbed + */ +static int __must_check +drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) +{ + int ret = kref_get_unless_zero(&port->topology_kref); + + if (ret) + DRM_DEBUG("port %p (%d)\n", port, + kref_read(&port->topology_kref)); + + return ret; +} + +/** + * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port + * @port: The &struct drm_dp_mst_port to increment the topology refcount of + * + * Increments &drm_dp_mst_port.topology_refcount without checking whether or + * not it's already reached 0. This is only valid to use in scenarios where + * you are already guaranteed to have at least one active topology reference + * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used. 
+ * + * See also: + * drm_dp_mst_topology_try_get_port() + * drm_dp_mst_topology_put_port() + */ +static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) +{ + WARN_ON(kref_read(&port->topology_kref) == 0); + kref_get(&port->topology_kref); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); +} + +/** + * drm_dp_mst_topology_put_port() - release a topology reference to a port + * @port: The &struct drm_dp_mst_port to release the topology reference from + * + * Releases a topology reference from @port by decrementing + * &drm_dp_mst_port.topology_kref. + * + * See also: + * drm_dp_mst_topology_try_get_port() + * drm_dp_mst_topology_get_port() + */ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { - kref_put(&port->kref, drm_dp_destroy_port); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)-1); + kref_put(&port->topology_kref, drm_dp_destroy_port); } static struct drm_dp_mst_branch * @@ -981,10 +1280,10 @@ drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *rmstb; - if (to_find == mstb) { - kref_get(&mstb->kref); + + if (to_find == mstb) return mstb; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->mstb) { rmstb = drm_dp_mst_topology_get_mstb_validated_locked( @@ -1001,25 +1300,32 @@ drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_branch *rmstb = NULL; + mutex_lock(&mgr->lock); - if (mgr->mst_primary) + if (mgr->mst_primary) { rmstb = drm_dp_mst_topology_get_mstb_validated_locked( mgr->mst_primary, mstb); + + if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb)) + rmstb = NULL; + } mutex_unlock(&mgr->lock); return rmstb; } -static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) +static struct drm_dp_mst_port * +drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *to_find) { struct drm_dp_mst_port *port, *mport; list_for_each_entry(port, &mstb->ports, next) { - if (port == to_find) { - kref_get(&port->kref); + if (port == to_find) return port; - } + if (port->mstb) { - mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); + mport = drm_dp_mst_topology_get_port_validated_locked( + port->mstb, to_find); if (mport) return mport; } @@ -1032,9 +1338,15 @@ drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { struct drm_dp_mst_port *rport = NULL; + mutex_lock(&mgr->lock); - if (mgr->mst_primary) - rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); + if (mgr->mst_primary) { + rport = drm_dp_mst_topology_get_port_validated_locked( + mgr->mst_primary, port); + + if (rport && !drm_dp_mst_topology_try_get_port(rport)) + rport = NULL; + } mutex_unlock(&mgr->lock); return rport; } @@ -1042,11 +1354,12 @@ drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) { struct drm_dp_mst_port *port; + int ret; list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { - kref_get(&port->kref); - return port; + ret = drm_dp_mst_topology_try_get_port(port); + return ret ? 
port : NULL; } } @@ -1095,6 +1408,11 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) if (port->mstb) { port->mstb->mgr = port->mgr; port->mstb->port_parent = port; + /* + * Make sure this port's memory allocation stays + * around until it's child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); send_link = true; } @@ -1155,17 +1473,26 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, bool created = false; int old_pdt = 0; int old_ddps = 0; + port = drm_dp_get_port(mstb, port_msg->port_number); if (!port) { port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return; - kref_init(&port->kref); + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); port->parent = mstb; port->port_num = port_msg->port_number; port->mgr = mstb->mgr; port->aux.name = "DPMST"; port->aux.dev = dev->dev; + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + created = true; } else { old_pdt = port->pdt; @@ -1185,7 +1512,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, for this list */ if (created) { mutex_lock(&mstb->mgr->lock); - kref_get(&port->kref); + drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); mutex_unlock(&mstb->mgr->lock); } @@ -1210,8 +1537,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, if (created && !port->input) { char proppath[255]; - build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + build_mst_prop_path(mstb, port->port_num, proppath, + sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, + port, + proppath); if (!port->connector) { /* remove it from the port list */ mutex_lock(&mstb->mgr->lock); @@ -1221,6 +1551,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_mst_topology_put_port(port); goto out; } + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || port->pdt == DP_PEER_DEVICE_SST_SINK) && port->port_num >= DP_MST_LOGICAL_PORT_0) { @@ -1278,7 +1609,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ { struct drm_dp_mst_branch *mstb; struct drm_dp_mst_port *port; - int i; + int i, ret; /* find the port by iterating down */ mutex_lock(&mgr->lock); @@ -1303,7 +1634,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ } } } - kref_get(&mstb->kref); + ret = drm_dp_mst_topology_try_get_mstb(mstb); + if (!ret) + mstb = NULL; out: mutex_unlock(&mgr->lock); return mstb; @@ -1333,19 +1666,22 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( return NULL; } -static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( - struct drm_dp_mst_topology_mgr *mgr, - uint8_t *guid) +static struct drm_dp_mst_branch * +drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, + uint8_t *guid) { struct drm_dp_mst_branch *mstb; + int ret; /* find the port by iterating down */ mutex_lock(&mgr->lock); mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); - - if (mstb) - kref_get(&mstb->kref); + if (mstb) { + ret = drm_dp_mst_topology_try_get_mstb(mstb); + if (!ret) + mstb = NULL; + } mutex_unlock(&mgr->lock); return mstb; @@ -1384,11 +1720,14 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); struct 
drm_dp_mst_branch *mstb; + int ret; mutex_lock(&mgr->lock); mstb = mgr->mst_primary; if (mstb) { - kref_get(&mstb->kref); + ret = drm_dp_mst_topology_try_get_mstb(mstb); + if (!ret) + mstb = NULL; } mutex_unlock(&mgr->lock); if (mstb) { @@ -1716,8 +2055,11 @@ static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct if (found_port) { rmstb = found_port->parent; - kref_get(&rmstb->kref); - *port_num = found_port->port_num; + if (drm_dp_mst_topology_try_get_mstb(rmstb)) { + *port_num = found_port->port_num; + } else { + rmstb = NULL; + } } } mutex_unlock(&mgr->lock); @@ -1742,7 +2084,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, port_num = port->port_num; mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); if (!mstb) { - mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, + port->parent, + &port_num); if (!mstb) { drm_dp_mst_topology_put_port(port); @@ -2168,7 +2512,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* give this the main reference */ mgr->mst_primary = mstb; - kref_get(&mgr->mst_primary->kref); + drm_dp_mst_topology_get_mstb(mgr->mst_primary); ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); @@ -2743,11 +3087,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); if (ret) { DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", - DIV_ROUND_UP(pbn, mgr->pbn_div), ret); + DIV_ROUND_UP(pbn, mgr->pbn_div), ret); goto out; } DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", - pbn, port->vcpi.num_slots); + pbn, port->vcpi.num_slots); drm_dp_mst_topology_put_port(port); return true; @@ -2791,7 +3135,8 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); * @mgr: manager for this port * @port: unverified port to deallocate vcpi for */ -void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) @@ -3086,13 +3431,6 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } -static void drm_dp_free_mst_port(struct kref *kref) -{ - struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); - kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); - kfree(port); -} - static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); @@ -3113,7 +3451,6 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) list_del(&port->next); mutex_unlock(&mgr->destroy_connector_lock); - kref_init(&port->kref); INIT_LIST_HEAD(&port->next); mgr->cbs->destroy_connector(mgr, port->connector); @@ -3127,7 +3464,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } - kref_put(&port->kref, drm_dp_free_mst_port); + drm_dp_mst_put_port_malloc(port); send_hotplug = true; } if (send_hotplug) diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 371cc2816477..8eca5f29242c 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -44,7 +44,6 @@ struct drm_dp_vcpi { /** * struct drm_dp_mst_port - 
MST port - * @kref: reference count for this port. * @port_num: port number * @input: if this port is an input port. * @mcs: message capability status - DP 1.2 spec. @@ -67,7 +66,18 @@ struct drm_dp_vcpi { * in the MST topology. */ struct drm_dp_mst_port { - struct kref kref; + /** + * @topology_kref: refcount for this port's lifetime in the topology, + * only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_port_malloc() and + * drm_dp_mst_put_port_malloc(). + */ + struct kref malloc_kref; u8 port_num; bool input; @@ -102,7 +112,6 @@ struct drm_dp_mst_port { /** * struct drm_dp_mst_branch - MST branch device. - * @kref: reference count for this port. * @rad: Relative Address to talk to this branch device. * @lct: Link count total to talk to this branch device. * @num_ports: number of ports on the branch. @@ -121,7 +130,19 @@ struct drm_dp_mst_port { * to downstream port of parent branches. */ struct drm_dp_mst_branch { - struct kref kref; + /** + * @topology_kref: refcount for this branch device's lifetime in the + * topology, only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_mstb_malloc() and + * drm_dp_mst_put_mstb_malloc(). + */ + struct kref malloc_kref; + u8 rad[8]; u8 lct; int num_ports; @@ -626,4 +647,7 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, bool power_up); +void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); +void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); + #endif -- 2.19.2
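[Editor's sketch] To show how a driver is expected to use the malloc reference API this patch
exports (drm_dp_mst_get_port_malloc()/drm_dp_mst_put_port_malloc()), here is a minimal sketch of an
.add_connector callback and its matching connector destroy path, following the rule from the
kernel-doc above that a malloc reference must be taken in &drm_dp_mst_topology_cbs.add_connector.
The struct and function names (example_mst_connector, etc.) are hypothetical, error handling is
trimmed, and the usual drm_connector_init()/property setup is elided; it illustrates the pattern,
it is not a drop-in implementation:

        struct example_mst_connector {
                struct drm_connector base;
                struct drm_dp_mst_port *port;
        };

        static struct drm_connector *
        example_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port, const char *path)
        {
                struct example_mst_connector *conn;

                conn = kzalloc(sizeof(*conn), GFP_KERNEL);
                if (!conn)
                        return NULL;

                /*
                 * The driver keeps a pointer to the port for the lifetime of
                 * this connector, so take a malloc reference now - the port's
                 * memory then stays valid even after the port has been
                 * removed from the topology.
                 */
                conn->port = port;
                drm_dp_mst_get_port_malloc(port);

                /* ... drm_connector_init(), encoder attach, properties ... */

                return &conn->base;
        }

        static void example_mst_connector_destroy(struct drm_connector *connector)
        {
                struct example_mst_connector *conn =
                        container_of(connector, struct example_mst_connector, base);

                /* Drop the malloc reference taken in .add_connector */
                drm_dp_mst_put_port_malloc(conn->port);
                drm_connector_cleanup(&conn->base);
                kfree(conn);
        }

The i915, amdgpu and nouveau patches later in this series apply this same pattern to their
existing MST connector code.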
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 03/16] drm/dp_mst: Restart last_connected_port_and_mstb() if topology ref fails
While this isn't a complete fix, this will improve the reliability of drm_dp_get_last_connected_port_and_mstb() pretty significantly during hotplug events, since there's a chance that the in-memory topology tree may not be fully updated when drm_dp_get_last_connected_port_and_mstb() is called and thus might end up causing our search to fail on an mstb whose topology refcount has reached 0, but has not yet been removed from it's parent. Ideally, we should further fix this problem by ensuring that we deal with the potential for racing with a hotplug event, which would look like this: * drm_dp_payload_send_msg() retrieves the last living relative of mstb with drm_dp_get_last_connected_port_and_mstb() * drm_dp_payload_send_msg() starts building payload message At the same time, mstb gets unplugged from the topology and is no longer the actual last living relative of the original mstb * drm_dp_payload_send_msg() tries sending the payload message, hub times out * Hub timed out, we give up and run away-resulting in the payload being leaked This could be fixed by restarting the drm_dp_get_last_connected_port_and_mstb() search whenever we get a timeout, sending the payload to the new mstb, then repeating until either the entire topology is removed from the system or drm_dp_get_last_connected_port_and_mstb() fails. But since the above race condition is not terribly likely, we'll address that in a later patch series once we've improved the recovery handling for VCPI allocations in the rest of the DP MST helpers. Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 55 +++++++++++++++++++++------ 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b380ada09e90..356a95aba2d8 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2043,25 +2043,50 @@ static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); } -static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb, - int *port_num) +/** + * drm_dp_get_last_connected_port_and_mstb() - Find the last living relatives + * in a topology of a given branch device + * @mgr: The topology manager to use + * @mstb: The disconnected branch device + * @port_num: Where to store the number of the last connected port + * + * Searches upwards in the topology starting from @mstb to try to find the + * closest available parent of @mstb that's still connected to the rest of the + * topology. This can be used in order to perform operations like releasing + * payloads, where the branch device which owned the payload may no longer be + * around and thus would require that the payload on the last living relative + * be freed instead. + * + * Returns: + * The last connected &drm_dp_mst_branch in the topology that was a parent of + * @mstb, if there is one. 
+ */ +static struct drm_dp_mst_branch * +drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int *port_num) { struct drm_dp_mst_branch *rmstb = NULL; struct drm_dp_mst_port *found_port; + mutex_lock(&mgr->lock); - if (mgr->mst_primary) { + if (!mgr->mst_primary) + goto out; + + do { found_port = drm_dp_get_last_connected_port_to_mstb(mstb); + if (!found_port) + break; - if (found_port) { + if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { rmstb = found_port->parent; - if (drm_dp_mst_topology_try_get_mstb(rmstb)) { - *port_num = found_port->port_num; - } else { - rmstb = NULL; - } + *port_num = found_port->port_num; + } else { + /* Search again, starting from this parent */ + mstb = found_port->parent; } - } + } while (!rmstb); +out: mutex_unlock(&mgr->lock); return rmstb; } @@ -2110,6 +2135,14 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, drm_dp_queue_down_tx(mgr, txmsg); + /* + * FIXME: there is a small chance that between getting the last + * connected mstb and sending the payload message, the last connected + * mstb could also be removed from the topology. In the future, this + * needs to be fixed by restarting the + * drm_dp_get_last_connected_port_and_mstb() search in the event of a + * timeout if the topology is still connected to the system. + */ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if (ret > 0) { if (txmsg->reply.reply_type == 1) { -- 2.19.2
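
The restart logic above boils down to a small pattern. Below is a simplified, self-contained sketch using a hypothetical refcounted node type rather than the real MST structs: when taking a topology reference on the best candidate fails, the search resumes from that candidate's parent instead of giving up.

#include <linux/kref.h>
#include <linux/types.h>

struct node {
        struct node *parent;
        struct kref topology_kref;
};

/* Mirrors drm_dp_mst_topology_try_get_mstb(): only succeeds while the node
 * still has a live topology reference. */
static bool node_try_get(struct node *n)
{
        return kref_get_unless_zero(&n->topology_kref);
}

static struct node *find_last_living_ancestor(struct node *start)
{
        struct node *candidate = start;

        while (candidate) {
                if (node_try_get(candidate))
                        return candidate; /* caller now owns the reference */
                /* Candidate died under us; restart one level further up */
                candidate = candidate->parent;
        }
        return NULL;
}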
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 04/16] drm/dp_mst: Stop releasing VCPI when removing ports from topology
This has never actually worked, and isn't needed anyway: the driver's always going to try to deallocate VCPI when it tears down the display that the VCPI belongs to. Signed-off-by: Lyude Paul <lyude at redhat.com> Reviewed-by: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 356a95aba2d8..ef8637f37564 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1175,8 +1175,6 @@ static void drm_dp_destroy_port(struct kref *kref) struct drm_dp_mst_topology_mgr *mgr = port->mgr; if (!port->input) { - port->vcpi.num_slots = 0; - kfree(port->cached_edid); /* @@ -3491,12 +3489,6 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) drm_dp_port_teardown_pdt(port, port->pdt); port->pdt = DP_PEER_DEVICE_NONE; - if (!port->input && port->vcpi.vcpi > 0) { - drm_dp_mst_reset_vcpi_slots(mgr, port); - drm_dp_update_payload_part1(mgr); - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - } - drm_dp_mst_put_port_malloc(port); send_hotplug = true; } -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 05/16] drm/dp_mst: Fix payload deallocation on hotplugs using malloc refs
Up until now, freeing payloads on remote MST hubs that just had ports removed has almost never worked because we've been relying on port validation in order to stop us from accessing ports that have already been freed from memory, but ports which need their payloads released due to being removed will never be a valid part of the topology after they've been removed. Since we've introduced malloc refs, we can replace all of the validation logic in payload helpers which are used for deallocation with some well-placed malloc krefs. This ensures that regardless of whether or not the ports are still valid and in the topology, any port which has an allocated payload will remain allocated in memory until it's payloads have been removed - finally allowing us to actually release said payloads correctly. Signed-off-by: Lyude Paul <lyude at redhat.com> Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 54 +++++++++++++++------------ 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index ef8637f37564..11dd3ede7b7d 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2100,10 +2100,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, u8 sinks[DRM_DP_MAX_SDP_STREAMS]; int i; - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) - return -EINVAL; - port_num = port->port_num; mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); if (!mstb) { @@ -2111,10 +2107,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, port->parent, &port_num); - if (!mstb) { - drm_dp_mst_topology_put_port(port); + if (!mstb) return -EINVAL; - } } txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); @@ -2151,7 +2145,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, kfree(txmsg); fail_put: drm_dp_mst_topology_put_mstb(mstb); - drm_dp_mst_topology_put_port(port); return ret; } @@ -2256,15 +2249,16 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, */ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) { - int i, j; - int cur_slots = 1; struct drm_dp_payload req_payload; struct drm_dp_mst_port *port; + int i, j; + int cur_slots = 1; mutex_lock(&mgr->payload_lock); for (i = 0; i < mgr->max_payloads; i++) { struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; struct drm_dp_payload *payload = &mgr->payloads[i]; + bool put_port = false; /* solve the current payloads - compare to the hw ones - update the hw view */ @@ -2272,12 +2266,20 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) if (vcpi) { port = container_of(vcpi, struct drm_dp_mst_port, vcpi); - port = drm_dp_mst_topology_get_port_validated(mgr, - port); - if (!port) { - mutex_unlock(&mgr->payload_lock); - return -EINVAL; + + /* Validated ports don't matter if we're releasing + * VCPI + */ + if (vcpi->num_slots) { + port = drm_dp_mst_topology_get_port_validated( + mgr, port); + if (!port) { + mutex_unlock(&mgr->payload_lock); + return -EINVAL; + } + put_port = true; } + req_payload.num_slots = vcpi->num_slots; req_payload.vcpi = vcpi->vcpi; } else { @@ -2309,7 +2311,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) } cur_slots += req_payload.num_slots; 
- if (port) + if (put_port) drm_dp_mst_topology_put_port(port); } @@ -3124,6 +3126,8 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots); + /* Keep port allocated until it's payload has been removed */ + drm_dp_mst_get_port_malloc(port); drm_dp_mst_topology_put_port(port); return true; out: @@ -3153,11 +3157,12 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); */ void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) - return; + /* + * A port with VCPI will remain allocated until it's VCPI is + * released, no verified ref needed + */ + port->vcpi.num_slots = 0; - drm_dp_mst_topology_put_port(port); } EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); @@ -3169,16 +3174,17 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) - return; + /* + * A port with VCPI will remain allocated until it's VCPI is + * released, no verified ref needed + */ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); port->vcpi.num_slots = 0; port->vcpi.pbn = 0; port->vcpi.aligned_pbn = 0; port->vcpi.vcpi = 0; - drm_dp_mst_topology_put_port(port); + drm_dp_mst_put_port_malloc(port); } EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); -- 2.19.2
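
To make the effect of this change concrete, here is a hedged sketch of the teardown sequence a hypothetical driver runs when disabling an MST stream. The drm_dp_* calls are the existing exported helpers; the point of this patch is that the sequence is now safe even after the port has been unplugged from the topology.

static void foo_mst_stream_disable(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_dp_mst_port *port)
{
        /* Mark the VCPI as released in the proposed payload table */
        drm_dp_mst_reset_vcpi_slots(mgr, port);

        /* Push the updated payload table out to the remaining hub(s); with
         * this patch the helper no longer requires the port to still be a
         * valid member of the topology */
        drm_dp_update_payload_part1(mgr);

        /* Free the payload id and drop the malloc reference that
         * drm_dp_mst_allocate_vcpi() took when the stream was enabled */
        drm_dp_mst_deallocate_vcpi(mgr, port);
}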
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 06/16] drm/i915: Keep malloc references to MST ports
So that the ports stay around until we've destroyed the connectors, in order to ensure that we don't pass an invalid pointer to any MST helpers once we introduce the new MST VCPI helpers. Changes since v1: * Move drm_dp_mst_get_port_malloc() to where we assign intel_connector->port - danvet Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/i915/intel_connector.c | 4 ++++ drivers/gpu/drm/i915/intel_dp_mst.c | 1 + 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c index 18e370f607bc..37d2c644f4b8 100644 --- a/drivers/gpu/drm/i915/intel_connector.c +++ b/drivers/gpu/drm/i915/intel_connector.c @@ -95,6 +95,10 @@ void intel_connector_destroy(struct drm_connector *connector) intel_panel_fini(&intel_connector->panel); drm_connector_cleanup(connector); + + if (intel_connector->port) + drm_dp_mst_put_port_malloc(intel_connector->port); + kfree(connector); } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index f05427b74e34..631fd1537252 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -457,6 +457,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_connector->get_hw_state = intel_dp_mst_get_hw_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; + drm_dp_mst_get_port_malloc(port); connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 07/16] drm/amdgpu/display: Keep malloc ref to MST port
Just like i915 and nouveau, it's a good idea for us to hold a malloc reference to the port here so that we never pass a freed pointer to any of the DP MST helper functions. Also, we stop unsetting aconnector->port in dm_dp_destroy_mst_connector(). There's literally no point to that assignment that I can see anyway. Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5e7ca1f3a8d1..24632727e127 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -191,6 +191,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); drm_connector_cleanup(connector); + drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port); kfree(amdgpu_dm_connector); } @@ -363,7 +364,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, amdgpu_dm_connector_funcs_reset(connector); DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); + aconnector, connector->base.id, aconnector->mst_port); + + drm_dp_mst_get_port_malloc(port); DRM_DEBUG_KMS(":%d\n", connector->base.id); @@ -379,12 +382,12 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); + aconnector, connector->base.id, aconnector->mst_port); - aconnector->port = NULL; if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); - dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + dc_link_remove_remote_sink(aconnector->dc_link, + aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; } -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 08/16] drm/nouveau: Remove bogus cleanup in nv50_mstm_add_connector()
Trying to destroy the connector using mstc->connector.funcs->destroy() if connector initialization fails is wrong: there is no possible codepath in which nv50_mstc_new() returns <0 while leaving mstc non-NULL, so the cleanup in nv50_mstm_add_connector() can never be reached with a valid connector.
Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 26af45785939..641252208e67 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1099,11 +1099,8 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr, int ret; ret = nv50_mstc_new(mstm, port, path, &mstc); - if (ret) { - if (mstc) - mstc->connector.funcs->destroy(&mstc->connector); + if (ret) return NULL; - } return &mstc->connector; } -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 09/16] drm/nouveau: Remove unnecessary VCPI checks in nv50_msto_cleanup()
There is no need to look at the port's VCPI allocation before calling drm_dp_mst_deallocate_vcpi(), as we already have msto->disabled to let us avoid cleaning up an msto more than once. The DP MST core will never call drm_dp_mst_deallocate_vcpi() on its own, which is presumably what these checks are meant to protect against. More importantly though, we're about to stop clearing mstc->port in the next commit, which means we could potentially hit a use-after-free error if we tried to check mstc->port->vcpi here. So to make life easier for anyone who bisects this code in the future, use msto->disabled to check whether or not we need to deallocate VCPI instead.
Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 641252208e67..0f7d72518604 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -704,14 +704,17 @@ nv50_msto_cleanup(struct nv50_msto *msto) struct nv50_mstc *mstc = msto->mstc; struct nv50_mstm *mstm = mstc->mstm; + if (!msto->disabled) + return; + NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name); - if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto)) + + if (mstc->port) drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); - if (msto->disabled) { - msto->mstc = NULL; - msto->head = NULL; - msto->disabled = false; - } + + msto->mstc = NULL; + msto->head = NULL; + msto->disabled = false; } static void -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 10/16] drm/nouveau: Keep malloc references to MST ports
Now that we finally have a sane way to keep port allocations around, use it to fix the potential unchecked ->port accesses that nouveau makes by making sure we keep the mst port allocated for as long as it's drm_connector is accessible. Additionally, now that we've guaranteed that mstc->port is allocated for as long as we keep mstc around we can remove the connector registration checks for codepaths which release payloads, allowing us to release payloads on active topologies properly. These registration checks were only required before in order to avoid situations where mstc->port could technically be pointing at freed memory. Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 0f7d72518604..982054bbcc8b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -964,7 +964,11 @@ static void nv50_mstc_destroy(struct drm_connector *connector) { struct nv50_mstc *mstc = nv50_mstc(connector); + drm_connector_cleanup(&mstc->connector); + if (mstc->port) + drm_dp_mst_put_port_malloc(mstc->port); + kfree(mstc); } @@ -1012,6 +1016,7 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0); drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0); drm_connector_set_path_property(&mstc->connector, path); + drm_dp_mst_get_port_malloc(port); return 0; } @@ -1077,6 +1082,7 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector); drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL); + drm_dp_mst_put_port_malloc(mstc->port); mstc->port = NULL; drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 11/16] drm/nouveau: Stop unsetting mstc->port, use malloc refs
Same as we did for i915, but for nouveau this time. Additionally, we grab a malloc reference to the port that lasts for the entire lifetime of nv50_mstc, which gives us the guarantee that mstc->port will always point to valid memory for as long as the mstc stays around. Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 982054bbcc8b..157d208d37b5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -709,8 +709,7 @@ nv50_msto_cleanup(struct nv50_msto *msto) NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name); - if (mstc->port) - drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); + drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); msto->mstc = NULL; msto->head = NULL; @@ -735,7 +734,7 @@ nv50_msto_prepare(struct nv50_msto *msto) }; NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name); - if (mstc->port && mstc->port->vcpi.vcpi > 0) { + if (mstc->port->vcpi.vcpi > 0) { struct drm_dp_payload *payload = nv50_msto_payload(msto); if (payload) { args.vcpi.start_slot = payload->start_slot; @@ -832,8 +831,7 @@ nv50_msto_disable(struct drm_encoder *encoder) struct nv50_mstc *mstc = msto->mstc; struct nv50_mstm *mstm = mstc->mstm; - if (mstc->port) - drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port); + drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port); mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0); mstm->modified = true; @@ -945,7 +943,7 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) enum drm_connector_status conn_status; int ret; - if (!mstc->port) + if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; ret = pm_runtime_get_sync(connector->dev->dev); @@ -966,8 +964,7 @@ nv50_mstc_destroy(struct drm_connector *connector) struct nv50_mstc *mstc = nv50_mstc(connector); drm_connector_cleanup(&mstc->connector); - if (mstc->port) - drm_dp_mst_put_port_malloc(mstc->port); + drm_dp_mst_put_port_malloc(mstc->port); kfree(mstc); } @@ -1081,11 +1078,6 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector); - drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL); - drm_dp_mst_put_port_malloc(mstc->port); - mstc->port = NULL; - drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); - drm_connector_put(&mstc->connector); } -- 2.19.2
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 12/16] drm/nouveau: Grab payload lock in nv50_msto_payload()
Going through the currently programmed payloads isn't safe without holding mgr->payload_lock, so actually do that and warn if anyone tries calling nv50_msto_payload() in the future without grabbing the right locks. Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 157d208d37b5..67f7bf97e5d9 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -680,6 +680,8 @@ nv50_msto_payload(struct nv50_msto *msto) struct nv50_mstm *mstm = mstc->mstm; int vcpi = mstc->port->vcpi.vcpi, i; + WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock)); + NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi); for (i = 0; i < mstm->mgr.max_payloads; i++) { struct drm_dp_payload *payload = &mstm->mgr.payloads[i]; @@ -733,6 +735,8 @@ nv50_msto_prepare(struct nv50_msto *msto) (0x0100 << msto->head->base.index), }; + mutex_lock(&mstm->mgr.payload_lock); + NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name); if (mstc->port->vcpi.vcpi > 0) { struct drm_dp_payload *payload = nv50_msto_payload(msto); @@ -748,7 +752,9 @@ nv50_msto_prepare(struct nv50_msto *msto) msto->encoder.name, msto->head->base.base.name, args.vcpi.start_slot, args.vcpi.num_slots, args.vcpi.pbn, args.vcpi.aligned_pbn); + nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args)); + mutex_unlock(&mstm->mgr.payload_lock); } static int -- 2.19.2
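
As a usage sketch, any helper that walks the payload table should follow the same rule nv50_msto_prepare() now follows. The function below is hypothetical, but the manager fields it touches (payload_lock, payloads, max_payloads) are the real ones, and the lock protects against drm_dp_update_payload_part1() rewriting the table concurrently.

static int foo_payload_start_slot(struct drm_dp_mst_topology_mgr *mgr,
                                  int vcpi)
{
        int i, start = -1;

        mutex_lock(&mgr->payload_lock);
        for (i = 0; i < mgr->max_payloads; i++) {
                if (mgr->payloads[i].vcpi == vcpi) {
                        start = mgr->payloads[i].start_slot;
                        break;
                }
        }
        mutex_unlock(&mgr->payload_lock);

        return start;
}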
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 13/16] drm/dp_mst: Add some atomic state iterator macros
Changes since v6: - Move EXPORT_SYMBOL() for drm_dp_mst_topology_state_funcs to this commit - Document __drm_dp_mst_state_iter_get() and note that it shouldn't be called directly Signed-off-by: Lyude Paul <lyude at redhat.com> Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 5 +- include/drm/drm_dp_mst_helper.h | 96 +++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 11dd3ede7b7d..22bc54d83b20 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3525,10 +3525,11 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, kfree(mst_state); } -static const struct drm_private_state_funcs mst_state_funcs = { +const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = { .atomic_duplicate_state = drm_dp_mst_duplicate_state, .atomic_destroy_state = drm_dp_mst_destroy_state, }; +EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); /** * drm_atomic_get_mst_topology_state: get MST topology state @@ -3612,7 +3613,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, drm_atomic_private_obj_init(dev, &mgr->base, &mst_state->base, - &mst_state_funcs); + &drm_dp_mst_topology_state_funcs); return 0; } diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 8eca5f29242c..581163c8d7d7 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -650,4 +650,100 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); +extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; + +/** + * __drm_dp_mst_state_iter_get - private atomic state iterator function for + * macro-internal use + * @state: &struct drm_atomic_state pointer + * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state + * iteration cursor + * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state + * iteration cursor + * @i: int iteration cursor, for macro-internal use + * + * Used by for_each_oldnew_mst_mgr_in_state(), + * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't + * call this directly. + * + * Returns: + * True if the current &struct drm_private_obj is a &struct + * drm_dp_mst_topology_mgr, false otherwise. 
+ */ +static inline bool +__drm_dp_mst_state_iter_get(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr **mgr, + struct drm_dp_mst_topology_state **old_state, + struct drm_dp_mst_topology_state **new_state, + int i) +{ + struct __drm_private_objs_state *objs_state = &state->private_objs[i]; + + if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) + return false; + + *mgr = to_dp_mst_topology_mgr(objs_state->ptr); + if (old_state) + *old_state = to_dp_mst_topology_state(objs_state->old_state); + if (new_state) + *new_state = to_dp_mst_topology_state(objs_state->new_state); + + return true; +} + +/** + * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology + * managers in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking both old and new state. This is useful in places where the state + * delta needs to be considered, for example in atomic check functions. + */ +#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) + +/** + * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the old state. This is useful in disable functions, where we + * need the old state the hardware is still in. + */ +#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) + +/** + * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the new state. This is useful in enable functions, where we + * need the new state the hardware should be in when the atomic commit + * operation has completed. + */ +#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) + #endif -- 2.19.2
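
A rough usage sketch for the new iterators follows; the foo_* function is hypothetical, and this is essentially the loop that drm_dp_mst_atomic_check() performs internally in a later patch of this series.

static int foo_check_mst_managers(struct drm_atomic_state *state)
{
        struct drm_dp_mst_topology_mgr *mgr;
        struct drm_dp_mst_topology_state *mst_state;
        int i;

        for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
                /* Only DP MST private objects reach this point; any other
                 * private object in the state is skipped by the iterator. */

                /* ... validate mst_state for this mgr ... */
        }

        return 0;
}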
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 14/16] drm/dp_mst: Start tracking per-port VCPI allocations
There has been a TODO waiting for quite a long time in drm_dp_mst_topology.c: /* We cannot rely on port->vcpi.num_slots to update * topology_state->avail_slots as the port may not exist if the parent * branch device was unplugged. This should be fixed by tracking * per-port slot allocation in drm_dp_mst_topology_state instead of * depending on the caller to tell us how many slots to release. */ That's not the only reason we should fix this: forcing the driver to track the VCPI allocations throughout a state's atomic check is error prone, because it means that extra care has to be taken with the order that drm_dp_atomic_find_vcpi_slots() and drm_dp_atomic_release_vcpi_slots() are called in in order to ensure idempotency. Currently the only driver actually using these helpers, i915, doesn't even do this correctly: multiple ->best_encoder() checks with i915's current implementation would not be idempotent and would over-allocate VCPI slots, something I learned trying to implement fallback retraining in MST. So: simplify this whole mess, and teach drm_dp_atomic_find_vcpi_slots() and drm_dp_atomic_release_vcpi_slots() to track the VCPI allocations for each port. This allows us to ensure idempotency without having to rely on the driver as much. Additionally: the driver doesn't need to do any kind of VCPI slot tracking anymore if it doesn't need it for it's own internal state. Additionally; this adds a new drm_dp_mst_atomic_check() helper which must be used by atomic drivers to perform validity checks for the new VCPI allocations incurred by a state. Also: update the documentation and make it more obvious that these /must/ be called by /all/ atomic drivers supporting MST. Changes since v7: - Don't check for mixed stale/valid VCPI allocations, just rely on connector registration to stop such erroneous modesets Changes since v6: - Keep a kref to all of the ports we have allocations on. This required a good bit of changing to when we call drm_dp_find_vcpi_slots(), mainly that we need to ensure that we only redo VCPI allocations on actual mode or CRTC changes, not crtc_state->active changes. Additionally, we no longer take the registration of the DRM connector for each port into account because so long as we have a kref to the port in the new or previous atomic state, the connector will stay registered. - Use the small changes to drm_dp_put_port() to add even more error checking to make misusage of the helpers more obvious. I added this after having to chase down various use-after-free conditions that started popping up from the new helpers so no one else has to troubleshoot that. - Move some accidental DRM_DEBUG_KMS() calls to DRM_DEBUG_ATOMIC() - Update documentation again, note that find/release() should both not be called on the same port in a single atomic check phase (but multiple calls to one or the other is OK) Changes since v4: - Don't skip the atomic checks for VCPI allocations if no new VCPI allocations happen in a state. This makes the next change I'm about to list here a lot easier to implement. - Don't ignore VCPI allocations on destroyed ports, instead ensure that when ports are destroyed and still have VCPI allocations in the topology state, the only state changes allowed are releasing said ports' VCPI. This prevents a state with a mix of VCPI allocations from destroyed ports, and allocations from valid ports. 
Changes since v3: - Don't release VCPI allocations in the topology state immediately in drm_dp_atomic_release_vcpi_slots(), instead mark them as 0 and skip over them in drm_dp_mst_duplicate_state(). This makes it so drm_dp_atomic_release_vcpi_slots() is still idempotent while also throwing warnings if the driver messes up it's book keeping and tries to release VCPI slots on a port that doesn't have any pre-existing VCPI allocation - danvet - Change mst_state/state in some debugging messages to "mst state" Changes since v2: - Use kmemdup() for duplicating MST state - danvet - Move port validation out of duplicate state callback - danvet - Handle looping through MST topology states in drm_dp_mst_atomic_check() so the driver doesn't have to do it - Fix documentation in drm_dp_atomic_find_vcpi_slots() - Move the atomic check for each individual topology state into it's own function, reduces indenting - Don't consider "stale" MST ports when calculating the bandwidth requirements. This is needed because originally we relied on the state duplication functions to prune any stale ports from the new state, which would prevent us from incorrectly considering their bandwidth requirements alongside legitimate new payloads. - Add function references in drm_dp_atomic_release_vcpi_slots() - danvet - Annotate atomic VCPI and atomic check functions with __must_check - danvet Changes since v1: - Don't use the now-removed ->atomic_check() for private objects hook, just give drivers a function to call themselves Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel.vetter at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 251 ++++++++++++++++++++++---- drivers/gpu/drm/i915/intel_display.c | 4 + drivers/gpu/drm/i915/intel_dp_mst.c | 64 ++++--- include/drm/drm_dp_mst_helper.h | 23 ++- 4 files changed, 277 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 22bc54d83b20..b55b58bf86ba 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3016,21 +3016,42 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, } /** - * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state + * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state * @state: global atomic state * @mgr: MST topology manager for the port * @port: port to find vcpi slots for * @pbn: bandwidth required for the mode in PBN * - * RETURNS: - * Total slots in the atomic state assigned for this port or error + * Allocates VCPI slots to @port, replacing any previous VCPI allocations it + * may have had. Any atomic drivers which support MST must call this function + * in their &drm_encoder_helper_funcs.atomic_check() callback to change the + * current VCPI allocation for the new state, but only when + * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set + * to ensure compatibility with userspace applications that still use the + * legacy modesetting UAPI. + * + * Allocations set by this function are not checked against the bandwidth + * restraints of @mgr until the driver calls drm_dp_mst_atomic_check(). + * + * Additionally, it is OK to call this function multiple times on the same + * @port as needed. 
It is not OK however, to call this function and + * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase. + * + * See also: + * drm_dp_atomic_release_vcpi_slots() + * drm_dp_mst_atomic_check() + * + * Returns: + * Total slots in the atomic state assigned for this port, or a negative error + * code if the port no longer exists */ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn) { struct drm_dp_mst_topology_state *topology_state; - int req_slots; + struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; + int prev_slots, req_slots, ret; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) @@ -3039,20 +3060,56 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, port = drm_dp_mst_topology_get_port_validated(mgr, port); if (port == NULL) return -EINVAL; - req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); - DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n", - req_slots, topology_state->avail_slots); - if (req_slots > topology_state->avail_slots) { - drm_dp_mst_topology_put_port(port); - return -ENOSPC; + topology_state->vcpi_allocated = true; + + /* Find the current allocation for this port, if any */ + list_for_each_entry(pos, &topology_state->vcpis, next) { + if (pos->port == port) { + vcpi = pos; + prev_slots = vcpi->vcpi; + + /* + * This should never happen, unless the driver tries + * releasing and allocating the same VCPI allocation, + * which is an error + */ + if (WARN_ON(!prev_slots)) { + DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n", + port); + return -EINVAL; + } + + break; + } } + if (!vcpi) + prev_slots = 0; - topology_state->avail_slots -= req_slots; - DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots); + req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", + port->connector->base.id, port->connector->name, + port, prev_slots, req_slots); + /* Add the new allocation to the state */ + if (!vcpi) { + vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL); + if (!vcpi) { + ret = -ENOMEM; + goto out; + } + + drm_dp_mst_get_port_malloc(port); + vcpi->port = port; + list_add(&vcpi->next, &topology_state->vcpis); + } + vcpi->vcpi = req_slots; + + ret = req_slots; +out: drm_dp_mst_topology_put_port(port); - return req_slots; + return ret; } EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); @@ -3060,31 +3117,57 @@ EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots * @state: global atomic state * @mgr: MST topology manager for the port - * @slots: number of vcpi slots to release + * @port: The port to release the VCPI slots from * - * RETURNS: - * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or - * negative error code + * Releases any VCPI slots that have been allocated to a port in the atomic + * state. Any atomic drivers which support MST must call this function in + * their &drm_connector_helper_funcs.atomic_check() callback when the + * connector will no longer have VCPI allocated (e.g. because it's CRTC was + * removed) when it had VCPI allocated in the previous atomic state. + * + * It is OK to call this even if @port has been removed from the system. + * Additionally, it is OK to call this function multiple times on the same + * @port as needed. 
It is not OK however, to call this function and + * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check + * phase. + * + * See also: + * drm_dp_atomic_find_vcpi_slots() + * drm_dp_mst_atomic_check() + * + * Returns: + * 0 if all slots for this port were added back to + * &drm_dp_mst_topology_state.avail_slots or negative error code */ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, - int slots) + struct drm_dp_mst_port *port) { struct drm_dp_mst_topology_state *topology_state; + struct drm_dp_vcpi_allocation *pos; + bool found = false; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) return PTR_ERR(topology_state); - /* We cannot rely on port->vcpi.num_slots to update - * topology_state->avail_slots as the port may not exist if the parent - * branch device was unplugged. This should be fixed by tracking - * per-port slot allocation in drm_dp_mst_topology_state instead of - * depending on the caller to tell us how many slots to release. - */ - topology_state->avail_slots += slots; - DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n", - slots, topology_state->avail_slots); + list_for_each_entry(pos, &topology_state->vcpis, next) { + if (pos->port == port) { + found = true; + break; + } + } + if (WARN_ON(!found)) { + DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n", + port, &topology_state->base); + return -EINVAL; + } + + DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); + if (pos->vcpi) { + drm_dp_mst_put_port_malloc(port); + pos->vcpi = 0; + } return 0; } @@ -3505,15 +3588,42 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) static struct drm_private_state * drm_dp_mst_duplicate_state(struct drm_private_obj *obj) { - struct drm_dp_mst_topology_state *state; + struct drm_dp_mst_topology_state *state, *old_state + to_dp_mst_topology_state(obj->state); + struct drm_dp_vcpi_allocation *pos, *vcpi; - state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + state = kmemdup(old_state, sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + INIT_LIST_HEAD(&state->vcpis); + state->vcpi_allocated = false; + + list_for_each_entry(pos, &old_state->vcpis, next) { + /* Prune leftover freed VCPI allocations */ + if (!pos->vcpi) + continue; + + vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL); + if (!vcpi) + goto fail; + + drm_dp_mst_get_port_malloc(vcpi->port); + list_add(&vcpi->next, &state->vcpis); + } + return &state->base; + +fail: + list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) { + drm_dp_mst_put_port_malloc(pos->port); + kfree(pos); + } + kfree(state); + + return NULL; } static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, @@ -3521,10 +3631,89 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, { struct drm_dp_mst_topology_state *mst_state to_dp_mst_topology_state(state); + struct drm_dp_vcpi_allocation *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) { + /* We only keep references to ports with non-zero VCPIs */ + if (pos->vcpi) + drm_dp_mst_put_port_malloc(pos->port); + kfree(pos); + } kfree(mst_state); } +static inline int +drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state) +{ + struct drm_dp_vcpi_allocation *vcpi; + int avail_slots = 63; + + list_for_each_entry(vcpi, &mst_state->vcpis, next) { + /* Releasing VCPI is 
always OK-even if the port is gone */ + if (!vcpi->vcpi) { + DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n", + vcpi->port); + continue; + } + + DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n", + vcpi->port, vcpi->vcpi); + + avail_slots -= vcpi->vcpi; + if (avail_slots < 0) { + DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", + vcpi->port, mst_state, + avail_slots + vcpi->vcpi); + return -ENOSPC; + } + + } + DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", + mgr, mst_state, avail_slots, + 63 - avail_slots); + + return 0; +} + +/** + * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an + * atomic update is valid + * @state: Pointer to the new &struct drm_dp_mst_topology_state + * + * Checks the given topology state for an atomic update to ensure that it's + * valid. This includes checking whether there's enough bandwidth to support + * the new VCPI allocations in the atomic update. + * + * Any atomic drivers supporting DP MST must make sure to call this after + * checking the rest of their state in their + * &drm_mode_config_funcs.atomic_check() callback. + * + * See also: + * drm_dp_atomic_find_vcpi_slots() + * drm_dp_atomic_release_vcpi_slots() + * + * Returns: + * + * 0 if the new state is valid, negative error code otherwise. + */ +int drm_dp_mst_atomic_check(struct drm_atomic_state *state) +{ + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_topology_state *mst_state; + int i, ret = 0; + + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_atomic_check); + const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = { .atomic_duplicate_state = drm_dp_mst_duplicate_state, .atomic_destroy_state = drm_dp_mst_destroy_state, @@ -3607,9 +3796,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, return -ENOMEM; mst_state->mgr = mgr; - - /* max. 
time slots - one slot for MTP header */ - mst_state->avail_slots = 63; + INIT_LIST_HEAD(&mst_state->vcpis); drm_atomic_private_obj_init(dev, &mgr->base, &mst_state->base, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b7094822aa9..9a7e3cd289f9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12690,6 +12690,10 @@ static int intel_atomic_check(struct drm_device *dev, "[modeset]" : "[fastset]"); } + ret = drm_dp_mst_atomic_check(state); + if (ret) + return ret; + if (any_ms) { ret = intel_modeset_checks(state); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 631fd1537252..9e8a080e3758 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -41,8 +41,13 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_connector *connector = conn_state->connector; void *port = to_intel_connector(connector)->port; struct drm_atomic_state *state = pipe_config->base.state; + struct drm_crtc *crtc = pipe_config->base.crtc; + struct drm_crtc_state *old_crtc_state + drm_atomic_get_old_crtc_state(state, crtc); + struct drm_crtc_state *new_crtc_state = &pipe_config->base; int bpp; - int lane_count, slots = 0; + int lane_count, slots + to_intel_crtc_state(old_crtc_state)->dp_m_n.tu; const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int mst_pbn; bool constant_n = drm_dp_has_quirk(&intel_dp->desc, @@ -77,8 +82,12 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); pipe_config->pbn = mst_pbn; - /* Zombie connectors can't have VCPI slots */ - if (!drm_connector_is_unregistered(connector)) { + /* Only change VCPI allocation on actual mode changes, to prevent us + * from trying to allocate VCPI to ports that no longer exist when we + * may just be trying to disable DPMS on them + */ + if (new_crtc_state->mode_changed || + new_crtc_state->connectors_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port, @@ -107,36 +116,39 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, return true; } -static int intel_dp_mst_atomic_check(struct drm_connector *connector, - struct drm_connector_state *new_conn_state) +static int +intel_dp_mst_atomic_check(struct drm_connector *connector, + struct drm_connector_state *new_conn_state) { + struct intel_connector *intel_connector + to_intel_connector(connector); + struct drm_dp_mst_topology_mgr *mgr + &intel_connector->mst_port->mst_mgr; + struct drm_dp_mst_port *port = intel_connector->port; struct drm_atomic_state *state = new_conn_state->state; - struct drm_connector_state *old_conn_state; - struct drm_crtc *old_crtc; - struct drm_crtc_state *crtc_state; - int slots, ret = 0; + struct drm_connector_state *old_conn_state + drm_atomic_get_old_connector_state(state, connector); + struct drm_crtc *old_crtc = old_conn_state->crtc, + *new_crtc = new_conn_state->crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; - old_conn_state = drm_atomic_get_old_connector_state(state, connector); - old_crtc = old_conn_state->crtc; if (!old_crtc) - return ret; + return 0; - crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc); - slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu; - if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) { - struct drm_dp_mst_topology_mgr *mgr; - struct drm_encoder *old_encoder; + 
old_crtc_state = drm_atomic_get_old_crtc_state(state, old_crtc); + if (!old_crtc_state || + !to_intel_crtc_state(old_crtc_state)->dp_m_n.tu) + return 0; - old_encoder = old_conn_state->best_encoder; - mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr; + /* If we switched CRTCs, clear the previous one's allocation */ + new_crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc); + if (new_crtc_state->connectors_changed && !new_crtc_state->enable) + to_intel_crtc_state(new_crtc_state)->dp_m_n.tu = 0; - ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots); - if (ret) - DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret); - else - to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0; - } - return ret; + if (new_crtc) + return 0; + + return drm_dp_atomic_release_vcpi_slots(state, mgr, port); } static void intel_mst_disable_dp(struct intel_encoder *encoder, diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 581163c8d7d7..451d020f0137 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -425,9 +425,15 @@ struct drm_dp_payload { #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) +struct drm_dp_vcpi_allocation { + struct drm_dp_mst_port *port; + int vcpi; + struct list_head next; +}; + struct drm_dp_mst_topology_state { struct drm_private_state base; - int avail_slots; + struct list_head vcpis; struct drm_dp_mst_topology_mgr *mgr; }; @@ -638,14 +644,17 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); -int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn); -int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr, - int slots); +int __must_check +drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn); +int __must_check +drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, bool power_up); +int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); -- 2.19.2
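
Putting the documented rules above together, the calling convention for an atomic driver looks roughly like the sketch below. All foo_* names are hypothetical wrappers rather than the exact DRM callback signatures; the drm_dp_* calls and their arguments are the ones added by this patch.

/* 1) From the encoder's ->atomic_check(): (re)allocate VCPI only on real
 *    modesets, never on plain DPMS/active-only changes. */
static int foo_encoder_atomic_check(struct drm_crtc_state *crtc_state,
                                    struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_port *port, int pbn)
{
        int slots;

        if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
                return 0;

        slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state, mgr, port,
                                              pbn);
        return slots < 0 ? slots : 0;
}

/* 2) From the connector's ->atomic_check(): release the allocation when the
 *    connector is being taken off its CRTC (simplified condition). */
static int foo_connector_atomic_check(struct drm_connector_state *old_state,
                                      struct drm_connector_state *new_state,
                                      struct drm_dp_mst_topology_mgr *mgr,
                                      struct drm_dp_mst_port *port)
{
        if (old_state->crtc && !new_state->crtc)
                return drm_dp_atomic_release_vcpi_slots(new_state->state,
                                                        mgr, port);
        return 0;
}

/* 3) From the device-wide ->atomic_check(): validate the total bandwidth of
 *    every MST topology touched by this state. */
static int foo_atomic_check(struct drm_device *dev,
                            struct drm_atomic_state *state)
{
        /* ... driver-specific and helper checks ... */

        return drm_dp_mst_atomic_check(state);
}

Note the constraint called out in the kernel-doc above: within a single atomic check phase a given port should see either find_vcpi_slots() or release_vcpi_slots(), not both.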
Lyude Paul
2018-Dec-20 00:19 UTC
[Nouveau] [PATCH v2 15/16] drm/dp_mst: Check payload count in drm_dp_mst_atomic_check()
It occurred to me that we never actually check this! So let's start doing that. Signed-off-by: Lyude Paul <lyude at redhat.com> Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/drm_dp_mst_topology.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b55b58bf86ba..455fcf0a82df 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3061,8 +3061,6 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, if (port == NULL) return -EINVAL; - topology_state->vcpi_allocated = true; - /* Find the current allocation for this port, if any */ list_for_each_entry(pos, &topology_state->vcpis, next) { if (pos->port == port) { @@ -3599,7 +3597,6 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj) __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); INIT_LIST_HEAD(&state->vcpis); - state->vcpi_allocated = false; list_for_each_entry(pos, &old_state->vcpis, next) { /* Prune leftover freed VCPI allocations */ @@ -3648,7 +3645,7 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_topology_state *mst_state) { struct drm_dp_vcpi_allocation *vcpi; - int avail_slots = 63; + int avail_slots = 63, payload_count = 0; list_for_each_entry(vcpi, &mst_state->vcpis, next) { /* Releasing VCPI is always OK-even if the port is gone */ @@ -3669,6 +3666,12 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, return -ENOSPC; } + if (++payload_count > mgr->max_payloads) { + DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n", + mgr, mst_state, mgr->max_payloads); + return -EINVAL; + } + } DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", mgr, mst_state, avail_slots, -- 2.19.2
Lyude Paul
2018-Dec-20 00:20 UTC
[Nouveau] [PATCH v2 16/16] drm/nouveau: Use atomic VCPI helpers for MST
Currently, nouveau uses the yolo method of setting up MST displays: it uses the old VCPI helpers (drm_dp_find_vcpi_slots()) for computing the display configuration. These helpers don't take care to make sure they take a reference to the mstb port that they're checking, and additionally don't actually check whether or not the topology still has enough bandwidth to provide the VCPI tokens required. So, drop usage of the old helpers and move entirely over to the atomic helpers. Changes since v5: - Update nv50_msto_atomic_check() and nv50_mstc_atomic_check() to the new requirements for drm_dp_atomic_find_vcpi_slots() and drm_dp_atomic_release_vcpi_slots() Signed-off-by: Lyude Paul <lyude at redhat.com> Cc: Daniel Vetter <daniel.vetter at ffwll.ch> Cc: David Airlie <airlied at redhat.com> Cc: Jerry Zuo <Jerry.Zuo at amd.com> Cc: Harry Wentland <harry.wentland at amd.com> Cc: Juston Li <juston.li at intel.com> --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 52 ++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 67f7bf97e5d9..df696008d205 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -762,16 +762,22 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct nv50_mstc *mstc = nv50_mstc(conn_state->connector); + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector = conn_state->connector; + struct nv50_mstc *mstc = nv50_mstc(connector); struct nv50_mstm *mstm = mstc->mstm; - int bpp = conn_state->connector->display_info.bpc * 3; + int bpp = connector->display_info.bpc * 3; int slots; - mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp); + mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, + bpp); - slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn); - if (slots < 0) - return slots; + if (crtc_state->connectors_changed || crtc_state->mode_changed) { + slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, + mstc->port, mstc->pbn); + if (slots < 0) + return slots; + } return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, mstc->native); @@ -934,12 +940,42 @@ nv50_mstc_get_modes(struct drm_connector *connector) return ret; } +static int +nv50_mstc_atomic_check(struct drm_connector *connector, + struct drm_connector_state *new_conn_state) +{ + struct drm_atomic_state *state = new_conn_state->state; + struct nv50_mstc *mstc = nv50_mstc(connector); + struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr; + struct drm_connector_state *old_conn_state + drm_atomic_get_old_connector_state(state, connector); + struct drm_crtc_state *old_crtc_state; + struct drm_crtc *new_crtc = new_conn_state->crtc, + *old_crtc = old_conn_state->crtc; + + if (!old_crtc) + return 0; + + old_crtc_state = drm_atomic_get_old_crtc_state(state, old_crtc); + if (!old_crtc_state || !old_crtc_state->enable) + return 0; + + if (new_crtc) + return 0; + + /* This connector will be left without an enabled CRTC, so its VCPI + * must be released here + */ + return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); +} + static const struct drm_connector_helper_funcs nv50_mstc_help = { .get_modes = nv50_mstc_get_modes, .mode_valid = nv50_mstc_mode_valid, .best_encoder = nv50_mstc_best_encoder, .atomic_best_encoder = nv50_mstc_atomic_best_encoder, + .atomic_check = nv50_mstc_atomic_check, }; 
 static enum drm_connector_status
@@ -2121,6 +2157,10 @@ nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 			return ret;
 	}
 
+	ret = drm_dp_mst_atomic_check(state);
+	if (ret)
+		return ret;
+
 	return 0;
 }
-- 
2.19.2
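For readers following along, the calling convention the new helpers expect
from an atomic driver looks roughly like the sketch below. The mst_encoder
structure, to_mst_encoder(), and the fixed 24bpp value are illustrative
assumptions rather than anything from the patch; only the drm_dp_* calls are
the real API being exercised here.

    #include <drm/drm_atomic.h>
    #include <drm/drm_dp_mst_helper.h>

    /* Sketch only: struct mst_encoder, to_mst_encoder() and the fixed 24bpp
     * are invented for illustration; the drm_dp_* helpers are the ones this
     * series expects atomic drivers to call.
     */
    static int mst_encoder_atomic_check(struct drm_encoder *encoder,
                                        struct drm_crtc_state *crtc_state,
                                        struct drm_connector_state *conn_state)
    {
            struct drm_atomic_state *state = crtc_state->state;
            struct mst_encoder *enc = to_mst_encoder(encoder);
            int pbn, slots;

            /* Convert the mode's bandwidth requirement into PBN... */
            pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);

            /* ...and claim that many VCPI slots in the atomic state, but
             * only when the configuration actually changed, so unrelated
             * commits don't claim slots twice.
             */
            if (crtc_state->mode_changed || crtc_state->connectors_changed) {
                    slots = drm_dp_atomic_find_vcpi_slots(state, enc->mgr,
                                                          enc->port, pbn);
                    if (slots < 0)
                            return slots;
            }

            return 0;
    }

The other two halves of the contract are the connector's atomic_check
releasing its slots with drm_dp_atomic_release_vcpi_slots() when it loses its
CRTC, and the driver's top-level atomic check calling drm_dp_mst_atomic_check()
so the core can verify the total allocation against the topology's capacity.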
Daniel Vetter
2018-Dec-20 09:09 UTC
[Nouveau] [PATCH v2 01/16] drm/dp_mst: Rename drm_dp_mst_get_validated_(port|mstb)_ref and friends
On Wed, Dec 19, 2018 at 07:19:45PM -0500, Lyude Paul wrote:> s/drm_dp_get_validated_port_ref/drm_dp_mst_topology_get_port_validated/ > s/drm_dp_put_port/drm_dp_mst_topology_put_port/ > s/drm_dp_get_validated_mstb_ref/drm_dp_mst_topology_get_mstb_validated/ > s/drm_dp_put_mst_branch_device/drm_dp_mst_topology_put_mstb/ > > This is a much more consistent naming scheme, and will make even more > sense once we redesign how the current refcounting scheme here works. > > Signed-off-by: Lyude Paul <lyude at redhat.com> > Cc: Daniel Vetter <daniel at ffwll.ch> > Cc: David Airlie <airlied at redhat.com> > Cc: Jerry Zuo <Jerry.Zuo at amd.com> > Cc: Harry Wentland <harry.wentland at amd.com> > Cc: Juston Li <juston.li at intel.com>Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>> --- > drivers/gpu/drm/drm_dp_mst_topology.c | 114 ++++++++++++++------------ > 1 file changed, 62 insertions(+), 52 deletions(-) > > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c > index 2ab16c9e6243..6f9b211069a7 100644 > --- a/drivers/gpu/drm/drm_dp_mst_topology.c > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c > @@ -46,7 +46,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, > char *buf); > static int test_calc_pbn_mode(void); > > -static void drm_dp_put_port(struct drm_dp_mst_port *port); > +static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); > > static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, > int id, > @@ -888,7 +888,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) > */ > list_for_each_entry_safe(port, tmp, &mstb->ports, next) { > list_del(&port->next); > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > } > > /* drop any tx slots msg */ > @@ -911,7 +911,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) > kref_put(kref, drm_dp_free_mst_branch_device); > } > > -static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) > +static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) > { > kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); > } > @@ -930,7 +930,7 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) > case DP_PEER_DEVICE_MST_BRANCHING: > mstb = port->mstb; > port->mstb = NULL; > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > break; > } > } > @@ -970,12 +970,14 @@ static void drm_dp_destroy_port(struct kref *kref) > kfree(port); > } > > -static void drm_dp_put_port(struct drm_dp_mst_port *port) > +static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) > { > kref_put(&port->kref, drm_dp_destroy_port); > } > > -static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) > +static struct drm_dp_mst_branch * > +drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, > + struct drm_dp_mst_branch *to_find) > { > struct drm_dp_mst_port *port; > struct drm_dp_mst_branch *rmstb; > @@ -985,7 +987,8 @@ static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct > } > list_for_each_entry(port, &mstb->ports, next) { > if (port->mstb) { > - rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); > + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( > + port->mstb, to_find); > if (rmstb) > return rmstb; > } > @@ -993,12 +996,15 @@ static struct drm_dp_mst_branch 
*drm_dp_mst_get_validated_mstb_ref_locked(struct > return NULL; > } > > -static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) > +static struct drm_dp_mst_branch * > +drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, > + struct drm_dp_mst_branch *mstb) > { > struct drm_dp_mst_branch *rmstb = NULL; > mutex_lock(&mgr->lock); > if (mgr->mst_primary) > - rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); > + rmstb = drm_dp_mst_topology_get_mstb_validated_locked( > + mgr->mst_primary, mstb); > mutex_unlock(&mgr->lock); > return rmstb; > } > @@ -1021,7 +1027,9 @@ static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_ > return NULL; > } > > -static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) > +static struct drm_dp_mst_port * > +drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, > + struct drm_dp_mst_port *port) > { > struct drm_dp_mst_port *rport = NULL; > mutex_lock(&mgr->lock); > @@ -1210,7 +1218,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, > list_del(&port->next); > mutex_unlock(&mstb->mgr->lock); > /* drop port list reference */ > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > goto out; > } > if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || > @@ -1224,7 +1232,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, > > out: > /* put reference to this port */ > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > } > > static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, > @@ -1259,7 +1267,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, > dowork = true; > } > > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > if (dowork) > queue_work(system_long_wq, &mstb->mgr->work); > > @@ -1362,10 +1370,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m > drm_dp_send_enum_path_resources(mgr, mstb, port); > > if (port->mstb) { > - mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); > + mstb_child = drm_dp_mst_topology_get_mstb_validated( > + mgr, port->mstb); > if (mstb_child) { > drm_dp_check_and_send_link_address(mgr, mstb_child); > - drm_dp_put_mst_branch_device(mstb_child); > + drm_dp_mst_topology_put_mstb(mstb_child); > } > } > } > @@ -1384,7 +1393,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) > mutex_unlock(&mgr->lock); > if (mstb) { > drm_dp_check_and_send_link_address(mgr, mstb); > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > } > } > > @@ -1726,17 +1735,17 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, > u8 sinks[DRM_DP_MAX_SDP_STREAMS]; > int i; > > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return -EINVAL; > > port_num = port->port_num; > - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); > + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); > if (!mstb) { > mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); > > if (!mstb) { > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return -EINVAL; > } > } > @@ -1766,8 +1775,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, > } > kfree(txmsg); > fail_put: > - 
drm_dp_put_mst_branch_device(mstb); > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_mstb(mstb); > + drm_dp_mst_topology_put_port(port); > return ret; > } > > @@ -1777,13 +1786,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, > struct drm_dp_sideband_msg_tx *txmsg; > int len, ret; > > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return -EINVAL; > > txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); > if (!txmsg) { > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return -ENOMEM; > } > > @@ -1799,7 +1808,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, > ret = 0; > } > kfree(txmsg); > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > > return ret; > } > @@ -1888,7 +1897,8 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) > if (vcpi) { > port = container_of(vcpi, struct drm_dp_mst_port, > vcpi); > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, > + port); > if (!port) { > mutex_unlock(&mgr->payload_lock); > return -EINVAL; > @@ -1925,7 +1935,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) > cur_slots += req_payload.num_slots; > > if (port) > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > } > > for (i = 0; i < mgr->max_payloads; i++) { > @@ -2024,7 +2034,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, > struct drm_dp_sideband_msg_tx *txmsg; > struct drm_dp_mst_branch *mstb; > > - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); > + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); > if (!mstb) > return -EINVAL; > > @@ -2048,7 +2058,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, > } > kfree(txmsg); > fail_put: > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > return ret; > } > > @@ -2192,7 +2202,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms > out_unlock: > mutex_unlock(&mgr->lock); > if (mstb) > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > return ret; > > } > @@ -2357,7 +2367,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) > mgr->down_rep_recv.initial_hdr.lct, > mgr->down_rep_recv.initial_hdr.rad[0], > mgr->down_rep_recv.msg[0]); > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); > return 0; > } > @@ -2368,7 +2378,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) > } > > memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > > mutex_lock(&mgr->qlock); > txmsg->state = DRM_DP_SIDEBAND_TX_RX; > @@ -2441,7 +2451,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) > } > > if (mstb) > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > > memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); > } > @@ -2501,7 +2511,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector > enum drm_connector_status status = connector_status_disconnected; > > /* we need to search for the port in the mgr in case its gone */ > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = 
drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return connector_status_disconnected; > > @@ -2526,7 +2536,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector > break; > } > out: > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return status; > } > EXPORT_SYMBOL(drm_dp_mst_detect_port); > @@ -2543,11 +2553,11 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, > { > bool ret = false; > > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return ret; > ret = port->has_audio; > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return ret; > } > EXPORT_SYMBOL(drm_dp_mst_port_has_audio); > @@ -2567,7 +2577,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ > struct edid *edid = NULL; > > /* we need to search for the port in the mgr in case its gone */ > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return NULL; > > @@ -2578,7 +2588,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ > drm_connector_set_tile_property(connector); > } > port->has_audio = drm_detect_monitor_audio(edid); > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return edid; > } > EXPORT_SYMBOL(drm_dp_mst_get_edid); > @@ -2649,7 +2659,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, > if (IS_ERR(topology_state)) > return PTR_ERR(topology_state); > > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (port == NULL) > return -EINVAL; > req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); > @@ -2657,14 +2667,14 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, > req_slots, topology_state->avail_slots); > > if (req_slots > topology_state->avail_slots) { > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return -ENOSPC; > } > > topology_state->avail_slots -= req_slots; > DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots); > > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return req_slots; > } > EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); > @@ -2715,7 +2725,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, > { > int ret; > > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return false; > > @@ -2725,7 +2735,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, > if (port->vcpi.vcpi > 0) { > DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); > if (pbn == port->vcpi.pbn) { > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return true; > } > } > @@ -2739,7 +2749,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, > DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", > pbn, port->vcpi.num_slots); > > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return true; > out: > return false; > @@ -2749,12 +2759,12 @@ EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); > int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) > { > int slots = 0; > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if 
(!port) > return slots; > > slots = port->vcpi.num_slots; > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > return slots; > } > EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); > @@ -2768,11 +2778,11 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); > */ > void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) > { > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return; > port->vcpi.num_slots = 0; > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > } > EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); > > @@ -2783,7 +2793,7 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); > */ > void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) > { > - port = drm_dp_get_validated_port_ref(mgr, port); > + port = drm_dp_mst_topology_get_port_validated(mgr, port); > if (!port) > return; > > @@ -2792,7 +2802,7 @@ void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_ > port->vcpi.pbn = 0; > port->vcpi.aligned_pbn = 0; > port->vcpi.vcpi = 0; > - drm_dp_put_port(port); > + drm_dp_mst_topology_put_port(port); > } > EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); > > @@ -3292,7 +3302,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs > struct drm_dp_sideband_msg_tx *txmsg = NULL; > int ret; > > - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); > + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); > if (!mstb) > return -EREMOTEIO; > > @@ -3342,7 +3352,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs > } > out: > kfree(txmsg); > - drm_dp_put_mst_branch_device(mstb); > + drm_dp_mst_topology_put_mstb(mstb); > return ret; > } > > -- > 2.19.2 >-- Daniel Vetter Software Engineer, Intel Corporation http://blog.ffwll.ch
Daniel Vetter
2018-Dec-20 09:10 UTC
[Nouveau] [PATCH v2 06/16] drm/i915: Keep malloc references to MST ports
On Wed, Dec 19, 2018 at 07:19:50PM -0500, Lyude Paul wrote:> So that the ports stay around until we've destroyed the connectors, in > order to ensure that we don't pass an invalid pointer to any MST helpers > once we introduce the new MST VCPI helpers. > > Changes since v1: > * Move drm_dp_mst_get_port_malloc() to where we assign > intel_connector->port - danvet > > Signed-off-by: Lyude Paul <lyude at redhat.com> > Cc: Daniel Vetter <daniel at ffwll.ch> > Cc: David Airlie <airlied at redhat.com> > Cc: Jerry Zuo <Jerry.Zuo at amd.com> > Cc: Harry Wentland <harry.wentland at amd.com> > Cc: Juston Li <juston.li at intel.com>Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>> --- > drivers/gpu/drm/i915/intel_connector.c | 4 ++++ > drivers/gpu/drm/i915/intel_dp_mst.c | 1 + > 2 files changed, 5 insertions(+) > > diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c > index 18e370f607bc..37d2c644f4b8 100644 > --- a/drivers/gpu/drm/i915/intel_connector.c > +++ b/drivers/gpu/drm/i915/intel_connector.c > @@ -95,6 +95,10 @@ void intel_connector_destroy(struct drm_connector *connector) > intel_panel_fini(&intel_connector->panel); > > drm_connector_cleanup(connector); > + > + if (intel_connector->port) > + drm_dp_mst_put_port_malloc(intel_connector->port); > + > kfree(connector); > } > > diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c > index f05427b74e34..631fd1537252 100644 > --- a/drivers/gpu/drm/i915/intel_dp_mst.c > +++ b/drivers/gpu/drm/i915/intel_dp_mst.c > @@ -457,6 +457,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo > intel_connector->get_hw_state = intel_dp_mst_get_hw_state; > intel_connector->mst_port = intel_dp; > intel_connector->port = port; > + drm_dp_mst_get_port_malloc(port); > > connector = &intel_connector->base; > ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, > -- > 2.19.2 >-- Daniel Vetter Software Engineer, Intel Corporation http://blog.ffwll.ch
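The pairing being reviewed here is easiest to see in isolation: take the
malloc reference where the driver first stores the port pointer, and drop it
in the very last place that pointer is used. A minimal sketch follows, with an
invented my_connector wrapper standing in for the driver's real connector
structure; only the get/put calls are the actual helper API.

    #include <drm/drm_dp_mst_helper.h>

    /* Sketch: my_connector and the callback bodies are invented; the
     * drm_dp_mst_get_port_malloc()/drm_dp_mst_put_port_malloc() pairing is
     * the point.
     */
    struct my_connector {
            struct drm_connector base;
            struct drm_dp_mst_port *port;
    };

    static struct drm_connector *
    my_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                         struct drm_dp_mst_port *port, const char *path)
    {
            struct my_connector *conn = kzalloc(sizeof(*conn), GFP_KERNEL);

            if (!conn)
                    return NULL;

            conn->port = port;
            /* Pin the port's memory for as long as this connector exists */
            drm_dp_mst_get_port_malloc(port);

            /* ... drm_connector_init(), helper funcs, properties ... */
            return &conn->base;
    }

    static void my_connector_destroy(struct drm_connector *connector)
    {
            struct my_connector *conn =
                    container_of(connector, struct my_connector, base);

            drm_connector_cleanup(connector);
            /* Last use of conn->port: drop the malloc reference */
            drm_dp_mst_put_port_malloc(conn->port);
            kfree(conn);
    }

With that in place, the connector can safely hand conn->port to the VCPI
helpers even after the port has been unplugged from the topology.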
Wentland, Harry
2019-Jan-08 19:16 UTC
[Nouveau] [PATCH v2 03/16] drm/dp_mst: Restart last_connected_port_and_mstb() if topology ref fails
On 2018-12-19 7:19 p.m., Lyude Paul wrote:> While this isn't a complete fix, this will improve the reliability of > drm_dp_get_last_connected_port_and_mstb() pretty significantly during > hotplug events, since there's a chance that the in-memory topology tree > may not be fully updated when drm_dp_get_last_connected_port_and_mstb() > is called and thus might end up causing our search to fail on an mstb > whose topology refcount has reached 0, but has not yet been removed from > it's parent. > > Ideally, we should further fix this problem by ensuring that we deal > with the potential for racing with a hotplug event, which would look > like this: > > * drm_dp_payload_send_msg() retrieves the last living relative of mstb > with drm_dp_get_last_connected_port_and_mstb() > * drm_dp_payload_send_msg() starts building payload message > At the same time, mstb gets unplugged from the topology and is no > longer the actual last living relative of the original mstb > * drm_dp_payload_send_msg() tries sending the payload message, hub times > out > * Hub timed out, we give up and run away-resulting in the payload being > leaked > > This could be fixed by restarting the > drm_dp_get_last_connected_port_and_mstb() search whenever we get a > timeout, sending the payload to the new mstb, then repeating until > either the entire topology is removed from the system or > drm_dp_get_last_connected_port_and_mstb() fails. But since the above > race condition is not terribly likely, we'll address that in a later > patch series once we've improved the recovery handling for VCPI > allocations in the rest of the DP MST helpers. > > Signed-off-by: Lyude Paul <lyude at redhat.com> > Cc: Daniel Vetter <daniel at ffwll.ch> > Cc: David Airlie <airlied at redhat.com> > Cc: Jerry Zuo <Jerry.Zuo at amd.com> > Cc: Harry Wentland <harry.wentland at amd.com> > Cc: Juston Li <juston.li at intel.com>Reviewed-by: Harry Wentland <harry.wentland at amd.com> Harry> --- > drivers/gpu/drm/drm_dp_mst_topology.c | 55 +++++++++++++++++++++------ > 1 file changed, 44 insertions(+), 11 deletions(-) > > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c > index b380ada09e90..356a95aba2d8 100644 > --- a/drivers/gpu/drm/drm_dp_mst_topology.c > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c > @@ -2043,25 +2043,50 @@ static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm > return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); > } > > -static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, > - struct drm_dp_mst_branch *mstb, > - int *port_num) > +/** > + * drm_dp_get_last_connected_port_and_mstb() - Find the last living relatives > + * in a topology of a given branch device > + * @mgr: The topology manager to use > + * @mstb: The disconnected branch device > + * @port_num: Where to store the number of the last connected port > + * > + * Searches upwards in the topology starting from @mstb to try to find the > + * closest available parent of @mstb that's still connected to the rest of the > + * topology. This can be used in order to perform operations like releasing > + * payloads, where the branch device which owned the payload may no longer be > + * around and thus would require that the payload on the last living relative > + * be freed instead. > + * > + * Returns: > + * The last connected &drm_dp_mst_branch in the topology that was a parent of > + * @mstb, if there is one. 
> + */ > +static struct drm_dp_mst_branch * > +drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, > + struct drm_dp_mst_branch *mstb, > + int *port_num) > { > struct drm_dp_mst_branch *rmstb = NULL; > struct drm_dp_mst_port *found_port; > + > mutex_lock(&mgr->lock); > - if (mgr->mst_primary) { > + if (!mgr->mst_primary) > + goto out; > + > + do { > found_port = drm_dp_get_last_connected_port_to_mstb(mstb); > + if (!found_port) > + break; > > - if (found_port) { > + if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { > rmstb = found_port->parent; > - if (drm_dp_mst_topology_try_get_mstb(rmstb)) { > - *port_num = found_port->port_num; > - } else { > - rmstb = NULL; > - } > + *port_num = found_port->port_num; > + } else { > + /* Search again, starting from this parent */ > + mstb = found_port->parent; > } > - } > + } while (!rmstb); > +out: > mutex_unlock(&mgr->lock); > return rmstb; > } > @@ -2110,6 +2135,14 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, > > drm_dp_queue_down_tx(mgr, txmsg); > > + /* > + * FIXME: there is a small chance that between getting the last > + * connected mstb and sending the payload message, the last connected > + * mstb could also be removed from the topology. In the future, this > + * needs to be fixed by restarting the > + * drm_dp_get_last_connected_port_and_mstb() search in the event of a > + * timeout if the topology is still connected to the system. > + */ > ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); > if (ret > 0) { > if (txmsg->reply.reply_type == 1) { >
Wentland, Harry
2019-Jan-08 19:27 UTC
[Nouveau] [PATCH v2 04/16] drm/dp_mst: Stop releasing VCPI when removing ports from topology
On 2018-12-19 7:19 p.m., Lyude Paul wrote:
> This has never actually worked, and isn't needed anyway: the driver's
> always going to try to deallocate VCPI when it tears down the display
> that the VCPI belongs to.
>
> Signed-off-by: Lyude Paul <lyude at redhat.com>
> Reviewed-by: Daniel Vetter <daniel at ffwll.ch>
> Cc: David Airlie <airlied at redhat.com>
> Cc: Jerry Zuo <Jerry.Zuo at amd.com>
> Cc: Harry Wentland <harry.wentland at amd.com>
> Cc: Juston Li <juston.li at intel.com>

Reviewed-by: Harry Wentland <harry.wentland at amd.com>

Harry

> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 8 --------
>  1 file changed, 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 356a95aba2d8..ef8637f37564 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -1175,8 +1175,6 @@ static void drm_dp_destroy_port(struct kref *kref)
>  	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
>
>  	if (!port->input) {
> -		port->vcpi.num_slots = 0;
> -
>  		kfree(port->cached_edid);
>
>  		/*
> @@ -3491,12 +3489,6 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
>  		drm_dp_port_teardown_pdt(port, port->pdt);
>  		port->pdt = DP_PEER_DEVICE_NONE;
>
> -		if (!port->input && port->vcpi.vcpi > 0) {
> -			drm_dp_mst_reset_vcpi_slots(mgr, port);
> -			drm_dp_update_payload_part1(mgr);
> -			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
> -		}
> -
>  		drm_dp_mst_put_port_malloc(port);
>  		send_hotplug = true;
>  	}
>
Wentland, Harry
2019-Jan-08 19:27 UTC
[Nouveau] [PATCH v2 05/16] drm/dp_mst: Fix payload deallocation on hotplugs using malloc refs
On 2018-12-19 7:19 p.m., Lyude Paul wrote:> Up until now, freeing payloads on remote MST hubs that just had ports > removed has almost never worked because we've been relying on port > validation in order to stop us from accessing ports that have already > been freed from memory, but ports which need their payloads released due > to being removed will never be a valid part of the topology after > they've been removed. > > Since we've introduced malloc refs, we can replace all of the validation > logic in payload helpers which are used for deallocation with some > well-placed malloc krefs. This ensures that regardless of whether or not > the ports are still valid and in the topology, any port which has an > allocated payload will remain allocated in memory until it's payloads > have been removed - finally allowing us to actually release said > payloads correctly. > > Signed-off-by: Lyude Paul <lyude at redhat.com> > Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch> > Cc: David Airlie <airlied at redhat.com> > Cc: Jerry Zuo <Jerry.Zuo at amd.com> > Cc: Harry Wentland <harry.wentland at amd.com> > Cc: Juston Li <juston.li at intel.com>Reviewed-by: Harry Wentland <harry.wentland at amd.com> Harry> --- > drivers/gpu/drm/drm_dp_mst_topology.c | 54 +++++++++++++++------------ > 1 file changed, 30 insertions(+), 24 deletions(-) > > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c > index ef8637f37564..11dd3ede7b7d 100644 > --- a/drivers/gpu/drm/drm_dp_mst_topology.c > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c > @@ -2100,10 +2100,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, > u8 sinks[DRM_DP_MAX_SDP_STREAMS]; > int i; > > - port = drm_dp_mst_topology_get_port_validated(mgr, port); > - if (!port) > - return -EINVAL; > - > port_num = port->port_num; > mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); > if (!mstb) { > @@ -2111,10 +2107,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, > port->parent, > &port_num); > > - if (!mstb) { > - drm_dp_mst_topology_put_port(port); > + if (!mstb) > return -EINVAL; > - } > } > > txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); > @@ -2151,7 +2145,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, > kfree(txmsg); > fail_put: > drm_dp_mst_topology_put_mstb(mstb); > - drm_dp_mst_topology_put_port(port); > return ret; > } > > @@ -2256,15 +2249,16 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, > */ > int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) > { > - int i, j; > - int cur_slots = 1; > struct drm_dp_payload req_payload; > struct drm_dp_mst_port *port; > + int i, j; > + int cur_slots = 1; > > mutex_lock(&mgr->payload_lock); > for (i = 0; i < mgr->max_payloads; i++) { > struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; > struct drm_dp_payload *payload = &mgr->payloads[i]; > + bool put_port = false; > > /* solve the current payloads - compare to the hw ones > - update the hw view */ > @@ -2272,12 +2266,20 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) > if (vcpi) { > port = container_of(vcpi, struct drm_dp_mst_port, > vcpi); > - port = drm_dp_mst_topology_get_port_validated(mgr, > - port); > - if (!port) { > - mutex_unlock(&mgr->payload_lock); > - return -EINVAL; > + > + /* Validated ports don't matter if we're releasing > + * VCPI > + */ > + if (vcpi->num_slots) { > + port = drm_dp_mst_topology_get_port_validated( > + mgr, port); > + if (!port) 
{ > + mutex_unlock(&mgr->payload_lock); > + return -EINVAL; > + } > + put_port = true; > } > + > req_payload.num_slots = vcpi->num_slots; > req_payload.vcpi = vcpi->vcpi; > } else { > @@ -2309,7 +2311,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) > } > cur_slots += req_payload.num_slots; > > - if (port) > + if (put_port) > drm_dp_mst_topology_put_port(port); > } > > @@ -3124,6 +3126,8 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, > DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", > pbn, port->vcpi.num_slots); > > + /* Keep port allocated until it's payload has been removed */ > + drm_dp_mst_get_port_malloc(port); > drm_dp_mst_topology_put_port(port); > return true; > out: > @@ -3153,11 +3157,12 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); > */ > void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) > { > - port = drm_dp_mst_topology_get_port_validated(mgr, port); > - if (!port) > - return; > + /* > + * A port with VCPI will remain allocated until it's VCPI is > + * released, no verified ref needed > + */ > + > port->vcpi.num_slots = 0; > - drm_dp_mst_topology_put_port(port); > } > EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); > > @@ -3169,16 +3174,17 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); > void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, > struct drm_dp_mst_port *port) > { > - port = drm_dp_mst_topology_get_port_validated(mgr, port); > - if (!port) > - return; > + /* > + * A port with VCPI will remain allocated until it's VCPI is > + * released, no verified ref needed > + */ > > drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); > port->vcpi.num_slots = 0; > port->vcpi.pbn = 0; > port->vcpi.aligned_pbn = 0; > port->vcpi.vcpi = 0; > - drm_dp_mst_topology_put_port(port); > + drm_dp_mst_put_port_malloc(port); > } > EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); > >
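Put differently: once drm_dp_mst_allocate_vcpi() has succeeded, the malloc
reference it takes keeps the port structure alive (though not necessarily in
the topology) until drm_dp_mst_deallocate_vcpi() releases it, so a teardown
path no longer needs any validation dance. A rough sketch under that
assumption (the function name is invented, and the stream disable, ACT wait,
and drm_dp_update_payload_part2() step are elided):

    /* Sketch: 'port' may already have been unplugged by the time this runs;
     * the malloc reference taken in drm_dp_mst_allocate_vcpi() keeps the
     * structure itself valid until drm_dp_mst_deallocate_vcpi().
     */
    static void my_mst_stream_disable(struct drm_dp_mst_topology_mgr *mgr,
                                      struct drm_dp_mst_port *port)
    {
            /* Mark the VCPI as unused so the next payload update removes it */
            drm_dp_mst_reset_vcpi_slots(mgr, port);

            /* Rewrite the payload table; if the branch device is still
             * reachable, this also frees the slots on the hub
             */
            drm_dp_update_payload_part1(mgr);

            /* ... disable the stream, wait for ACT, update_payload_part2 ... */

            /* Release the VCPI id and the malloc reference taken in
             * drm_dp_mst_allocate_vcpi()
             */
            drm_dp_mst_deallocate_vcpi(mgr, port);
    }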
Wentland, Harry
2019-Jan-08 19:38 UTC
[Nouveau] [PATCH v2 07/16] drm/amdgpu/display: Keep malloc ref to MST port
On 2018-12-19 7:19 p.m., Lyude Paul wrote:> Just like i915 and nouveau, it's a good idea for us to hold a malloc > reference to the port here so that we never pass a freed pointer to any > of the DP MST helper functions. > > Also, we stop unsetting aconnector->port in > dm_dp_destroy_mst_connector(). There's literally no point to that > assignment that I can see anyway. > > Signed-off-by: Lyude Paul <lyude at redhat.com> > Cc: Daniel Vetter <daniel at ffwll.ch> > Cc: David Airlie <airlied at redhat.com> > Cc: Jerry Zuo <Jerry.Zuo at amd.com> > Cc: Harry Wentland <harry.wentland at amd.com> > Cc: Juston Li <juston.li at intel.com>Reviewed-by: Harry Wentland <harry.wentland at amd.com> Harry> --- > .../drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 11 +++++++---- > 1 file changed, 7 insertions(+), 4 deletions(-) > > diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c > index 5e7ca1f3a8d1..24632727e127 100644 > --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c > +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c > @@ -191,6 +191,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) > drm_encoder_cleanup(&amdgpu_encoder->base); > kfree(amdgpu_encoder); > drm_connector_cleanup(connector); > + drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port); > kfree(amdgpu_dm_connector); > } > > @@ -363,7 +364,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, > amdgpu_dm_connector_funcs_reset(connector); > > DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", > - aconnector, connector->base.id, aconnector->mst_port); > + aconnector, connector->base.id, aconnector->mst_port); > + > + drm_dp_mst_get_port_malloc(port); > > DRM_DEBUG_KMS(":%d\n", connector->base.id); > > @@ -379,12 +382,12 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, > struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); > > DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", > - aconnector, connector->base.id, aconnector->mst_port); > + aconnector, connector->base.id, aconnector->mst_port); > > - aconnector->port = NULL; > if (aconnector->dc_sink) { > amdgpu_dm_update_freesync_caps(connector, NULL); > - dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); > + dc_link_remove_remote_sink(aconnector->dc_link, > + aconnector->dc_sink); > dc_sink_release(aconnector->dc_sink); > aconnector->dc_sink = NULL; > } >