drm/amd/display: Adjust the MST resume flow
author     jsg <jsg@openbsd.org>    Wed, 18 Oct 2023 01:47:42 +0000 (01:47 +0000)
committer  jsg <jsg@openbsd.org>    Wed, 18 Oct 2023 01:47:42 +0000 (01:47 +0000)
From Wayne Lin. Split the MST resume flow: s3_handle_mst() now only restores the
MST branch status (DPCD caps, DP_MSTM_CTRL, GUID) via the new
resume_mst_branch_status() helper, and dm_resume() calls
drm_dp_mst_topology_mgr_resume() to do the topology probing only after the
cached atomic state has been restored.
71472872932b11ca2591104eb73255fecaae9d33 in linux-6.1.y/6.1.57
ec5fa9fcdeca69edf7dab5ca3b2e0ceb1c08fe9a in mainline linux

sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index 6d5c0a0..3abbd09 100644
@@ -2346,14 +2346,62 @@ static int dm_late_init(void *handle)
        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
 
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+       int ret;
+       u8 guid[16];
+       u64 tmp64;
+
+       mutex_lock(&mgr->lock);
+       if (!mgr->mst_primary)
+               goto out_fail;
+
+       if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+               drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+               goto out_fail;
+       }
+
+       ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+                                DP_MST_EN |
+                                DP_UP_REQ_EN |
+                                DP_UPSTREAM_IS_SRC);
+       if (ret < 0) {
+               drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+               goto out_fail;
+       }
+
+       /* Some hubs forget their guids after they resume */
+       ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+       if (ret != 16) {
+               drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+               goto out_fail;
+       }
+
+       if (memchr_inv(guid, 0, 16) == NULL) {
+               tmp64 = get_jiffies_64();
+               memcpy(&guid[0], &tmp64, sizeof(u64));
+               memcpy(&guid[8], &tmp64, sizeof(u64));
+
+               ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+               if (ret != 16) {
+                       drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+                       goto out_fail;
+               }
+       }
+
+       memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+       mutex_unlock(&mgr->lock);
+}
+
 static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
-       int ret;
-       bool need_hotplug = false;
 
        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
@@ -2375,18 +2423,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
                        if (!dp_is_lttpr_present(aconnector->dc_link))
                                dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
-                       ret = drm_dp_mst_topology_mgr_resume(mgr, true);
-                       if (ret < 0) {
-                               dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
-                                       aconnector->dc_link);
-                               need_hotplug = true;
-                       }
+                       /* TODO: move resume_mst_branch_status() into drm mst resume again
+                        * once topology probing work is pulled out from mst resume into mst
+                        * resume 2nd step. mst resume 2nd step should be called after old
+                        * state getting restored (i.e. drm_atomic_helper_resume()).
+                        */
+                       resume_mst_branch_status(mgr);
                }
        }
        drm_connector_list_iter_end(&iter);
-
-       if (need_hotplug)
-               drm_kms_helper_hotplug_event(dev);
 }
 
 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
@@ -2775,7 +2820,8 @@ static int dm_resume(void *handle)
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct dc_state *dc_state;
-       int i, r, j;
+       int i, r, j, ret;
+       bool need_hotplug = false;
 
        if (amdgpu_in_reset(adev)) {
                dc_state = dm->cached_dc_state;
@@ -2873,7 +2919,7 @@ static int dm_resume(void *handle)
                        continue;
 
                /*
-                * this is the case when traversing through already created
+                * this is the case when traversing through already created end sink
                 * MST connectors, should be skipped
                 */
                if (aconnector && aconnector->mst_port)
@@ -2933,6 +2979,27 @@ static int dm_resume(void *handle)
 
        dm->cached_state = NULL;
 
+       /* Do mst topology probing after resuming cached state*/
+       drm_connector_list_iter_begin(ddev, &iter);
+       drm_for_each_connector_iter(connector, &iter) {
+               aconnector = to_amdgpu_dm_connector(connector);
+               if (aconnector->dc_link->type != dc_connection_mst_branch ||
+                   aconnector->mst_port)
+                       continue;
+
+               ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+               if (ret < 0) {
+                       dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+                                       aconnector->dc_link);
+                       need_hotplug = true;
+               }
+       }
+       drm_connector_list_iter_end(&iter);
+
+       if (need_hotplug)
+               drm_kms_helper_hotplug_event(ddev);
+
        amdgpu_dm_irq_resume_late(adev);
 
        amdgpu_dm_smu_write_watermarks_table(adev);