drm/amd/display: fix linux dp link lost handled only one time
author jsg <jsg@openbsd.org>
Fri, 28 Jul 2023 07:05:07 +0000 (07:05 +0000)
committer jsg <jsg@openbsd.org>
Fri, 28 Jul 2023 07:05:07 +0000 (07:05 +0000)
From Hersen Wu
78ea2ed76ce94f090d2a9c36b1b58f79ce3b93b8 in linux-6.1.y/6.1.42
e322843e5e33e72ff218d661f3d15ff9c9f2f1b5 in mainline linux

sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
sys/dev/pci/drm/amd/display/dc/core/dc_link_dp.c
sys/dev/pci/drm/amd/display/dc/inc/dc_link_dp.h

index 87b0d88..b68f877 100644
--- a/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1348,10 +1348,28 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
        } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
-               dc_link_dp_handle_link_loss(dc_link);
+               /* offload_work->data comes from handle_hpd_rx_irq ->
+                * schedule_hpd_rx_offload_work and is the deferred
+                * handling of an hpd short pulse. By the time this
+                * runs, the link status may have changed, so read the
+                * latest status from the DPCD registers. If the link
+                * is still good, skip running link training again.
+                */
+               union hpd_irq_data irq_data;
+
+               memset(&irq_data, 0, sizeof(irq_data));
+
+               /* Before dc_link_dp_handle_link_loss, clear the flag so a new
+                * link-loss handling request can be queued should the link be
+                * lost again by the end of dc_link_dp_handle_link_loss.
+                */
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+               if ((read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+                       hpd_rx_irq_check_link_loss_status(dc_link, &irq_data))
+                       dc_link_dp_handle_link_loss(dc_link);
        }
        mutex_unlock(&adev->dm.dc_lock);
 
@@ -3326,7 +3344,7 @@ static void handle_hpd_rx_irq(void *param)
        union hpd_irq_data hpd_irq_data;
        bool link_loss = false;
        bool has_left_work = false;
-       int idx = aconnector->base.index;
+       int idx = dc_link->link_index;
        struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3468,7 +3486,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                                        (void *) aconnector);
 
                        if (adev->dm.hpd_rx_offload_wq)
-                               adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                               adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
                                        aconnector;
                }
        }
index 4aa4409..55ff9d9 100644
--- a/sys/dev/pci/drm/amd/display/dc/core/dc_link_dp.c
+++ b/sys/dev/pci/drm/amd/display/dc/core/dc_link_dp.c
@@ -3115,7 +3115,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
        return max_link_cap;
 }
 
-static enum dc_status read_hpd_rx_irq_data(
+enum dc_status read_hpd_rx_irq_data(
        struct dc_link *link,
        union hpd_irq_data *irq_data)
 {
index dab08ad..197df39 100644
--- a/sys/dev/pci/drm/amd/display/dc/inc/dc_link_dp.h
--- b/sys/dev/pci/drm/amd/display/dc/inc/dc_link_dp.h
@@ -82,6 +82,10 @@ bool perform_link_training_with_retries(
        enum amd_signal_type signal,
        bool do_fallback);
 
+enum dc_status read_hpd_rx_irq_data(
+       struct dc_link *link,
+       union hpd_irq_data *irq_data);
+
 bool hpd_rx_irq_check_link_loss_status(
        struct dc_link *link,
        union hpd_irq_data *hpd_irq_dpcd_data);
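The change in dm_handle_hpd_rx_offload_work() boils down to the sequence
below: a condensed sketch using the names visible in the diff above. The
wrapper function name is hypothetical, and the surrounding driver context
(struct definitions, headers, adev->dm.dc_lock handling) is assumed rather
than shown.

static void handle_link_loss_sketch(struct hpd_rx_irq_offload_work_queue *offload_wq,
                                    struct dc_link *dc_link)
{
        union hpd_irq_data irq_data;
        unsigned long flags;

        memset(&irq_data, 0, sizeof(irq_data));

        /* Drop the "in progress" marker first, so a link loss that occurs
         * while this handler is still running can queue a new offload work
         * item instead of being ignored.
         */
        spin_lock_irqsave(&offload_wq->offload_lock, flags);
        offload_wq->is_handling_link_loss = false;
        spin_unlock_irqrestore(&offload_wq->offload_lock, flags);

        /* Re-read the HPD IRQ data from the DPCD registers; the data captured
         * when the work was scheduled may be stale by now. Only re-run link
         * training if the link is actually still lost.
         */
        if (read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK &&
            hpd_rx_irq_check_link_loss_status(dc_link, &irq_data))
                dc_link_dp_handle_link_loss(dc_link);
}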