drm/amd/display: Add polling method to handle MST reply packet
author: jsg <jsg@openbsd.org>
Fri, 28 Jul 2023 07:08:10 +0000 (07:08 +0000)
committer: jsg <jsg@openbsd.org>
Fri, 28 Jul 2023 07:08:10 +0000 (07:08 +0000)
From Wayne Lin
2f2ba3c16230e1de649a877e5819673c849ca0f2 in linux-6.1.y/6.1.42
4f6d9e38c4d244ad106eb9ebd8c0e1215e866f35 in mainline linux

sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.h
sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h

index b68f877..ca5c4e7 100644 (file)
@@ -1327,6 +1327,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
        if (amdgpu_in_reset(adev))
                goto skip;
 
+       if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+               offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+               dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+               spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+               offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+               spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+               goto skip;
+       }
+
        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
                dc_link_dp_handle_automated_test(dc_link);
@@ -3231,87 +3240,6 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
-       u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-       u8 dret;
-       bool new_irq_handled = false;
-       int dpcd_addr;
-       int dpcd_bytes_to_read;
-
-       const int max_process_count = 30;
-       int process_count = 0;
-
-       const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
-       if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
-               dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
-               /* DPCD 0x200 - 0x201 for downstream IRQ */
-               dpcd_addr = DP_SINK_COUNT;
-       } else {
-               dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
-               /* DPCD 0x2002 - 0x2005 for downstream IRQ */
-               dpcd_addr = DP_SINK_COUNT_ESI;
-       }
-
-       dret = drm_dp_dpcd_read(
-               &aconnector->dm_dp_aux.aux,
-               dpcd_addr,
-               esi,
-               dpcd_bytes_to_read);
-
-       while (dret == dpcd_bytes_to_read &&
-               process_count < max_process_count) {
-               u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
-               u8 retry;
-
-               dret = 0;
-
-               process_count++;
-
-               DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-               /* handle HPD short pulse irq */
-               if (aconnector->mst_mgr.mst_state)
-                       drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
-                                                       esi,
-                                                       ack,
-                                                       &new_irq_handled);
-
-               if (new_irq_handled) {
-                       /* ACK at DPCD to notify down stream */
-                       for (retry = 0; retry < 3; retry++) {
-                               ssize_t wret;
-
-                               wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
-                                                         dpcd_addr + 1,
-                                                         ack[1]);
-                               if (wret == 1)
-                                       break;
-                       }
-
-                       if (retry == 3) {
-                               DRM_ERROR("Failed to ack MST event.\n");
-                               return;
-                       }
-
-                       drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
-                       /* check if there is new irq to be handled */
-                       dret = drm_dp_dpcd_read(
-                               &aconnector->dm_dp_aux.aux,
-                               dpcd_addr,
-                               esi,
-                               dpcd_bytes_to_read);
-
-                       new_irq_handled = false;
-               } else {
-                       break;
-               }
-       }
-
-       if (process_count == max_process_count)
-               DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
                                                        union hpd_irq_data hpd_irq_data)
 {
@@ -3373,7 +3301,23 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
                        hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-                       dm_handle_mst_sideband_msg(aconnector);
+                       bool skip = false;
+
+                       /*
+                        * DOWN_REP_MSG_RDY is also handled by polling method
+                        * mgr->cbs->poll_hpd_irq()
+                        */
+                       spin_lock(&offload_wq->offload_lock);
+                       skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+                       if (!skip)
+                               offload_wq->is_handling_mst_msg_rdy_event = true;
+
+                       spin_unlock(&offload_wq->offload_lock);
+
+                       if (!skip)
+                               schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
                        goto out;
                }
 
@@ -3484,11 +3428,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
-
-                       if (adev->dm.hpd_rx_offload_wq)
-                               adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
-                                       aconnector;
                }
+
+               if (adev->dm.hpd_rx_offload_wq)
+                       adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                               aconnector;
        }
 }
 
@@ -7084,6 +7028,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
        aconnector->audio_inst = -1;
        rw_init(&aconnector->hpd_lock, "dmhpd");
+       rw_init(&aconnector->handle_mst_msg_ready, "dmmr");
 
        /*
         * configure support HPD hot plug connector_>polled default value is 0
index 25c4b60..a9c6eb7 100644 (file)
@@ -193,6 +193,11 @@ struct hpd_rx_irq_offload_work_queue {
         * we're handling link loss
         */
        bool is_handling_link_loss;
+       /**
+        * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+        * ready event when we're already handling mst message ready event
+        */
+       bool is_handling_mst_msg_rdy_event;
        /**
         * @aconnector: The aconnector that this work queue is attached to
         */
@@ -614,6 +619,8 @@ struct amdgpu_dm_connector {
        struct drm_dp_mst_port *port;
        struct amdgpu_dm_connector *mst_port;
        struct drm_dp_aux *dsc_aux;
+       struct rwlock handle_mst_msg_ready;
+
        /* TODO see if we can merge with ddc_bus or make a dm_connector */
        struct amdgpu_i2c_adapter *i2c;
 
index 994a370..0570868 100644 (file)
@@ -590,8 +590,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	uint8_t dpcd_bytes_to_read;
+	const uint8_t max_process_count = 30;
+	uint8_t process_count = 0;
+	u8 retry;
+	struct amdgpu_dm_connector *aconnector =
+			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	mutex_lock(&aconnector->handle_mst_msg_ready);
+
+	while (process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+		process_count++;
+
+		dret = drm_dp_dpcd_read(
+			&aconnector->dm_dp_aux.aux,
+			dpcd_addr,
+			esi,
+			dpcd_bytes_to_read);
+
+		if (dret != dpcd_bytes_to_read) {
+			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+			break;
+		}
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		switch (msg_rdy_type) {
+		case DOWN_REP_MSG_RDY_EVENT:
+			/* Only handle DOWN_REP_MSG_RDY case*/
+			esi[1] &= DP_DOWN_REP_MSG_RDY;
+			break;
+		case UP_REQ_MSG_RDY_EVENT:
+			/* Only handle UP_REQ_MSG_RDY case*/
+			esi[1] &= DP_UP_REQ_MSG_RDY;
+			break;
+		default:
+			/* Handle both cases*/
+			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+			break;
+		}
+
+		if (!esi[1])
+			break;
+
+		/* handle MST irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+						 esi,
+						 ack,
+						 &new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			for (retry = 0; retry < 3; retry++) {
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
+					break;
+			}
+
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				break; /* must not return with handle_mst_msg_ready held */
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+       dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
+       .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
index 1e4ede1..37c820a 100644 (file)
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B     1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B  1000
 
+enum mst_msg_ready_type {
+       NONE_MSG_RDY_EVENT = 0,
+       DOWN_REP_MSG_RDY_EVENT = 1,
+       UP_REQ_MSG_RDY_EVENT = 2,
+       DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+       struct drm_dp_mst_topology_mgr *mgr,
+       enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
        int pbn;
        bool dsc_enabled;