drm/dp_mst: Clear MSG_RDY flag before sending new message
author: jsg <jsg@openbsd.org>
Fri, 28 Jul 2023 06:56:32 +0000 (06:56 +0000)
committer: jsg <jsg@openbsd.org>
Fri, 28 Jul 2023 06:56:32 +0000 (06:56 +0000)
From Wayne Lin
00f68f5c1be12828a6f0b1e0f1017e1399b23a73 in linux-6.1.y/6.1.42
72f1de49ffb90b29748284f27f1d6b829ab1de95 in mainline linux

sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
sys/dev/pci/drm/display/drm_dp_mst_topology.c
sys/dev/pci/drm/i915/display/intel_dp.c
sys/dev/pci/drm/include/drm/display/drm_dp_mst_helper.h

index 53c3774..79910e2 100644 (file)
@@ -3203,6 +3203,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
+               u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
                u8 retry;
                dret = 0;
 
@@ -3211,28 +3212,29 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
-                       drm_dp_mst_hpd_irq(
-                               &aconnector->mst_mgr,
-                               esi,
-                               &new_irq_handled);
+                       drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+                                                       esi,
+                                                       ack,
+                                                       &new_irq_handled);
 
                if (new_irq_handled) {
                        /* ACK at DPCD to notify down stream */
-                       const int ack_dpcd_bytes_to_write =
-                               dpcd_bytes_to_read - 1;
-
                        for (retry = 0; retry < 3; retry++) {
-                               u8 wret;
-
-                               wret = drm_dp_dpcd_write(
-                                       &aconnector->dm_dp_aux.aux,
-                                       dpcd_addr + 1,
-                                       &esi[1],
-                                       ack_dpcd_bytes_to_write);
-                               if (wret == ack_dpcd_bytes_to_write)
+                               ssize_t wret;
+
+                               wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+                                                         dpcd_addr + 1,
+                                                         ack[1]);
+                               if (wret == 1)
                                        break;
                        }
 
+                       if (retry == 3) {
+                               DRM_ERROR("Failed to ack MST event.\n");
+                               return;
+                       }
+
+                       drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
                        /* check if there is new irq to be handled */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
index 3c233f5..a0f8b05 100644 (file)
@@ -4063,17 +4063,28 @@ out:
 }
 
 /**
- * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
  * @mgr: manager to notify irq for.
  * @esi: 4 bytes from SINK_COUNT_ESI
+ * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
  * @handled: whether the hpd interrupt was consumed or not
  *
- * This should be called from the driver when it detects a short IRQ,
+ * This should be called from the driver when it detects a HPD IRQ,
  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
- * topology manager will process the sideband messages received as a result
- * of this.
+ * topology manager will process the sideband messages received
+ * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
+ * corresponding flags that Driver has to ack the DP receiver later.
+ *
+ * Note that driver shall also call
+ * drm_dp_mst_hpd_irq_send_new_request() if the 'handled' is set
+ * after calling this function, to try to kick off a new request in
+ * the queue if the previous message transaction is completed.
+ *
+ * See also:
+ * drm_dp_mst_hpd_irq_send_new_request()
  */
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
+                                   u8 *ack, bool *handled)
 {
        int ret = 0;
        int sc;
@@ -4088,18 +4099,47 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
        if (esi[1] & DP_DOWN_REP_MSG_RDY) {
                ret = drm_dp_mst_handle_down_rep(mgr);
                *handled = true;
+               ack[1] |= DP_DOWN_REP_MSG_RDY;
        }
 
        if (esi[1] & DP_UP_REQ_MSG_RDY) {
                ret |= drm_dp_mst_handle_up_req(mgr);
                *handled = true;
+               ack[1] |= DP_UP_REQ_MSG_RDY;
        }
 
-       drm_dp_mst_kick_tx(mgr);
        return ret;
 }
-EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
 
+/**
+ * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
+ * @mgr: manager to notify irq for.
+ *
+ * This should be called from the driver when mst irq event is handled
+ * and acked. Note that new down request should only be sent when
+ * previous message transaction is completed. Source is not supposed to generate
+ * interleaved message transactions.
+ */
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
+{
+       struct drm_dp_sideband_msg_tx *txmsg;
+       bool kick = true;
+
+       mutex_lock(&mgr->qlock);
+       txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+                                        struct drm_dp_sideband_msg_tx, next);
+       /* If last transaction is not completed yet*/
+       if (!txmsg ||
+           txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+           txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+               kick = false;
+       mutex_unlock(&mgr->qlock);
+
+       if (kick)
+               drm_dp_mst_kick_tx(mgr);
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
 /**
  * drm_dp_mst_detect_port() - get connection status for an MST port
  * @connector: DRM connector for this port
index dd636c2..68c0692 100644 (file)
@@ -3804,9 +3804,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
 {
        bool handled = false;
 
-       drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
-       if (handled)
-               ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+       drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
 
        if (esi[1] & DP_CP_IRQ) {
                intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@ -3881,6 +3879,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 
                if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
                        drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+
+               if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
+                       drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
        }
 
        return link_ok;
index 8622e3c..a7073bd 100644 (file)
@@ -815,8 +815,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
 bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
 
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
+                                   const u8 *esi,
+                                   u8 *ack,
+                                   bool *handled);
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
 
 int
 drm_dp_mst_detect_port(struct drm_connector *connector,