drm/amd/display: Fork thread to offload work of hpd_rx_irq
author jsg <jsg@openbsd.org>
Mon, 1 Aug 2022 16:00:11 +0000 (16:00 +0000)
committer jsg <jsg@openbsd.org>
Mon, 1 Aug 2022 16:00:11 +0000 (16:00 +0000)
From Wayne Lin
e980e1d978e0eb4c0399cff37f175779237db53b in linux 5.15.y/5.15.58
8e794421bc981586d0af4e959ec76d668c793a55 in mainline linux

sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.h
sys/dev/pci/drm/amd/display/dc/core/dc_link_dp.c
sys/dev/pci/drm/amd/display/dc/dc_link.h
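
In outline, the change moves the slow parts of hpd_rx_irq handling (DP automated-test requests and link-loss recovery) out of the irq handler and onto a per-link single-threaded workqueue, so they run where dc_lock and sink re-detection can be taken without stalling the handler. A condensed sketch of that offload pattern follows; it is illustrative only, and the my_* names are hypothetical stand-ins rather than driver symbols (the driver's own versions are in the amdgpu_dm.c hunks below).

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Stand-in for union hpd_irq_data: the irq payload is copied by value. */
struct my_irq_data { u8 raw[16]; };

struct my_offload_work {
        struct work_struct work;        /* embedded so container_of() works */
        struct my_irq_data data;
};

static void my_offload_handler(struct work_struct *work)
{
        struct my_offload_work *ow =
                container_of(work, struct my_offload_work, work);

        /* Slow, sleepable handling of ow->data goes here. */

        kfree(ow);                      /* each work item frees itself */
}

/*
 * Called from the hpd_rx handler.  GFP_KERNEL matches the diff below,
 * since amdgpu_dm's hpd handlers already run in process context.
 */
static void my_schedule_offload(struct workqueue_struct *wq,
                                const struct my_irq_data *data)
{
        struct my_offload_work *ow = kzalloc(sizeof(*ow), GFP_KERNEL);

        if (!ow)
                return;

        INIT_WORK(&ow->work, my_offload_handler);
        ow->data = *data;
        queue_work(wq, &ow->work);
}

One single-threaded workqueue per link, as hpd_rx_irq_create_workqueue() sets up below, keeps events for a given link ordered while still letting different links be handled in parallel.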

sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c3bbeb5..b905e16 100644
@@ -1238,6 +1238,83 @@ static void vblank_control_worker(struct work_struct *work)
 }
 
 #endif
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+       struct hpd_rx_irq_offload_work *offload_work;
+       struct amdgpu_dm_connector *aconnector;
+       struct dc_link *dc_link;
+       struct amdgpu_device *adev;
+       enum dc_connection_type new_connection_type = dc_connection_none;
+       unsigned long flags;
+
+       offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+       aconnector = offload_work->offload_wq->aconnector;
+
+       if (!aconnector) {
+               DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+               goto skip;
+       }
+
+       adev = drm_to_adev(aconnector->base.dev);
+       dc_link = aconnector->dc_link;
+
+       mutex_lock(&aconnector->hpd_lock);
+       if (!dc_link_detect_sink(dc_link, &new_connection_type))
+               DRM_ERROR("KMS: Failed to detect connector\n");
+       mutex_unlock(&aconnector->hpd_lock);
+
+       if (new_connection_type == dc_connection_none)
+               goto skip;
+
+       if (amdgpu_in_reset(adev))
+               goto skip;
+
+       mutex_lock(&adev->dm.dc_lock);
+       if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+               dc_link_dp_handle_automated_test(dc_link);
+       else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+                       hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+                       dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+               dc_link_dp_handle_link_loss(dc_link);
+               spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+               offload_work->offload_wq->is_handling_link_loss = false;
+               spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+       }
+       mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+       kfree(offload_work);
+
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+       int max_caps = dc->caps.max_links;
+       int i = 0;
+       struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+       hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+       if (!hpd_rx_offload_wq)
+               return NULL;
+
+
+       for (i = 0; i < max_caps; i++) {
+               hpd_rx_offload_wq[i].wq =
+                                   create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+               if (hpd_rx_offload_wq[i].wq == NULL) {
+                       DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+                       return NULL;
+               }
+
+               mtx_init(&hpd_rx_offload_wq[i].offload_lock, IPL_TTY);
+       }
+
+       return hpd_rx_offload_wq;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -1364,6 +1441,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        dc_hardware_init(adev->dm.dc);
 
+       adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+       if (!adev->dm.hpd_rx_offload_wq) {
+               DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+               goto error;
+       }
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
                struct dc_phy_addr_space_config pa_config;
@@ -1543,6 +1626,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
                adev->dm.freesync_module = NULL;
        }
 
+       if (adev->dm.hpd_rx_offload_wq) {
+               for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+                       if (adev->dm.hpd_rx_offload_wq[i].wq) {
+                               destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+                               adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+                       }
+               }
+
+               kfree(adev->dm.hpd_rx_offload_wq);
+               adev->dm.hpd_rx_offload_wq = NULL;
+       }
+
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
 
@@ -2162,6 +2257,16 @@ context_alloc_fail:
        return res;
 }
 
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+       int i;
+
+       if (dm->hpd_rx_offload_wq) {
+               for (i = 0; i < dm->dc->caps.max_links; i++)
+                       flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+       }
+}
+
 static int dm_suspend(void *handle)
 {
        struct amdgpu_device *adev = handle;
@@ -2183,6 +2288,8 @@ static int dm_suspend(void *handle)
 
                amdgpu_dm_irq_suspend(adev);
 
+               hpd_rx_irq_work_suspend(dm);
+
                return ret;
        }
 
@@ -2193,6 +2300,8 @@ static int dm_suspend(void *handle)
 
        amdgpu_dm_irq_suspend(adev);
 
+       hpd_rx_irq_work_suspend(dm);
+
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
@@ -2871,8 +2980,7 @@ static void handle_hpd_irq(void *param)
 
 }
 
-
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 {
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
@@ -2950,6 +3058,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
 }
 
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+                                                       union hpd_irq_data hpd_irq_data)
+{
+       struct hpd_rx_irq_offload_work *offload_work =
+                               kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+       if (!offload_work) {
+               DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+               return;
+       }
+
+       INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+       offload_work->data = hpd_irq_data;
+       offload_work->offload_wq = offload_wq;
+
+       queue_work(offload_wq->wq, &offload_work->work);
+       DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
 static void handle_hpd_rx_irq(void *param)
 {
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@@ -2961,14 +3088,16 @@ static void handle_hpd_rx_irq(void *param)
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;
-       bool lock_flag = 0;
+       bool link_loss = false;
+       bool has_left_work = false;
+       int idx = aconnector->base.index;
+       struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
        if (adev->dm.disable_hpd_irq)
                return;
 
-
        /*
         * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
@@ -2976,43 +3105,41 @@ static void handle_hpd_rx_irq(void *param)
         */
        mutex_lock(&aconnector->hpd_lock);
 
-       read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+       result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+                                               &link_loss, true, &has_left_work);
 
-       if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
-               (dc_link->type == dc_connection_mst_branch)) {
-               if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
-                       result = true;
-                       dm_handle_hpd_rx_irq(aconnector);
-                       goto out;
-               } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-                       result = false;
-                       dm_handle_hpd_rx_irq(aconnector);
+       if (!has_left_work)
+               goto out;
+
+       if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+               schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+               goto out;
+       }
+
+       if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+               if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+                       hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+                       dm_handle_mst_sideband_msg(aconnector);
                        goto out;
                }
-       }
 
-       /*
-        * TODO: We need the lock to avoid touching DC state while it's being
-        * modified during automated compliance testing, or when link loss
-        * happens. While this should be split into subhandlers and proper
-        * interfaces to avoid having to conditionally lock like this in the
-        * outer layer, we need this workaround temporarily to allow MST
-        * lightup in some scenarios to avoid timeout.
-        */
-       if (!amdgpu_in_reset(adev) &&
-           (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
-            hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
-               mutex_lock(&adev->dm.dc_lock);
-               lock_flag = 1;
-       }
+               if (link_loss) {
+                       bool skip = false;
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-       result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
-       result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
-       if (!amdgpu_in_reset(adev) && lock_flag)
-               mutex_unlock(&adev->dm.dc_lock);
+                       spin_lock(&offload_wq->offload_lock);
+                       skip = offload_wq->is_handling_link_loss;
+
+                       if (!skip)
+                               offload_wq->is_handling_link_loss = true;
+
+                       spin_unlock(&offload_wq->offload_lock);
+
+                       if (!skip)
+                               schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+                       goto out;
+               }
+       }
 
 out:
        if (result && !is_mst_root_connector) {
@@ -3097,6 +3224,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
+
+                       if (adev->dm.hpd_rx_offload_wq)
+                               adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                                       aconnector;
                }
        }
 }
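
Note the de-duplication in the link-loss path above: handle_hpd_rx_irq() test-and-sets is_handling_link_loss under offload_lock before queueing, and dm_handle_hpd_rx_offload_work() clears it only after dc_link_dp_handle_link_loss() has run, so at most one link-loss work item is in flight per link. A minimal sketch of that guard, with hypothetical my_* names standing in for the offload work queue fields:

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_offload_wq {
        spinlock_t lock;        /* mirrors offload_lock */
        bool busy;              /* mirrors is_handling_link_loss */
};

/* Producer side: queue link-loss work only when this returns true. */
static bool my_try_claim(struct my_offload_wq *wq)
{
        bool claimed = false;

        spin_lock(&wq->lock);
        if (!wq->busy) {
                wq->busy = true;
                claimed = true;
        }
        spin_unlock(&wq->lock);

        return claimed;
}

/* Worker side: release the claim once the link-loss handling is done. */
static void my_release(struct my_offload_wq *wq)
{
        unsigned long flags;

        spin_lock_irqsave(&wq->lock, flags);
        wq->busy = false;
        spin_unlock_irqrestore(&wq->lock, flags);
}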
sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 54d7e84..24231c9 100644
@@ -171,6 +171,48 @@ struct dal_allocation {
        u64 gpu_addr;
 };
 
+/**
+ * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
+ * offload work
+ */
+struct hpd_rx_irq_offload_work_queue {
+       /**
+        * @wq: workqueue structure to queue offload work.
+        */
+       struct workqueue_struct *wq;
+       /**
+        * @offload_lock: To protect fields of offload work queue.
+        */
+       spinlock_t offload_lock;
+       /**
+        * @is_handling_link_loss: Used to prevent inserting link loss event when
+        * we're handling link loss
+        */
+       bool is_handling_link_loss;
+       /**
+        * @aconnector: The aconnector that this work queue is attached to
+        */
+       struct amdgpu_dm_connector *aconnector;
+};
+
+/**
+ * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
+ */
+struct hpd_rx_irq_offload_work {
+       /**
+        * @work: offload work
+        */
+       struct work_struct work;
+       /**
+        * @data: reference irq data which is used while handling offload work
+        */
+       union hpd_irq_data data;
+       /**
+        * @offload_wq: offload work queue that this work is queued to
+        */
+       struct hpd_rx_irq_offload_work_queue *offload_wq;
+};
+
 /**
  * struct amdgpu_display_manager - Central amdgpu display manager device
  *
@@ -461,7 +503,12 @@ struct amdgpu_display_manager {
         */
        struct crc_rd_work *crc_rd_wrk;
 #endif
-
+       /**
+        * @hpd_rx_offload_wq:
+        *
+        * Work queue to offload works of hpd_rx_irq
+        */
+       struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
        /**
         * @mst_encoders:
         *
sys/dev/pci/drm/amd/display/dc/core/dc_link_dp.c
index e6dec86..2b34b94 100644
@@ -2075,7 +2075,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
        return max_link_cap;
 }
 
-enum dc_status read_hpd_rx_irq_data(
+static enum dc_status read_hpd_rx_irq_data(
        struct dc_link *link,
        union hpd_irq_data *irq_data)
 {
@@ -3257,7 +3257,7 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
        }
 }
 
-static bool handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
                                                        bool defer_handling, bool *has_left_work)
 {
        union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
@@ -3379,11 +3379,6 @@ static bool handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_
        return status;
 }
 
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
-{
-       return handle_hpd_rx_irq(link, out_hpd_irq_dpcd_data, out_link_loss, false, NULL);
-}
-
 /*query dpcd for version and mst cap addresses*/
 bool is_mst_supported(struct dc_link *link)
 {
sys/dev/pci/drm/amd/display/dc/dc_link.h
index bde6bcb..8ad431f 100644
@@ -296,7 +296,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
  * false - no change in Downstream port status. No further action required
  * from DM. */
 bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
-               union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+               union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+               bool defer_handling, bool *has_left_work);
 
 /*
  * On eDP links this function call will stall until T12 has elapsed.
@@ -305,9 +306,6 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
  */
 bool dc_link_wait_for_t12(struct dc_link *link);
 
-enum dc_status read_hpd_rx_irq_data(
-       struct dc_link *link,
-       union hpd_irq_data *irq_data);
 void dc_link_dp_handle_automated_test(struct dc_link *link);
 void dc_link_dp_handle_link_loss(struct dc_link *link);
 bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
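
The net dc_link API change in the last two files: the internal handle_hpd_rx_irq() worker is exported as dc_link_handle_hpd_rx_irq() with the extra defer_handling/has_left_work parameters, while the old three-argument wrapper and the read_hpd_rx_irq_data() export are dropped. Judging from the removed wrapper, a caller that wants the previous fully synchronous behaviour passes defer_handling=false and a NULL has_left_work; amdgpu_dm now passes true and acts on whatever is left over. A brief call sketch (link and the locals are placeholders, not code from the patch):

        union hpd_irq_data irq_data;
        bool link_loss = false, has_left_work = false;

        /* Previous behaviour: detect and handle everything in one call. */
        dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss, false, NULL);

        /* amdgpu_dm's new usage: read status only, then decide from
         * irq_data/link_loss/has_left_work what to push to the offload
         * workqueue (see handle_hpd_rx_irq() above). */
        dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss, true, &has_left_work);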