-# $OpenBSD: GENERIC,v 1.280 2024/01/19 06:59:10 mlarkin Exp $
+# $OpenBSD: GENERIC,v 1.281 2024/01/22 18:54:01 kettenis Exp $
#
# GENERIC machine description file
#
wskbd* at apldckbd? mux 1
apldcms* at apldchidev?
wsmouse* at apldcms? mux 0
+apldcp* at fdt?
apldma* at fdt?
apldog* at fdt? early 1
+apldrm* at fdt?
+drm* at apldrm?
+wsdisplay* at apldrm?
aplefuse* at fdt? early 1
apliic* at fdt?
iic* at apliic?
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/soc/apple/rtkit.h>
+
+#include "afk.h"
+#include "trace.h"
+
+struct afk_receive_message_work {
+ struct apple_dcp_afkep *ep;
+ u64 message;
+ struct work_struct work;
+};
+
+#define RBEP_TYPE GENMASK(63, 48)
+
+enum rbep_msg_type {
+ RBEP_INIT = 0x80,
+ RBEP_INIT_ACK = 0xa0,
+ RBEP_GETBUF = 0x89,
+ RBEP_GETBUF_ACK = 0xa1,
+ RBEP_INIT_TX = 0x8a,
+ RBEP_INIT_RX = 0x8b,
+ RBEP_START = 0xa3,
+ RBEP_START_ACK = 0x86,
+ RBEP_SEND = 0xa2,
+ RBEP_RECV = 0x85,
+ RBEP_SHUTDOWN = 0xc0,
+ RBEP_SHUTDOWN_ACK = 0xc1,
+};
+
+#define BLOCK_SHIFT 6
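+/*
+ * Buffer sizes and offsets in GETBUF and INIT_TX/RX messages are given in
+ * 64-byte (1 << BLOCK_SHIFT) blocks; queue entries are padded to the same
+ * block size.
+ */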
+
+#define GETBUF_SIZE GENMASK(31, 16)
+#define GETBUF_TAG GENMASK(15, 0)
+#define GETBUF_ACK_DVA GENMASK(47, 0)
+
+#define INITRB_OFFSET GENMASK(47, 32)
+#define INITRB_SIZE GENMASK(31, 16)
+#define INITRB_TAG GENMASK(15, 0)
+
+#define SEND_WPTR GENMASK(31, 0)
+
+static void afk_send(struct apple_dcp_afkep *ep, u64 message)
+{
+ dcp_send_message(ep->dcp, ep->endpoint, message);
+}
+
+struct apple_dcp_afkep *afk_init(struct apple_dcp *dcp, u32 endpoint,
+ const struct apple_epic_service_ops *ops)
+{
+ struct apple_dcp_afkep *afkep;
+ int ret;
+
+ afkep = devm_kzalloc(dcp->dev, sizeof(*afkep), GFP_KERNEL);
+ if (!afkep)
+ return ERR_PTR(-ENOMEM);
+
+ afkep->ops = ops;
+ afkep->dcp = dcp;
+ afkep->endpoint = endpoint;
+ afkep->wq = alloc_ordered_workqueue("apple-dcp-afkep%02x",
+ WQ_MEM_RECLAIM, endpoint);
+ if (!afkep->wq) {
+ ret = -ENOMEM;
+ goto out_free_afkep;
+ }
+
+ // TODO: devm_ for wq
+
+ init_completion(&afkep->started);
+ init_completion(&afkep->stopped);
+ mtx_init(&afkep->lock, IPL_TTY);
+
+ return afkep;
+
+out_free_afkep:
+ devm_kfree(dcp->dev, afkep);
+ return ERR_PTR(ret);
+}
+
+int afk_start(struct apple_dcp_afkep *ep)
+{
+ int ret;
+
+ reinit_completion(&ep->started);
+ apple_rtkit_start_ep(ep->dcp->rtk, ep->endpoint);
+ afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_INIT));
+
+ ret = wait_for_completion_timeout(&ep->started, msecs_to_jiffies(1000));
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ else
+ return 0;
+}
+
+static void afk_getbuf(struct apple_dcp_afkep *ep, u64 message)
+{
+ u16 size = FIELD_GET(GETBUF_SIZE, message) << BLOCK_SHIFT;
+ u16 tag = FIELD_GET(GETBUF_TAG, message);
+ u64 reply;
+
+ trace_afk_getbuf(ep, size, tag);
+
+ if (ep->bfr) {
+ dev_err(ep->dcp->dev,
+ "Got GETBUF message but buffer already exists\n");
+ return;
+ }
+
+ ep->bfr = dmam_alloc_coherent(ep->dcp->dev, size, &ep->bfr_dma,
+ GFP_KERNEL);
+ if (!ep->bfr) {
+ dev_err(ep->dcp->dev, "Failed to allocate %d bytes buffer\n",
+ size);
+ return;
+ }
+
+ ep->bfr_size = size;
+ ep->bfr_tag = tag;
+
+ reply = FIELD_PREP(RBEP_TYPE, RBEP_GETBUF_ACK);
+ reply |= FIELD_PREP(GETBUF_ACK_DVA, ep->bfr_dma);
+ afk_send(ep, reply);
+}
+
+static void afk_init_rxtx(struct apple_dcp_afkep *ep, u64 message,
+ struct afk_ringbuffer *bfr)
+{
+ u16 base = FIELD_GET(INITRB_OFFSET, message) << BLOCK_SHIFT;
+ u16 size = FIELD_GET(INITRB_SIZE, message) << BLOCK_SHIFT;
+ u16 tag = FIELD_GET(INITRB_TAG, message);
+ u32 bufsz, end;
+
+ if (tag != ep->bfr_tag) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: expected tag 0x%x but got 0x%x",
+ ep->endpoint, ep->bfr_tag, tag);
+ return;
+ }
+
+ if (bfr->ready) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: buffer is already initialized\n",
+ ep->endpoint);
+ return;
+ }
+
+ if (base >= ep->bfr_size) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: requested base 0x%x >= max size 0x%lx",
+ ep->endpoint, base, ep->bfr_size);
+ return;
+ }
+
+ end = base + size;
+ if (end > ep->bfr_size) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: requested end 0x%x > max size 0x%lx",
+ ep->endpoint, end, ep->bfr_size);
+ return;
+ }
+
+ bfr->hdr = ep->bfr + base;
+ bufsz = le32_to_cpu(bfr->hdr->bufsz);
+ if (bufsz + sizeof(*bfr->hdr) != size) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: ring buffer size 0x%x != expected 0x%lx",
+ ep->endpoint, bufsz, size - sizeof(*bfr->hdr));
+ return;
+ }
+
+ bfr->buf = bfr->hdr + 1;
+ bfr->bufsz = bufsz;
+ bfr->ready = true;
+
+ if (ep->rxbfr.ready && ep->txbfr.ready)
+ afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_START));
+}
+
+static const struct apple_epic_service_ops *
+afk_match_service(struct apple_dcp_afkep *ep, const char *name)
+{
+ const struct apple_epic_service_ops *ops;
+
+ if (!name[0])
+ return NULL;
+ if (!ep->ops)
+ return NULL;
+
+ for (ops = ep->ops; ops->name[0]; ops++) {
+ if (strcmp(ops->name, name))
+ continue;
+
+ return ops;
+ }
+
+ return NULL;
+}
+
+static struct apple_epic_service *afk_epic_find_service(struct apple_dcp_afkep *ep,
+ u32 channel)
+{
+ for (u32 i = 0; i < ep->num_channels; i++)
+ if (ep->services[i].enabled && ep->services[i].channel == channel)
+ return &ep->services[i];
+
+ return NULL;
+}
+
+static void afk_recv_handle_init(struct apple_dcp_afkep *ep, u32 channel,
+ u8 *payload, size_t payload_size)
+{
+ char name[32];
+ s64 epic_unit = -1;
+ u32 ch_idx;
+ const char *service_name = name;
+ const char *epic_name = NULL, *epic_class = NULL;
+ const struct apple_epic_service_ops *ops;
+ struct dcp_parse_ctx ctx;
+ u8 *props = payload + sizeof(name);
+ size_t props_size = payload_size - sizeof(name);
+
+ WARN_ON(afk_epic_find_service(ep, channel));
+
+ if (payload_size < sizeof(name)) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: payload too small: %lx\n",
+ ep->endpoint, payload_size);
+ return;
+ }
+
+ if (ep->num_channels >= AFK_MAX_CHANNEL) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: too many enabled services!\n",
+ ep->endpoint);
+ return;
+ }
+
+ strlcpy(name, payload, sizeof(name));
+
+ /*
+ * In DCP firmware 13.2 the interface-name is reported as the service
+ * name, which starts with "dispext%d" and uses -1 as the ID for "dcp".
+ * The 12.3 firmware used EPICProviderClass instead. If the init call
+ * carries props, parse them and use EPICProviderClass to match the
+ * service.
+ */
+ if (props_size > 36) {
+ int ret = parse(props, props_size, &ctx);
+ if (ret) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: Failed to parse service init props for %s\n",
+ ep->endpoint, name);
+ return;
+ }
+ ret = parse_epic_service_init(&ctx, &epic_name, &epic_class, &epic_unit);
+ if (ret) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: failed to extract init props: %d\n",
+ ep->endpoint, ret);
+ return;
+ }
+ service_name = epic_class;
+ } else {
+ service_name = name;
+ }
+
+ ops = afk_match_service(ep, service_name);
+ if (!ops) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: unable to match service %s on channel %d\n",
+ ep->endpoint, service_name, channel);
+ goto free;
+ }
+
+ ch_idx = ep->num_channels++;
+ mtx_init(&ep->services[ch_idx].lock, IPL_TTY);
+ ep->services[ch_idx].enabled = true;
+ ep->services[ch_idx].ops = ops;
+ ep->services[ch_idx].ep = ep;
+ ep->services[ch_idx].channel = channel;
+ ep->services[ch_idx].cmd_tag = 0;
+ ops->init(&ep->services[ch_idx], epic_name, epic_class, epic_unit);
+ dev_info(ep->dcp->dev, "AFK[ep:%02x]: new service %s on channel %d\n",
+ ep->endpoint, service_name, channel);
+free:
+ kfree(epic_name);
+ kfree(epic_class);
+}
+
+static void afk_recv_handle_teardown(struct apple_dcp_afkep *ep, u32 channel)
+{
+ struct apple_epic_service *service;
+ const struct apple_epic_service_ops *ops;
+ unsigned long flags;
+
+ service = afk_epic_find_service(ep, channel);
+ if (!service) {
+ dev_warn(ep->dcp->dev, "AFK[ep:%02x]: teardown for disabled channel %u\n",
+ ep->endpoint, channel);
+ return;
+ }
+
+ // TODO: think through what locking is necessary
+ spin_lock_irqsave(&service->lock, flags);
+ service->enabled = false;
+ ops = service->ops;
+ spin_unlock_irqrestore(&service->lock, flags);
+
+ if (ops->teardown)
+ ops->teardown(service);
+}
+
+static void afk_recv_handle_reply(struct apple_dcp_afkep *ep, u32 channel,
+ u16 tag, void *payload, size_t payload_size)
+{
+ struct epic_cmd *cmd = payload;
+ struct apple_epic_service *service;
+ unsigned long flags;
+ u8 idx = tag & 0xff;
+ void *rxbuf, *txbuf;
+ dma_addr_t rxbuf_dma, txbuf_dma;
+ size_t rxlen, txlen;
+
+ service = afk_epic_find_service(ep, channel);
+ if (!service) {
+ dev_warn(ep->dcp->dev, "AFK[ep:%02x]: command reply on disabled channel %u\n",
+ ep->endpoint, channel);
+ return;
+ }
+
+ if (payload_size < sizeof(*cmd)) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: command reply on channel %d too small: %ld\n",
+ ep->endpoint, channel, payload_size);
+ return;
+ }
+
+ if (idx >= MAX_PENDING_CMDS) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: command reply on channel %d out of range: %d\n",
+ ep->endpoint, channel, idx);
+ return;
+ }
+
+ spin_lock_irqsave(&service->lock, flags);
+ if (service->cmds[idx].done) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: command reply on channel %d already handled\n",
+ ep->endpoint, channel);
+ spin_unlock_irqrestore(&service->lock, flags);
+ return;
+ }
+
+ if (tag != service->cmds[idx].tag) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: command reply on channel %d has invalid tag: expected 0x%04x != 0x%04x\n",
+ ep->endpoint, channel, tag, service->cmds[idx].tag);
+ spin_unlock_irqrestore(&service->lock, flags);
+ return;
+ }
+
+ service->cmds[idx].done = true;
+ service->cmds[idx].retcode = le32_to_cpu(cmd->retcode);
+ if (service->cmds[idx].free_on_ack) {
+ /* defer freeing until we're no longer in atomic context */
+ rxbuf = service->cmds[idx].rxbuf;
+ txbuf = service->cmds[idx].txbuf;
+ rxlen = service->cmds[idx].rxlen;
+ txlen = service->cmds[idx].txlen;
+ rxbuf_dma = service->cmds[idx].rxbuf_dma;
+ txbuf_dma = service->cmds[idx].txbuf_dma;
+ bitmap_release_region(service->cmd_map, idx, 0);
+ } else {
+ rxbuf = txbuf = NULL;
+ rxlen = txlen = 0;
+ }
+ if (service->cmds[idx].completion)
+ complete(service->cmds[idx].completion);
+
+ spin_unlock_irqrestore(&service->lock, flags);
+
+ if (rxbuf && rxlen)
+ dma_free_coherent(ep->dcp->dev, rxlen, rxbuf, rxbuf_dma);
+ if (txbuf && txlen)
+ dma_free_coherent(ep->dcp->dev, txlen, txbuf, txbuf_dma);
+}
+
+struct epic_std_service_ap_call {
+ __le32 unk0;
+ __le32 unk1;
+ __le32 type;
+ __le32 len;
+ __le32 magic;
+ u8 _unk[48];
+} __attribute__((packed));
+
+static void afk_recv_handle_std_service(struct apple_dcp_afkep *ep, u32 channel,
+ u32 type, struct epic_hdr *ehdr,
+ struct epic_sub_hdr *eshdr,
+ void *payload, size_t payload_size)
+{
+ struct apple_epic_service *service = afk_epic_find_service(ep, channel);
+
+ if (!service) {
+ dev_warn(ep->dcp->dev,
+ "AFK[ep:%02x]: std service notify on disabled channel %u\n",
+ ep->endpoint, channel);
+ return;
+ }
+
+ if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_NOTIFY) {
+ struct epic_std_service_ap_call *call = payload;
+ size_t call_size;
+ void *reply;
+ int ret;
+
+ if (payload_size < sizeof(*call))
+ return;
+
+ call_size = le32_to_cpu(call->len);
+ if (payload_size < sizeof(*call) + call_size)
+ return;
+
+ if (!service->ops->call)
+ return;
+ reply = kzalloc(payload_size, GFP_KERNEL);
+ if (!reply)
+ return;
+
+ ret = service->ops->call(service, le32_to_cpu(call->type),
+ payload + sizeof(*call), call_size,
+ reply + sizeof(*call), call_size);
+ if (ret) {
+ kfree(reply);
+ return;
+ }
+
+ memcpy(reply, call, sizeof(*call));
+ afk_send_epic(ep, channel, le16_to_cpu(eshdr->tag),
+ EPIC_TYPE_NOTIFY_ACK, EPIC_CAT_REPLY,
+ EPIC_SUBTYPE_STD_SERVICE, reply, payload_size);
+ kfree(reply);
+
+ return;
+ }
+
+ if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_REPORT) {
+ if (service->ops->report)
+ service->ops->report(service, le16_to_cpu(eshdr->type),
+ payload, payload_size);
+ return;
+ }
+
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: channel %d received unhandled standard service message: %x / %x\n",
+ ep->endpoint, channel, type, eshdr->category);
+ print_hex_dump(KERN_INFO, "AFK: ", DUMP_PREFIX_NONE, 16, 1, payload,
+ payload_size, true);
+}
+
+static void afk_recv_handle(struct apple_dcp_afkep *ep, u32 channel, u32 type,
+ u8 *data, size_t data_size)
+{
+ struct apple_epic_service *service;
+ struct epic_hdr *ehdr = (struct epic_hdr *)data;
+ struct epic_sub_hdr *eshdr =
+ (struct epic_sub_hdr *)(data + sizeof(*ehdr));
+ u16 subtype = le16_to_cpu(eshdr->type);
+ u8 *payload = data + sizeof(*ehdr) + sizeof(*eshdr);
+ size_t payload_size;
+
+ if (data_size < sizeof(*ehdr) + sizeof(*eshdr)) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: payload too small: %lx\n",
+ ep->endpoint, data_size);
+ return;
+ }
+ payload_size = data_size - sizeof(*ehdr) - sizeof(*eshdr);
+
+ trace_afk_recv_handle(ep, channel, type, data_size, ehdr, eshdr);
+
+ service = afk_epic_find_service(ep, channel);
+
+ if (!service) {
+ if (type != EPIC_TYPE_NOTIFY && type != EPIC_TYPE_REPLY) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: expected notify but got 0x%x on channel %d\n",
+ ep->endpoint, type, channel);
+ return;
+ }
+ if (eshdr->category != EPIC_CAT_REPORT) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: expected report but got 0x%x on channel %d\n",
+ ep->endpoint, eshdr->category, channel);
+ return;
+ }
+ if (subtype == EPIC_SUBTYPE_TEARDOWN) {
+ dev_dbg(ep->dcp->dev,
+ "AFK[ep:%02x]: teardown without service on channel %d\n",
+ ep->endpoint, channel);
+ return;
+ }
+ if (subtype != EPIC_SUBTYPE_ANNOUNCE) {
+ dev_err(ep->dcp->dev,
+ "AFK[ep:%02x]: expected announce but got 0x%x on channel %d\n",
+ ep->endpoint, subtype, channel);
+ return;
+ }
+
+ return afk_recv_handle_init(ep, channel, payload, payload_size);
+ }
+
+ if (!service) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: channel %d has no service\n",
+ ep->endpoint, channel);
+ return;
+ }
+
+ if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_REPORT &&
+ subtype == EPIC_SUBTYPE_TEARDOWN)
+ return afk_recv_handle_teardown(ep, channel);
+
+ if (type == EPIC_TYPE_REPLY && eshdr->category == EPIC_CAT_REPLY)
+ return afk_recv_handle_reply(ep, channel,
+ le16_to_cpu(eshdr->tag), payload,
+ payload_size);
+
+ if (subtype == EPIC_SUBTYPE_STD_SERVICE)
+ return afk_recv_handle_std_service(
+ ep, channel, type, ehdr, eshdr, payload, payload_size);
+
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: channel %d received unhandled message "
+ "(type %x subtype %x)\n", ep->endpoint, channel, type, subtype);
+ print_hex_dump(KERN_INFO, "AFK: ", DUMP_PREFIX_NONE, 16, 1, payload,
+ payload_size, true);
+}
+
+static bool afk_recv(struct apple_dcp_afkep *ep)
+{
+ struct afk_qe *hdr;
+ u32 rptr, wptr;
+ u32 magic, size, channel, type;
+
+ if (!ep->rxbfr.ready) {
+ dev_err(ep->dcp->dev, "AFK[ep:%02x]: got RECV but not ready\n",
+ ep->endpoint);
+ return false;
+ }
+
+ rptr = le32_to_cpu(ep->rxbfr.hdr->rptr);
+ wptr = le32_to_cpu(ep->rxbfr.hdr->wptr);
+ trace_afk_recv_rwptr_pre(ep, rptr, wptr);
+
+ if (rptr == wptr)
+ return false;
+
+ if (rptr > (ep->rxbfr.bufsz - sizeof(*hdr))) {
+ dev_warn(ep->dcp->dev,
+ "AFK[ep:%02x]: rptr out of bounds: 0x%x > 0x%lx\n",
+ ep->endpoint, rptr, ep->rxbfr.bufsz - sizeof(*hdr));
+ return false;
+ }
+
+ dma_rmb();
+
+ hdr = ep->rxbfr.buf + rptr;
+ magic = le32_to_cpu(hdr->magic);
+ size = le32_to_cpu(hdr->size);
+ trace_afk_recv_qe(ep, rptr, magic, size);
+
+ if (magic != QE_MAGIC) {
+ dev_warn(ep->dcp->dev, "AFK[ep:%02x]: invalid queue entry magic: 0x%x\n",
+ ep->endpoint, magic);
+ return false;
+ }
+
+ /*
+ * If there's not enough space left for the payload, the co-processor
+ * inserted a dummy queue entry here and we have to wrap around to the
+ * start of the buffer, which contains the real data.
+ */
+ if (rptr + size + sizeof(*hdr) > ep->rxbfr.bufsz) {
+ rptr = 0;
+ hdr = ep->rxbfr.buf + rptr;
+ magic = le32_to_cpu(hdr->magic);
+ size = le32_to_cpu(hdr->size);
+ trace_afk_recv_qe(ep, rptr, magic, size);
+
+ if (magic != QE_MAGIC) {
+ dev_warn(ep->dcp->dev,
+ "AFK[ep:%02x]: invalid next queue entry magic: 0x%x\n",
+ ep->endpoint, magic);
+ return false;
+ }
+
+ ep->rxbfr.hdr->rptr = cpu_to_le32(rptr);
+ }
+
+ if (rptr + size + sizeof(*hdr) > ep->rxbfr.bufsz) {
+ dev_warn(ep->dcp->dev,
+ "AFK[ep:%02x]: queue entry out of bounds: 0x%lx > 0x%lx\n",
+ ep->endpoint, rptr + size + sizeof(*hdr), ep->rxbfr.bufsz);
+ return false;
+ }
+
+ channel = le32_to_cpu(hdr->channel);
+ type = le32_to_cpu(hdr->type);
+
+ rptr = ALIGN(rptr + sizeof(*hdr) + size, 1 << BLOCK_SHIFT);
+ if (WARN_ON(rptr > ep->rxbfr.bufsz))
+ rptr = 0;
+ if (rptr == ep->rxbfr.bufsz)
+ rptr = 0;
+
+ dma_mb();
+
+ ep->rxbfr.hdr->rptr = cpu_to_le32(rptr);
+ trace_afk_recv_rwptr_post(ep, rptr, wptr);
+
+ /*
+ * TODO: this is theoretically unsafe since DCP could overwrite data
+ * after the read pointer was updated above. Do it anyway since
+ * it avoids 2 problems in the DCP tracer:
+ * 1. the tracer sees replies before the notifies from dcp
+ * 2. the tracer tries to read buffers after they are unmapped.
+ */
+ afk_recv_handle(ep, channel, type, hdr->data, size);
+
+ return true;
+}
+
+static void afk_receive_message_worker(struct work_struct *work_)
+{
+ struct afk_receive_message_work *work;
+ u16 type;
+
+ work = container_of(work_, struct afk_receive_message_work, work);
+
+ type = FIELD_GET(RBEP_TYPE, work->message);
+ switch (type) {
+ case RBEP_INIT_ACK:
+ break;
+
+ case RBEP_START_ACK:
+ complete_all(&work->ep->started);
+ break;
+
+ case RBEP_SHUTDOWN_ACK:
+ complete_all(&work->ep->stopped);
+ break;
+
+ case RBEP_GETBUF:
+ afk_getbuf(work->ep, work->message);
+ break;
+
+ case RBEP_INIT_TX:
+ afk_init_rxtx(work->ep, work->message, &work->ep->txbfr);
+ break;
+
+ case RBEP_INIT_RX:
+ afk_init_rxtx(work->ep, work->message, &work->ep->rxbfr);
+ break;
+
+ case RBEP_RECV:
+ while (afk_recv(work->ep))
+ ;
+ break;
+
+ default:
+ dev_err(work->ep->dcp->dev,
+ "Received unknown AFK message type: 0x%x\n", type);
+ }
+
+ kfree(work);
+}
+
+int afk_receive_message(struct apple_dcp_afkep *ep, u64 message)
+{
+ struct afk_receive_message_work *work;
+
+ // TODO: comment why decoupling from rtkit thread is required here
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->ep = ep;
+ work->message = message;
+ INIT_WORK(&work->work, afk_receive_message_worker);
+ queue_work(ep->wq, &work->work);
+
+ return 0;
+}
+
+int afk_send_epic(struct apple_dcp_afkep *ep, u32 channel, u16 tag,
+ enum epic_type etype, enum epic_category ecat, u8 stype,
+ const void *payload, size_t payload_len)
+{
+ u32 rptr, wptr;
+ struct afk_qe *hdr, *hdr2;
+ struct epic_hdr *ehdr;
+ struct epic_sub_hdr *eshdr;
+ unsigned long flags;
+ size_t total_epic_size, total_size;
+ int ret;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ dma_rmb();
+ rptr = le32_to_cpu(ep->txbfr.hdr->rptr);
+ wptr = le32_to_cpu(ep->txbfr.hdr->wptr);
+ trace_afk_send_rwptr_pre(ep, rptr, wptr);
+ total_epic_size = sizeof(*ehdr) + sizeof(*eshdr) + payload_len;
+ total_size = sizeof(*hdr) + total_epic_size;
+
+ hdr = hdr2 = NULL;
+
+ /*
+ * We need to figure out how to place the headers and the payload
+ * into the ring buffer:
+ * - If the write pointer is in front of the read pointer we just need
+ * enough space in between to store everything.
+ * - If the read pointer has already wrapped around the end of the
+ * buffer we can
+ * a) either store the entire payload at the write pointer if
+ * there's enough space until the end,
+ * b) or just store the queue entry at the write pointer to indicate
+ * that we need to wrap to the start and then store the headers
+ * and the payload at the beginning of the buffer. The queue
+ * header has to be stored twice in this case.
+ * In either case we have to ensure that there's always enough space
+ * so that we don't accidentally overwrite other buffers.
+ */
+ if (wptr < rptr) {
+ /*
+ * If wptr < rptr we can't wrap around and only have to make
+ * sure that there's enough space for the entire payload.
+ */
+ if (wptr + total_size > rptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdr = ep->txbfr.buf + wptr;
+ wptr += sizeof(*hdr);
+ } else {
+ /* We need enough space to place at least a queue entry */
+ if (wptr + sizeof(*hdr) > ep->txbfr.bufsz) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * If we can place a single queue entry but not the full payload
+ * we need to place one queue entry at the end of the ring
+ * buffer and then another one together with the entire
+ * payload at the beginning.
+ */
+ if (wptr + total_size > ep->txbfr.bufsz) {
+ /*
+ * Ensure there's space for the queue entry at the
+ * beginning
+ */
+ if (sizeof(*hdr) > rptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Place two queue entries to indicate to the firmware
+ * that we want to wrap around.
+ */
+ hdr = ep->txbfr.buf + wptr;
+ hdr2 = ep->txbfr.buf;
+ wptr = sizeof(*hdr);
+
+ /* Ensure there's enough space for the entire payload */
+ if (wptr + total_epic_size > rptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ /* We have enough space to place the entire payload */
+ hdr = ep->txbfr.buf + wptr;
+ wptr += sizeof(*hdr);
+ }
+ }
+ /*
+ * At this point we're guaranteed that hdr (and possibly hdr2) point
+ * to a buffer large enough to fit the queue entry and that we have
+ * enough space at wptr to store the payload.
+ */
+
+ hdr->magic = cpu_to_le32(QE_MAGIC);
+ hdr->size = cpu_to_le32(total_epic_size);
+ hdr->channel = cpu_to_le32(channel);
+ hdr->type = cpu_to_le32(etype);
+ if (hdr2)
+ memcpy(hdr2, hdr, sizeof(*hdr));
+
+ ehdr = ep->txbfr.buf + wptr;
+ memset(ehdr, 0, sizeof(*ehdr));
+ ehdr->version = 2;
+ ehdr->seq = cpu_to_le16(ep->qe_seq++);
+ ehdr->timestamp = cpu_to_le64(0);
+ wptr += sizeof(*ehdr);
+
+ eshdr = ep->txbfr.buf + wptr;
+ memset(eshdr, 0, sizeof(*eshdr));
+ eshdr->length = cpu_to_le32(payload_len);
+ eshdr->version = 4;
+ eshdr->category = ecat;
+ eshdr->type = cpu_to_le16(stype);
+ eshdr->timestamp = cpu_to_le64(0);
+ eshdr->tag = cpu_to_le16(tag);
+ if (ecat == EPIC_CAT_REPLY)
+ eshdr->inline_len = cpu_to_le16(payload_len - 4);
+ else
+ eshdr->inline_len = cpu_to_le16(0);
+ wptr += sizeof(*eshdr);
+
+ memcpy(ep->txbfr.buf + wptr, payload, payload_len);
+ wptr += payload_len;
+ wptr = ALIGN(wptr, 1 << BLOCK_SHIFT);
+ if (wptr == ep->txbfr.bufsz)
+ wptr = 0;
+ trace_afk_send_rwptr_post(ep, rptr, wptr);
+
+ ep->txbfr.hdr->wptr = cpu_to_le32(wptr);
+ afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_SEND) |
+ FIELD_PREP(SEND_WPTR, wptr));
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ return ret;
+}
+
+int afk_send_command(struct apple_epic_service *service, u8 type,
+ const void *payload, size_t payload_len, void *output,
+ size_t output_len, u32 *retcode)
+{
+ struct epic_cmd cmd;
+ void *rxbuf, *txbuf;
+ dma_addr_t rxbuf_dma, txbuf_dma;
+ unsigned long flags;
+ int ret, idx;
+ u16 tag;
+ struct apple_dcp_afkep *ep = service->ep;
+ DECLARE_COMPLETION_ONSTACK(completion);
+
+ rxbuf = dma_alloc_coherent(ep->dcp->dev, output_len, &rxbuf_dma,
+ GFP_KERNEL);
+ if (!rxbuf)
+ return -ENOMEM;
+ txbuf = dma_alloc_coherent(ep->dcp->dev, payload_len, &txbuf_dma,
+ GFP_KERNEL);
+ if (!txbuf) {
+ ret = -ENOMEM;
+ goto err_free_rxbuf;
+ }
+
+ memcpy(txbuf, payload, payload_len);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.retcode = cpu_to_le32(0);
+ cmd.rxbuf = cpu_to_le64(rxbuf_dma);
+ cmd.rxlen = cpu_to_le32(output_len);
+ cmd.txbuf = cpu_to_le64(txbuf_dma);
+ cmd.txlen = cpu_to_le32(payload_len);
+
+ spin_lock_irqsave(&service->lock, flags);
+ idx = bitmap_find_free_region(service->cmd_map, MAX_PENDING_CMDS, 0);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto err_unlock;
+ }
+
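+ /*
+ * The command tag packs the pending-command slot index into the low
+ * byte and a rolling per-service sequence number into the high byte;
+ * afk_recv_handle_reply() uses the full tag to reject stale replies
+ * for a reused slot.
+ */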
+ tag = (service->cmd_tag & 0xff) << 8;
+ tag |= idx & 0xff;
+ service->cmd_tag++;
+
+ service->cmds[idx].tag = tag;
+ service->cmds[idx].rxbuf = rxbuf;
+ service->cmds[idx].txbuf = txbuf;
+ service->cmds[idx].rxbuf_dma = rxbuf_dma;
+ service->cmds[idx].txbuf_dma = txbuf_dma;
+ service->cmds[idx].rxlen = output_len;
+ service->cmds[idx].txlen = payload_len;
+ service->cmds[idx].free_on_ack = false;
+ service->cmds[idx].done = false;
+ service->cmds[idx].completion = &completion;
+ init_completion(&completion);
+
+ spin_unlock_irqrestore(&service->lock, flags);
+
+ ret = afk_send_epic(service->ep, service->channel, tag,
+ EPIC_TYPE_COMMAND, EPIC_CAT_COMMAND, type, &cmd,
+ sizeof(cmd));
+ if (ret)
+ goto err_free_cmd;
+
+ ret = wait_for_completion_timeout(&completion,
+ msecs_to_jiffies(MSEC_PER_SEC));
+
+ if (ret <= 0) {
+ spin_lock_irqsave(&service->lock, flags);
+ /*
+ * Check again while we're inside the lock to make sure
+ * the command wasn't completed just after
+ * wait_for_completion_timeout returned.
+ */
+ if (!service->cmds[idx].done) {
+ service->cmds[idx].completion = NULL;
+ service->cmds[idx].free_on_ack = true;
+ spin_unlock_irqrestore(&service->lock, flags);
+ return -ETIMEDOUT;
+ }
+ spin_unlock_irqrestore(&service->lock, flags);
+ }
+
+ ret = 0;
+ if (retcode)
+ *retcode = service->cmds[idx].retcode;
+ if (output && output_len)
+ memcpy(output, rxbuf, output_len);
+
+err_free_cmd:
+ spin_lock_irqsave(&service->lock, flags);
+ bitmap_release_region(service->cmd_map, idx, 0);
+err_unlock:
+ spin_unlock_irqrestore(&service->lock, flags);
+ dma_free_coherent(ep->dcp->dev, payload_len, txbuf, txbuf_dma);
+err_free_rxbuf:
+ dma_free_coherent(ep->dcp->dev, output_len, rxbuf, rxbuf_dma);
+ return ret;
+}
+
+int afk_service_call(struct apple_epic_service *service, u16 group, u32 command,
+ const void *data, size_t data_len, size_t data_pad,
+ void *output, size_t output_len, size_t output_pad)
+{
+ struct epic_service_call *call;
+ void *bfr;
+ size_t bfr_len = max(data_len + data_pad, output_len + output_pad) +
+ sizeof(*call);
+ int ret;
+ u32 retcode;
+ u32 retlen;
+
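+ /*
+ * A single scratch buffer holds the call header plus the larger of the
+ * request and reply payloads; afk_send_command() transmits it and the
+ * reply is written back into the same buffer.
+ */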
+ bfr = kzalloc(bfr_len, GFP_KERNEL);
+ if (!bfr)
+ return -ENOMEM;
+
+ call = bfr;
+
+ memset(call, 0, sizeof(*call));
+ call->group = cpu_to_le16(group);
+ call->command = cpu_to_le32(command);
+ call->data_len = cpu_to_le32(data_len + data_pad);
+ call->magic = cpu_to_le32(EPIC_SERVICE_CALL_MAGIC);
+
+ memcpy(bfr + sizeof(*call), data, data_len);
+
+ ret = afk_send_command(service, EPIC_SUBTYPE_STD_SERVICE, bfr, bfr_len,
+ bfr, bfr_len, &retcode);
+ if (ret)
+ goto out;
+ if (retcode) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (le32_to_cpu(call->magic) != EPIC_SERVICE_CALL_MAGIC ||
+ le16_to_cpu(call->group) != group ||
+ le32_to_cpu(call->command) != command) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ retlen = le32_to_cpu(call->data_len);
+ if (output_len < retlen)
+ retlen = output_len;
+ if (output && output_len) {
+ memset(output, 0, output_len);
+ memcpy(output, bfr + sizeof(*call), retlen);
+ }
+
+out:
+ kfree(bfr);
+ return ret;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * AFK (Apple Firmware Kit) EPIC (EndPoint Interface Client) support
+ */
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#ifndef _DRM_APPLE_DCP_AFK_H
+#define _DRM_APPLE_DCP_AFK_H
+
+#include <linux/completion.h>
+#include <linux/types.h>
+
+#include "dcp.h"
+
+#define AFK_MAX_CHANNEL 16
+#define MAX_PENDING_CMDS 16
+
+struct apple_epic_service_ops;
+struct apple_dcp_afkep;
+
+struct epic_cmd_info {
+ u16 tag;
+
+ void *rxbuf;
+ void *txbuf;
+ dma_addr_t rxbuf_dma;
+ dma_addr_t txbuf_dma;
+ size_t rxlen;
+ size_t txlen;
+
+ u32 retcode;
+ bool done;
+ bool free_on_ack;
+ struct completion *completion;
+};
+
+struct apple_epic_service {
+ const struct apple_epic_service_ops *ops;
+ struct apple_dcp_afkep *ep;
+
+ struct epic_cmd_info cmds[MAX_PENDING_CMDS];
+ DECLARE_BITMAP(cmd_map, MAX_PENDING_CMDS);
+ u8 cmd_tag;
+ spinlock_t lock;
+
+ u32 channel;
+ bool enabled;
+
+ void *cookie;
+};
+
+enum epic_subtype;
+
+struct apple_epic_service_ops {
+ const char name[32];
+
+ void (*init)(struct apple_epic_service *service, const char *name,
+ const char *class, s64 unit);
+ int (*call)(struct apple_epic_service *service, u32 idx,
+ const void *data, size_t data_size, void *reply,
+ size_t reply_size);
+ int (*report)(struct apple_epic_service *service, enum epic_subtype type,
+ const void *data, size_t data_size);
+ void (*teardown)(struct apple_epic_service *service);
+};
+
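+/*
+ * Shared ring buffer header: bufsz, rptr and wptr each occupy their own
+ * 64-byte block thanks to the _pad fields below.
+ */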
+struct afk_ringbuffer_header {
+ __le32 bufsz;
+ u32 unk;
+ u32 _pad1[14];
+ __le32 rptr;
+ u32 _pad2[15];
+ __le32 wptr;
+ u32 _pad3[15];
+};
+
+struct afk_qe {
+#define QE_MAGIC 0x20504f49 // ' POI'
+ __le32 magic;
+ __le32 size;
+ __le32 channel;
+ __le32 type;
+ u8 data[];
+};
+
+struct epic_hdr {
+ u8 version;
+ __le16 seq;
+ u8 _pad;
+ __le32 unk;
+ __le64 timestamp;
+} __attribute__((packed));
+
+struct epic_sub_hdr {
+ __le32 length;
+ u8 version;
+ u8 category;
+ __le16 type;
+ __le64 timestamp;
+ __le16 tag;
+ __le16 unk;
+ __le32 inline_len;
+} __attribute__((packed));
+
+struct epic_cmd {
+ __le32 retcode;
+ __le64 rxbuf;
+ __le64 txbuf;
+ __le32 rxlen;
+ __le32 txlen;
+ u8 rxcookie;
+ u8 txcookie;
+} __attribute__((packed));
+
+struct epic_service_call {
+ u8 _pad0[2];
+ __le16 group;
+ __le32 command;
+ __le32 data_len;
+#define EPIC_SERVICE_CALL_MAGIC 0x69706378
+ __le32 magic;
+ u8 _pad1[48];
+} __attribute__((packed));
+static_assert(sizeof(struct epic_service_call) == 64);
+
+enum epic_type {
+ EPIC_TYPE_NOTIFY = 0,
+ EPIC_TYPE_COMMAND = 3,
+ EPIC_TYPE_REPLY = 4,
+ EPIC_TYPE_NOTIFY_ACK = 8,
+};
+
+enum epic_category {
+ EPIC_CAT_REPORT = 0x00,
+ EPIC_CAT_NOTIFY = 0x10,
+ EPIC_CAT_REPLY = 0x20,
+ EPIC_CAT_COMMAND = 0x30,
+};
+
+enum epic_subtype {
+ EPIC_SUBTYPE_ANNOUNCE = 0x30,
+ EPIC_SUBTYPE_TEARDOWN = 0x32,
+ EPIC_SUBTYPE_STD_SERVICE = 0xc0,
+};
+
+struct afk_ringbuffer {
+ bool ready;
+ struct afk_ringbuffer_header *hdr;
+ u32 rptr;
+ void *buf;
+ size_t bufsz;
+};
+
+struct apple_dcp_afkep {
+ struct apple_dcp *dcp;
+
+ u32 endpoint;
+ struct workqueue_struct *wq;
+
+ struct completion started;
+ struct completion stopped;
+
+ void *bfr;
+ u16 bfr_tag;
+ size_t bfr_size;
+ dma_addr_t bfr_dma;
+
+ struct afk_ringbuffer txbfr;
+ struct afk_ringbuffer rxbfr;
+
+ spinlock_t lock;
+ u16 qe_seq;
+
+ const struct apple_epic_service_ops *ops;
+ struct apple_epic_service services[AFK_MAX_CHANNEL];
+ u32 num_channels;
+};
+
+struct apple_dcp_afkep *afk_init(struct apple_dcp *dcp, u32 endpoint,
+ const struct apple_epic_service_ops *ops);
+int afk_start(struct apple_dcp_afkep *ep);
+int afk_receive_message(struct apple_dcp_afkep *ep, u64 message);
+int afk_send_epic(struct apple_dcp_afkep *ep, u32 channel, u16 tag,
+ enum epic_type etype, enum epic_category ecat, u8 stype,
+ const void *payload, size_t payload_len);
+int afk_send_command(struct apple_epic_service *service, u8 type,
+ const void *payload, size_t payload_len, void *output,
+ size_t output_len, u32 *retcode);
+int afk_service_call(struct apple_epic_service *service, u16 group, u32 command,
+ const void *data, size_t data_len, size_t data_pad,
+ void *output, size_t output_len, size_t output_pad);
+#endif
--- /dev/null
+/* $OpenBSD: apldcp.c,v 1.1 2024/01/22 18:54:01 kettenis Exp $ */
+/*
+ * Copyright (c) 2023 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+
+#include <machine/intr.h>
+#include <machine/bus.h>
+#include <machine/fdt.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/fdt.h>
+#include <dev/ofw/ofw_power.h>
+#include <dev/ofw/ofw_clock.h>
+
+static const void *of_device_get_match_data(const struct device *);
+
+#include "dcp.c"
+
+struct apldcp_softc {
+ struct platform_device sc_dev;
+};
+
+int apldcp_match(struct device *, void *, void *);
+void apldcp_attach(struct device *, struct device *, void *);
+int apldcp_activate(struct device *, int);
+
+const struct cfattach apldcp_ca = {
+ sizeof (struct apldcp_softc), apldcp_match, apldcp_attach,
+ NULL, apldcp_activate
+};
+
+struct cfdriver apldcp_cd = {
+ NULL, "apldcp", DV_DULL
+};
+
+int
+apldcp_match(struct device *parent, void *match, void *aux)
+{
+ struct fdt_attach_args *faa = aux;
+
+ return OF_is_compatible(faa->fa_node, "apple,dcp") ||
+ OF_is_compatible(faa->fa_node, "apple,dcpext");
+}
+
+void
+apldcp_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct apldcp_softc *sc = (struct apldcp_softc *)self;
+ struct fdt_attach_args *faa = aux;
+
+ power_domain_enable(faa->fa_node);
+ reset_deassert_all(faa->fa_node);
+
+ printf("\n");
+
+ sc->sc_dev.faa = faa;
+ platform_device_register(&sc->sc_dev);
+
+ dcp_platform_probe(&sc->sc_dev);
+}
+
+int
+apldcp_activate(struct device *self, int act)
+{
+ int rv;
+
+ switch (act) {
+ case DVACT_QUIESCE:
+ rv = config_activate_children(self, act);
+ dcp_platform_suspend(self);
+ break;
+ case DVACT_WAKEUP:
+ dcp_platform_resume(self);
+ rv = config_activate_children(self, act);
+ break;
+ default:
+ rv = config_activate_children(self, act);
+ break;
+ }
+
+ return rv;
+}
+
+/*
+ * Linux RTKit interfaces.
+ */
+
+#include <arm64/dev/rtkit.h>
+
+struct apple_rtkit_ep {
+ struct apple_rtkit *rtk;
+ uint8_t ep;
+
+ struct task task;
+ uint64_t msg;
+};
+
+struct apple_rtkit {
+ struct rtkit_state *state;
+ struct apple_rtkit_ep ep[64];
+ void *cookie;
+ struct platform_device *pdev;
+ const struct apple_rtkit_ops *ops;
+ struct taskq *tq;
+};
+
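+/*
+ * Translate a co-processor DVA that falls inside the "dcp_data"
+ * reserved-memory region back to a physical address using the
+ * "iommu-addresses" property; installed below as the rtkit logmap hook.
+ */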
+paddr_t
+apple_rtkit_logmap(void *cookie, bus_addr_t addr)
+{
+ struct apple_rtkit *rtk = cookie;
+ int idx, len, node;
+ uint32_t *phandles;
+ uint32_t iommu_addresses[5];
+ bus_addr_t start;
+ bus_size_t size;
+ uint64_t reg[2];
+
+ len = OF_getproplen(rtk->pdev->node, "memory-region");
+ idx = OF_getindex(rtk->pdev->node, "dcp_data", "memory-region-names");
+ if (idx < 0 || idx >= len / sizeof(uint32_t))
+ return addr;
+
+ phandles = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
+ OF_getpropintarray(rtk->pdev->node, "memory-region",
+ phandles, len);
+ node = OF_getnodebyphandle(phandles[idx]);
+ free(phandles, M_TEMP, len);
+
+ if (node == 0)
+ return addr;
+
+ if (!OF_is_compatible(node, "apple,asc-mem"))
+ return addr;
+
+ if (OF_getpropint64array(node, "reg", reg, sizeof(reg)) != sizeof(reg))
+ return addr;
+
+ if (OF_getpropintarray(node, "iommu-addresses", iommu_addresses,
+ sizeof(iommu_addresses)) < sizeof(iommu_addresses))
+ return addr;
+ start = (uint64_t)iommu_addresses[1] << 32 | iommu_addresses[2];
+ size = (uint64_t)iommu_addresses[3] << 32 | iommu_addresses[4];
+ if (addr >= start && addr < start + size)
+ return reg[0] + (addr - start);
+
+ /* XXX some machines have truncated DVAs in "iommu-addresses" */
+ addr &= 0xffffffff;
+ if (addr >= start && addr < start + size)
+ return reg[0] + (addr - start);
+
+ return (paddr_t)-1;
+}
+
+void
+apple_rtkit_do_recv(void *arg)
+{
+ struct apple_rtkit_ep *rtkep = arg;
+ struct apple_rtkit *rtk = rtkep->rtk;
+
+ rtk->ops->recv_message(rtk->cookie, rtkep->ep, rtkep->msg);
+}
+
+void
+apple_rtkit_recv(void *cookie, uint64_t msg)
+{
+ struct apple_rtkit_ep *rtkep = cookie;
+ struct apple_rtkit *rtk = rtkep->rtk;
+
+ rtkep->msg = msg;
+ task_add(rtk->tq, &rtkep->task);
+}
+
+int
+apple_rtkit_start_ep(struct apple_rtkit *rtk, uint8_t ep)
+{
+ struct apple_rtkit_ep *rtkep;
+ int error;
+
+ rtkep = &rtk->ep[ep];
+ rtkep->rtk = rtk;
+ rtkep->ep = ep;
+ task_set(&rtkep->task, apple_rtkit_do_recv, rtkep);
+
+ error = rtkit_start_endpoint(rtk->state, ep, apple_rtkit_recv, rtkep);
+ return -error;
+}
+
+int
+apple_rtkit_send_message(struct apple_rtkit *rtk, uint8_t ep, uint64_t msg,
+ struct completion *completion, int atomic)
+{
+ int error;
+
+ error = rtkit_send_endpoint(rtk->state, ep, msg);
+ return -error;
+}
+
+int
+apple_rtkit_wake(struct apple_rtkit *rtk)
+{
+ int error;
+
+ error = rtkit_set_iop_pwrstate(rtk->state, RTKIT_MGMT_PWR_STATE_INIT);
+ if (error)
+ return -error;
+
+ error = rtkit_set_ap_pwrstate(rtk->state, RTKIT_MGMT_PWR_STATE_ON);
+ return -error;
+}
+
+struct apple_rtkit *
+devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx, const struct apple_rtkit_ops *ops)
+{
+ struct platform_device *pdev = (struct platform_device *)dev;
+ struct apple_rtkit *rtk;
+ struct rtkit *rk;
+
+ rtk = malloc(sizeof(*rtk), M_DEVBUF, M_WAITOK | M_ZERO);
+ rtk->tq = taskq_create("drmrtk", 1, IPL_HIGH, 0);
+ if (rtk->tq == NULL) {
+ free(rtk, M_DEVBUF, sizeof(*rtk));
+ return ERR_PTR(ENOMEM);
+ }
+
+ rk = malloc(sizeof(*rk), M_DEVBUF, M_WAITOK | M_ZERO);
+ rk->rk_cookie = rtk;
+ rk->rk_dmat = pdev->dmat;
+ rk->rk_logmap = apple_rtkit_logmap;
+
+ rtk->state = rtkit_init(pdev->node, mbox_name, 0, rk);
+ rtk->cookie = cookie;
+ rtk->pdev = pdev;
+ rtk->ops = ops;
+
+ return rtk;
+}
+
+static const void *
+of_device_get_match_data(const struct device *dev)
+{
+ struct platform_device *pdev = (struct platform_device *)dev;
+ int i;
+
+ for (i = 0; i < nitems(of_match); i++) {
+ if (OF_is_compatible(pdev->node, of_match[i].compatible))
+ return of_match[i].data;
+ }
+
+ return NULL;
+}
--- /dev/null
+/* $OpenBSD: apldrm.c,v 1.1 2024/01/22 18:54:01 kettenis Exp $ */
+/*
+ * Copyright (c) 2023 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+
+#include <machine/fdt.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/fdt.h>
+
+#include <dev/wscons/wsconsio.h>
+#include <dev/wscons/wsdisplayvar.h>
+#include <dev/rasops/rasops.h>
+
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_framebuffer.h>
+
+struct apldrm_softc {
+ struct platform_device sc_dev;
+ struct drm_device sc_ddev;
+
+ int sc_node;
+
+ struct rasops_info sc_ri;
+ struct wsscreen_descr sc_wsd;
+ struct wsscreen_list sc_wsl;
+ struct wsscreen_descr *sc_scrlist[1];
+
+ void (*sc_switchcb)(void *, int, int);
+ void *sc_switchcbarg;
+ void *sc_switchcookie;
+ struct task sc_switchtask;
+
+ int sc_burner_fblank;
+ struct task sc_burner_task;
+};
+
+#include "apple_drv.c"
+
+int apldrm_match(struct device *, void *, void *);
+void apldrm_attach(struct device *, struct device *, void *);
+int apldrm_activate(struct device *, int);
+
+const struct cfattach apldrm_ca = {
+ sizeof (struct apldrm_softc), apldrm_match, apldrm_attach,
+ NULL, apldrm_activate
+};
+
+struct cfdriver apldrm_cd = {
+ NULL, "apldrm", DV_DULL
+};
+
+void apldrm_attachhook(struct device *);
+
+int
+apldrm_match(struct device *parent, void *match, void *aux)
+{
+ struct fdt_attach_args *faa = aux;
+
+ return OF_is_compatible(faa->fa_node, "apple,display-subsystem");
+}
+
+void
+apldrm_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct apldrm_softc *sc = (struct apldrm_softc *)self;
+ struct fdt_attach_args *faa = aux;
+ int idx, len, node;
+ uint32_t *phandles;
+ uint64_t reg[2];
+
+ sc->sc_node = faa->fa_node;
+
+ /* Claim framebuffer to prevent attaching other drivers. */
+ len = OF_getproplen(faa->fa_node, "memory-region");
+ idx = OF_getindex(faa->fa_node, "framebuffer", "memory-region-names");
+ if (idx >= 0 && idx < len / sizeof(uint32_t)) {
+ phandles = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
+ OF_getpropintarray(faa->fa_node, "memory-region",
+ phandles, len);
+ node = OF_getnodebyphandle(phandles[idx]);
+ if (node) {
+ if (OF_getpropint64array(node, "reg", reg,
+ sizeof(reg)) == sizeof(reg))
+ rasops_claim_framebuffer(reg[0], reg[1], self);
+ }
+ free(phandles, M_TEMP, len);
+ }
+
+ /*
+ * Update our understanding of the console output node if
+ * we're using the framebuffer console.
+ */
+ if (OF_is_compatible(stdout_node, "simple-framebuffer"))
+ stdout_node = sc->sc_node;
+
+ printf("\n");
+
+ sc->sc_dev.faa = faa;
+ platform_device_register(&sc->sc_dev);
+
+ drm_attach_platform((struct drm_driver *)&apple_drm_driver,
+ faa->fa_iot, faa->fa_dmat, self, &sc->sc_ddev);
+ config_mountroot(self, apldrm_attachhook);
+}
+
+int
+apldrm_activate(struct device *self, int act)
+{
+ int rv;
+
+ switch (act) {
+ case DVACT_QUIESCE:
+ rv = config_activate_children(self, act);
+ apple_platform_suspend(self);
+ break;
+ case DVACT_WAKEUP:
+ apple_platform_resume(self);
+ rv = config_activate_children(self, act);
+ break;
+ default:
+ rv = config_activate_children(self, act);
+ break;
+ }
+
+ return rv;
+}
+
+int
+apldrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
+{
+ struct rasops_info *ri = v;
+ struct apldrm_softc *sc = ri->ri_hw;
+ struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
+ struct wsdisplay_fbinfo *wdf;
+ struct backlight_device *bd;
+
+ bd = backlight_device_get_by_name("apple-panel-bl");
+
+ switch (cmd) {
+ case WSDISPLAYIO_GTYPE:
+ *(u_int *)data = WSDISPLAY_TYPE_KMS;
+ return 0;
+ case WSDISPLAYIO_GINFO:
+ wdf = (struct wsdisplay_fbinfo *)data;
+ wdf->width = ri->ri_width;
+ wdf->height = ri->ri_height;
+ wdf->depth = ri->ri_depth;
+ wdf->stride = ri->ri_stride;
+ wdf->offset = 0; /* XXX */
+ wdf->cmsize = 0;
+ return 0;
+ case WSDISPLAYIO_GETPARAM:
+ if (bd == NULL)
+ return -1;
+
+ switch (dp->param) {
+ case WSDISPLAYIO_PARAM_BRIGHTNESS:
+ dp->min = 0;
+ dp->max = bd->props.max_brightness;
+ dp->curval = bd->props.brightness;
+ return (dp->max > dp->min) ? 0 : -1;
+ }
+ break;
+ case WSDISPLAYIO_SETPARAM:
+ if (bd == NULL)
+ return -1;
+
+ switch (dp->param) {
+ case WSDISPLAYIO_PARAM_BRIGHTNESS:
+ bd->props.brightness = dp->curval;
+ backlight_update_status(bd);
+ knote_locked(&sc->sc_ddev.note, NOTE_CHANGE);
+ return 0;
+ }
+ break;
+ case WSDISPLAYIO_SVIDEO:
+ case WSDISPLAYIO_GVIDEO:
+ return 0;
+ }
+
+ return (-1);
+}
+
+paddr_t
+apldrm_wsmmap(void *v, off_t off, int prot)
+{
+ return (-1);
+}
+
+int
+apldrm_alloc_screen(void *v, const struct wsscreen_descr *type,
+ void **cookiep, int *curxp, int *curyp, uint32_t *attrp)
+{
+ return rasops_alloc_screen(v, cookiep, curxp, curyp, attrp);
+}
+
+void
+apldrm_free_screen(void *v, void *cookie)
+{
+ return rasops_free_screen(v, cookie);
+}
+
+void
+apldrm_doswitch(void *v)
+{
+ struct rasops_info *ri = v;
+ struct apldrm_softc *sc = ri->ri_hw;
+ struct drm_fb_helper *fb_helper = sc->sc_ddev.fb_helper;
+
+ rasops_show_screen(ri, sc->sc_switchcookie, 0, NULL, NULL);
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+
+ if (sc->sc_switchcb)
+ (sc->sc_switchcb)(sc->sc_switchcbarg, 0, 0);
+}
+
+int
+apldrm_show_screen(void *v, void *cookie, int waitok,
+ void (*cb)(void *, int, int), void *cbarg)
+{
+ struct rasops_info *ri = v;
+ struct apldrm_softc *sc = ri->ri_hw;
+
+ if (cookie == ri->ri_active)
+ return (0);
+
+ sc->sc_switchcb = cb;
+ sc->sc_switchcbarg = cbarg;
+ sc->sc_switchcookie = cookie;
+ if (cb) {
+ task_add(systq, &sc->sc_switchtask);
+ return (EAGAIN);
+ }
+
+ apldrm_doswitch(v);
+
+ return (0);
+}
+
+void
+apldrm_enter_ddb(void *v, void *cookie)
+{
+ struct rasops_info *ri = v;
+ struct apldrm_softc *sc = ri->ri_hw;
+ struct drm_fb_helper *fb_helper = sc->sc_ddev.fb_helper;
+
+ if (cookie == ri->ri_active)
+ return;
+
+ rasops_show_screen(ri, cookie, 0, NULL, NULL);
+ drm_fb_helper_debug_enter(fb_helper->info);
+}
+
+void
+apldrm_burner(void *v, u_int on, u_int flags)
+{
+ struct rasops_info *ri = v;
+ struct apldrm_softc *sc = ri->ri_hw;
+
+ task_del(systq, &sc->sc_burner_task);
+
+ if (on)
+ sc->sc_burner_fblank = FB_BLANK_UNBLANK;
+ else {
+ if (flags & WSDISPLAY_BURN_VBLANK)
+ sc->sc_burner_fblank = FB_BLANK_VSYNC_SUSPEND;
+ else
+ sc->sc_burner_fblank = FB_BLANK_NORMAL;
+ }
+
+ /*
+ * Setting the DPMS mode may sleep while waiting for vblank so
+ * hand things off to a taskq.
+ */
+ task_add(systq, &sc->sc_burner_task);
+}
+
+void
+apldrm_burner_cb(void *arg)
+{
+ struct apldrm_softc *sc = arg;
+ struct drm_fb_helper *fb_helper = sc->sc_ddev.fb_helper;
+
+ drm_fb_helper_blank(sc->sc_burner_fblank, fb_helper->info);
+}
+
+struct wsdisplay_accessops apldrm_accessops = {
+ .ioctl = apldrm_wsioctl,
+ .mmap = apldrm_wsmmap,
+ .alloc_screen = apldrm_alloc_screen,
+ .free_screen = apldrm_free_screen,
+ .show_screen = apldrm_show_screen,
+ .enter_ddb = apldrm_enter_ddb,
+ .getchar = rasops_getchar,
+ .load_font = rasops_load_font,
+ .list_font = rasops_list_font,
+ .scrollback = rasops_scrollback,
+ .burn_screen = apldrm_burner
+};
+
+void
+apldrm_attachhook(struct device *self)
+{
+ struct apldrm_softc *sc = (struct apldrm_softc *)self;
+ struct drm_fb_helper *fb_helper;
+ struct rasops_info *ri = &sc->sc_ri;
+ struct wsemuldisplaydev_attach_args waa;
+ int console = 0;
+ uint32_t defattr;
+ int error;
+
+ error = apple_platform_probe(&sc->sc_dev);
+ if (error)
+ return;
+
+ if (sc->sc_node == stdout_node)
+ console = 1;
+
+ fb_helper = sc->sc_ddev.fb_helper;
+ ri->ri_hw = sc;
+ ri->ri_bits = fb_helper->info->screen_buffer;
+ ri->ri_flg = RI_CENTER | RI_VCONS | RI_WRONLY;
+ ri->ri_depth = fb_helper->fb->format->cpp[0] * 8;
+ ri->ri_stride = fb_helper->fb->pitches[0];
+ ri->ri_width = fb_helper->info->var.xres;
+ ri->ri_height = fb_helper->info->var.yres;
+
+ switch (fb_helper->fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ ri->ri_rnum = 8;
+ ri->ri_rpos = 16;
+ ri->ri_gnum = 8;
+ ri->ri_gpos = 8;
+ ri->ri_bnum = 8;
+ ri->ri_bpos = 0;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ ri->ri_rnum = 10;
+ ri->ri_rpos = 20;
+ ri->ri_gnum = 10;
+ ri->ri_gpos = 10;
+ ri->ri_bnum = 10;
+ ri->ri_bpos = 0;
+ break;
+ }
+
+ rasops_init(ri, 160, 160);
+
+ strlcpy(sc->sc_wsd.name, "std", sizeof(sc->sc_wsd.name));
+ sc->sc_wsd.capabilities = ri->ri_caps;
+ sc->sc_wsd.nrows = ri->ri_rows;
+ sc->sc_wsd.ncols = ri->ri_cols;
+ sc->sc_wsd.textops = &ri->ri_ops;
+ sc->sc_wsd.fontwidth = ri->ri_font->fontwidth;
+ sc->sc_wsd.fontheight = ri->ri_font->fontheight;
+
+ sc->sc_scrlist[0] = &sc->sc_wsd;
+ sc->sc_wsl.nscreens = 1;
+ sc->sc_wsl.screens = (const struct wsscreen_descr **)sc->sc_scrlist;
+
+ task_set(&sc->sc_switchtask, apldrm_doswitch, ri);
+ task_set(&sc->sc_burner_task, apldrm_burner_cb, sc);
+
+ if (console) {
+ ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr);
+ wsdisplay_cnattach(&sc->sc_wsd, ri->ri_active,
+ ri->ri_ccol, ri->ri_crow, defattr);
+ }
+
+ memset(&waa, 0, sizeof(waa));
+ waa.scrdata = &sc->sc_wsl;
+ waa.accessops = &apldrm_accessops;
+ waa.accesscookie = ri;
+ waa.console = console;
+
+ printf("%s: %dx%d, %dbpp\n", sc->sc_dev.dev.dv_xname,
+ ri->ri_width, ri->ri_height, ri->ri_depth);
+
+ config_found_sm(self, &waa, wsemuldisplaydevprint,
+ wsemuldisplaydevsubmatch);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+/* Based on meson driver which is
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ */
+
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_module.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_fixed.h>
+
+#include "dcp.h"
+
+#define DRIVER_NAME "apple"
+#define DRIVER_DESC "Apple display controller DRM driver"
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+#define MAX_COPROCESSORS 2
+
+struct apple_drm_private {
+ struct drm_device drm;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(apple_fops);
+
+#define DART_PAGE_SIZE 16384
+
+static int apple_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
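+ /*
+ * Align the pitch to 64 bytes and round the allocation up to the 16k
+ * DART page size, presumably so the buffer maps cleanly through the
+ * DART IOMMU in front of the DCP.
+ */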
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
+ args->size = round_up(args->pitch * args->height, DART_PAGE_SIZE);
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver apple_drm_driver = {
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(apple_drm_gem_dumb_create),
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = "20221106",
+ .major = 1,
+ .minor = 0,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .fops = &apple_fops,
+};
+
+static int apple_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * DCP limits downscaling to 2x and upscaling to 4x. Attempting to
+ * scale outside these bounds errors out when swapping.
+ *
+ * This function also takes care of clipping the src/dest rectangles,
+ * which is required for correct operation; partially off-screen
+ * surfaces may otherwise appear corrupted.
+ *
+ * DCP does not distinguish plane types in the hardware, so we set
+ * can_position. If the primary plane does not fill the screen, the
+ * hardware will fill in zeroes (black).
+ */
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ FRAC_16_16(1, 4),
+ FRAC_16_16(2, 1),
+ true, true);
+}
+
+static void apple_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ /* Handled in atomic_flush */
+}
+
+static const struct drm_plane_helper_funcs apple_plane_helper_funcs = {
+ .atomic_check = apple_plane_atomic_check,
+ .atomic_update = apple_plane_atomic_update,
+};
+
+static void apple_plane_cleanup(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
+
+static const struct drm_plane_funcs apple_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = apple_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Table of supported formats, mapping from DRM fourccs to DCP fourccs.
+ *
+ * For future work, DCP supports more formats not listed, including YUV
+ * formats, an extra RGBA format, and a biplanar RGB10_A8 format (fourcc b3a8)
+ * used for HDR.
+ *
+ * Note: we don't have non-alpha formats but userspace breaks without XRGB. It
+ * doesn't matter for the primary plane, but cursors/overlays must not
+ * advertise formats without alpha.
+ */
+static const u32 dcp_formats[] = {
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+u64 apple_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static struct drm_plane *apple_plane_init(struct drm_device *dev,
+ unsigned long possible_crtcs,
+ enum drm_plane_type type)
+{
+ int ret;
+ struct drm_plane *plane;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+ &apple_plane_funcs,
+ dcp_formats, ARRAY_SIZE(dcp_formats),
+ apple_format_modifiers, type, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(plane, &apple_plane_helper_funcs);
+
+ return plane;
+}
+
+static enum drm_connector_status
+apple_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+
+ return apple_connector->connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void apple_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->active_changed && crtc_state->active) {
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ dev_dbg(&apple_crtc->dcp->dev, "%s", __func__);
+ dcp_poweron(apple_crtc->dcp);
+ dev_dbg(&apple_crtc->dcp->dev, "%s finished", __func__);
+ }
+
+ if (crtc_state->active)
+ dcp_crtc_atomic_modeset(crtc, state);
+}
+
+static void apple_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->active_changed && !crtc_state->active) {
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ dev_dbg(&apple_crtc->dcp->dev, "%s", __func__);
+ dcp_poweroff(apple_crtc->dcp);
+ dev_dbg(&apple_crtc->dcp->dev, "%s finished", __func__);
+ }
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void apple_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ apple_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+}
+
+static void dcp_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_fake_vblank(old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
+static void apple_crtc_cleanup(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(to_apple_crtc(crtc));
+}
+
+static const struct drm_crtc_funcs apple_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .destroy = apple_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+};
+
+static const struct drm_mode_config_funcs apple_mode_config_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
+static const struct drm_mode_config_helper_funcs apple_mode_config_helpers = {
+ .atomic_commit_tail = dcp_atomic_commit_tail,
+};
+
+static void appledrm_connector_cleanup(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(to_apple_connector(connector));
+}
+
+static const struct drm_connector_funcs apple_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = appledrm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .detect = apple_connector_detect,
+};
+
+static const struct drm_connector_helper_funcs apple_connector_helper_funcs = {
+ .get_modes = dcp_get_modes,
+ .mode_valid = dcp_mode_valid,
+};
+
+static const struct drm_crtc_helper_funcs apple_crtc_helper_funcs = {
+ .atomic_begin = apple_crtc_atomic_begin,
+ .atomic_check = dcp_crtc_atomic_check,
+ .atomic_flush = dcp_flush,
+ .atomic_enable = apple_crtc_atomic_enable,
+ .atomic_disable = apple_crtc_atomic_disable,
+ .mode_fixup = dcp_crtc_mode_fixup,
+};
+
+static int apple_probe_per_dcp(struct device *dev,
+ struct drm_device *drm,
+ struct platform_device *dcp,
+ int num, bool dcp_ext)
+{
+ struct apple_crtc *crtc;
+ struct apple_connector *connector;
+ struct apple_encoder *enc;
+ struct drm_plane *primary;
+ int ret;
+
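+	/*
+	 * CRTCs are registered in order, so the CRTC created below gets index
+	 * 'num'; pass the matching bit as the plane's possible-CRTCs mask.
+	 */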
+ primary = apple_plane_init(drm, 1U << num, DRM_PLANE_TYPE_PRIMARY);
+
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+	if (!crtc)
+		return -ENOMEM;
+
+	ret = drm_crtc_init_with_planes(drm, &crtc->base, primary, NULL,
+					&apple_crtc_funcs, NULL);
+	if (ret)
+		return ret;
+
+ drm_crtc_helper_add(&crtc->base, &apple_crtc_helper_funcs);
+ drm_crtc_enable_color_mgmt(&crtc->base, 0, true, 0);
+
+ enc = drmm_simple_encoder_alloc(drm, struct apple_encoder, base,
+ DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(enc))
+ return PTR_ERR(enc);
+ enc->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return -ENOMEM;
+
+	drm_connector_helper_add(&connector->base,
+				 &apple_connector_helper_funcs);
+
+#ifdef __linux__
+ // HACK:
+ if (dcp_ext)
+ connector->base.fwnode = fwnode_handle_get(dev->fwnode);
+#endif
+
+ ret = drm_connector_init(drm, &connector->base, &apple_connector_funcs,
+ dcp_get_connector_type(dcp));
+ if (ret)
+ return ret;
+
+ connector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ connector->connected = false;
+ connector->dcp = dcp;
+
+ INIT_WORK(&connector->hotplug_wq, dcp_hotplug);
+
+ crtc->dcp = dcp;
+ dcp_link(dcp, crtc, connector);
+
+ return drm_connector_attach_encoder(&connector->base, &enc->base);
+}
+
+static int apple_get_fb_resource(struct device *dev, const char *name,
+ struct resource *fb_r)
+{
+ int idx, ret = -ENODEV;
+ struct device_node *node;
+
+ idx = of_property_match_string(dev->of_node, "memory-region-names", name);
+
+ node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!node) {
+ dev_err(dev, "reserved-memory node '%s' not found\n", name);
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(node)) {
+ dev_err(dev, "reserved-memory node '%s' is unavailable\n", name);
+ goto err;
+ }
+
+ if (!of_device_is_compatible(node, "framebuffer")) {
+ dev_err(dev, "reserved-memory node '%s' is incompatible\n",
+ node->full_name);
+ goto err;
+ }
+
+ ret = of_address_to_resource(node, 0, fb_r);
+
+err:
+ of_node_put(node);
+ return ret;
+}
+
+static const struct of_device_id apple_dcp_id_tbl[] = {
+ { .compatible = "apple,dcp" },
+ { .compatible = "apple,dcpext" },
+ {},
+};
+
+static int apple_drm_init_dcp(struct device *dev)
+{
+ struct apple_drm_private *apple = dev_get_drvdata(dev);
+ struct platform_device *dcp[MAX_COPROCESSORS];
+ struct device_node *np;
+ u64 timeout;
+ int i, ret, num_dcp = 0;
+
+ for_each_matching_node(np, apple_dcp_id_tbl) {
+ bool dcp_ext;
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ continue;
+ }
+ dcp_ext = of_device_is_compatible(np, "apple,dcpext");
+
+ dcp[num_dcp] = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!dcp[num_dcp])
+ continue;
+
+ ret = apple_probe_per_dcp(dev, &apple->drm, dcp[num_dcp],
+ num_dcp, dcp_ext);
+ if (ret)
+ continue;
+
+ ret = dcp_start(dcp[num_dcp]);
+ if (ret)
+ continue;
+
+ num_dcp++;
+ }
+
+ if (num_dcp < 1)
+ return -ENODEV;
+
+ /*
+ * Starting DPTX might take some time.
+ */
+ timeout = get_jiffies_64() + msecs_to_jiffies(3000);
+
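+	/* One shared deadline: later DCPs only wait for whatever time is left. */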
+ for (i = 0; i < num_dcp; ++i) {
+ u64 jiffies = get_jiffies_64();
+ u64 wait = time_after_eq64(jiffies, timeout) ?
+ 0 :
+ timeout - jiffies;
+ ret = dcp_wait_ready(dcp[i], wait);
+ /* There is nothing we can do if a dcp/dcpext does not boot
+ * (successfully). Ignoring it should not do any harm now.
+		 * Needs to be reevaluated when adding dcpext support.
+ */
+ if (ret)
+ dev_warn(dev, "DCP[%d] not ready: %d\n", i, ret);
+ }
+ /* HACK: Wait for dcp* to settle before a modeset */
+ drm_msleep(100);
+
+ return 0;
+}
+
+static int apple_drm_init(struct device *dev)
+{
+ struct apple_drm_private *apple;
+ struct resource fb_r;
+ resource_size_t fb_size;
+ int ret;
+
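+	/* The DCP uses 42-bit DMA addresses (same mask as the piodma child). */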
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(42));
+ if (ret)
+ return ret;
+
+ ret = apple_get_fb_resource(dev, "framebuffer", &fb_r);
+ if (ret)
+ return ret;
+
+ fb_size = fb_r.end - fb_r.start + 1;
+ ret = drm_aperture_remove_conflicting_framebuffers(fb_r.start, fb_size,
+ &apple_drm_driver);
+ if (ret) {
+		dev_err(dev, "Failed to remove fb: %d\n", ret);
+ goto err_unbind;
+ }
+
+#ifdef __linux__
+ apple = devm_drm_dev_alloc(dev, &apple_drm_driver,
+ struct apple_drm_private, drm);
+ if (IS_ERR(apple))
+ return PTR_ERR(apple);
+#else
+ struct apldrm_softc *sc = (struct apldrm_softc *)dev;
+ apple = (struct apple_drm_private *)&sc->sc_ddev;
+#endif
+
+ dev_set_drvdata(dev, apple);
+
+ ret = component_bind_all(dev, apple);
+ if (ret)
+ return ret;
+
+ ret = drmm_mode_config_init(&apple->drm);
+ if (ret)
+ goto err_unbind;
+
+ /*
+ * IOMFB::UPPipeDCP_H13P::verify_surfaces produces the error "plane
+ * requires a minimum of 32x32 for the source buffer" if smaller
+ */
+ apple->drm.mode_config.min_width = 32;
+ apple->drm.mode_config.min_height = 32;
+
+ /*
+	 * TODO: this is the maximum framebuffer size, not the maximum
+	 * supported output resolution. DCP reports the maximum framebuffer
+	 * size; take it from there.
+	 * Hardcode it for now to the 'MaxSrcBufferWidth' and
+	 * 'MaxSrcBufferHeight' of 16384 reported by the M1 Max DCP.
+ */
+ apple->drm.mode_config.max_width = 16384;
+ apple->drm.mode_config.max_height = 16384;
+
+ apple->drm.mode_config.funcs = &apple_mode_config_funcs;
+ apple->drm.mode_config.helper_private = &apple_mode_config_helpers;
+
+ ret = apple_drm_init_dcp(dev);
+ if (ret)
+ goto err_unbind;
+
+ drm_mode_config_reset(&apple->drm);
+
+ ret = drm_dev_register(&apple->drm, 0);
+ if (ret)
+ goto err_unbind;
+
+ drm_fbdev_dma_setup(&apple->drm, 32);
+
+ return 0;
+
+err_unbind:
+ component_unbind_all(dev, NULL);
+ return ret;
+}
+
+static void apple_drm_uninit(struct device *dev)
+{
+ struct apple_drm_private *apple = dev_get_drvdata(dev);
+
+ drm_dev_unregister(&apple->drm);
+ drm_atomic_helper_shutdown(&apple->drm);
+
+ component_unbind_all(dev, NULL);
+
+ dev_set_drvdata(dev, NULL);
+}
+
+static int apple_drm_bind(struct device *dev)
+{
+ return apple_drm_init(dev);
+}
+
+static void apple_drm_unbind(struct device *dev)
+{
+ apple_drm_uninit(dev);
+}
+
+const struct component_master_ops apple_drm_ops = {
+ .bind = apple_drm_bind,
+ .unbind = apple_drm_unbind,
+};
+
+static int add_dcp_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np;
+ int num = 0;
+
+ for_each_matching_node(np, apple_dcp_id_tbl) {
+ if (of_device_is_available(np)) {
+ drm_of_component_match_add(dev, matchptr,
+ component_compare_of, np);
+ num++;
+ }
+ of_node_put(np);
+ }
+
+ return num;
+}
+
+static int apple_platform_probe(struct platform_device *pdev)
+{
+ struct device *mdev = &pdev->dev;
+ struct component_match *match = NULL;
+ int num_dcp;
+
+ /* add DCP components, handle less than 1 as probe error */
+ num_dcp = add_dcp_components(mdev, &match);
+ if (num_dcp < 1)
+ return -ENODEV;
+
+ return component_master_add_with_match(mdev, &apple_drm_ops, match);
+}
+
+#ifdef __linux__
+
+static int apple_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &apple_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,display-subsystem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int apple_platform_suspend(struct device *dev)
+{
+ struct apple_drm_private *apple = dev_get_drvdata(dev);
+
+ if (apple)
+ return drm_mode_config_helper_suspend(&apple->drm);
+
+ return 0;
+}
+
+static int apple_platform_resume(struct device *dev)
+{
+ struct apple_drm_private *apple = dev_get_drvdata(dev);
+
+ if (apple)
+ drm_mode_config_helper_resume(&apple->drm);
+
+ return 0;
+}
+
+static const struct dev_pm_ops apple_platform_pm_ops = {
+ .suspend = apple_platform_suspend,
+ .resume = apple_platform_resume,
+};
+#endif
+
+#ifdef __linux__
+
+static struct platform_driver apple_platform_driver = {
+ .driver = {
+ .name = "apple-drm",
+ .of_match_table = of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &apple_platform_pm_ops,
+#endif
+ },
+ .probe = apple_platform_probe,
+ .remove = apple_platform_remove,
+};
+
+drm_module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_INTERNAL_H__
+#define __APPLE_DCP_INTERNAL_H__
+
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/mux/consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "dptxep.h"
+#include "iomfb.h"
+#include "iomfb_v12_3.h"
+#include "iomfb_v13_3.h"
+
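+/* The IOMFB swap interface accepts at most two layers (see dcp_crtc_atomic_check()). */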
+#define DCP_MAX_PLANES 2
+
+struct apple_dcp;
+struct apple_dcp_afkep;
+
+enum dcp_firmware_version {
+ DCP_FIRMWARE_UNKNOWN,
+ DCP_FIRMWARE_V_12_3,
+ DCP_FIRMWARE_V_13_5,
+};
+
+enum {
+ SYSTEM_ENDPOINT = 0x20,
+ TEST_ENDPOINT = 0x21,
+ DCP_EXPERT_ENDPOINT = 0x22,
+ DISP0_ENDPOINT = 0x23,
+ DPTX_ENDPOINT = 0x2a,
+ HDCP_ENDPOINT = 0x2b,
+ REMOTE_ALLOC_ENDPOINT = 0x2d,
+ IOMFB_ENDPOINT = 0x37,
+};
+
+/* Temporary backing for a chunked transfer via setDCPAVPropStart/Chunk/End */
+struct dcp_chunks {
+ size_t length;
+ void *data;
+};
+
+#define DCP_MAX_MAPPINGS (128) /* should be enough */
+#define MAX_DISP_REGISTERS (7)
+
+struct dcp_mem_descriptor {
+ size_t size;
+ void *buf;
+ dma_addr_t dva;
+ struct sg_table map;
+ u64 reg;
+};
+
+/* Limit on call stack depth (arbitrary). Some nesting is required */
+#define DCP_MAX_CALL_DEPTH 8
+
+typedef void (*dcp_callback_t)(struct apple_dcp *, void *, void *);
+
+struct dcp_channel {
+ dcp_callback_t callbacks[DCP_MAX_CALL_DEPTH];
+ void *cookies[DCP_MAX_CALL_DEPTH];
+ void *output[DCP_MAX_CALL_DEPTH];
+ u16 end[DCP_MAX_CALL_DEPTH];
+
+ /* Current depth of the call stack. Less than DCP_MAX_CALL_DEPTH */
+ u8 depth;
+};
+
+struct dcp_fb_reference {
+ struct list_head head;
+ struct drm_framebuffer *fb;
+ u32 swap_id;
+};
+
+#define MAX_NOTCH_HEIGHT 160
+
+struct dcp_brightness {
+ struct backlight_device *bl_dev;
+ u32 maximum;
+ u32 dac;
+ int nits;
+ int scale;
+ bool update;
+};
+
+/** laptop/AiO integrated panel parameters from DT */
+struct dcp_panel {
+	/// panel width in millimeters
+	int width_mm;
+	/// panel height in millimeters
+	int height_mm;
+	/// panel has a mini-LED backlight
+ bool has_mini_led;
+};
+
+struct apple_dcp_hw_data {
+ u32 num_dptx_ports;
+};
+
+/* TODO: move IOMFB members to their own struct */
+struct apple_dcp {
+ struct device *dev;
+ struct platform_device *piodma;
+ struct iommu_domain *iommu_dom;
+ struct apple_rtkit *rtk;
+ struct apple_crtc *crtc;
+ struct apple_connector *connector;
+
+ struct apple_dcp_hw_data hw;
+
+	/* compatible firmware version */
+ enum dcp_firmware_version fw_compat;
+
+ /* Coprocessor control register */
+ void __iomem *coproc_reg;
+
+ /* DCP has crashed */
+ bool crashed;
+
+ /************* IOMFB **************************************************
+ * everything below is mostly used inside IOMFB but it could make *
+	 * sense to keep some of the members in apple_dcp.                   *
+ **********************************************************************/
+
+	/* clock rate requested by dcp */
+ struct clk *clk;
+
+ /* DCP shared memory */
+ void *shmem;
+
+ /* Display registers mappable to the DCP */
+ struct resource *disp_registers[MAX_DISP_REGISTERS];
+ unsigned int nr_disp_registers;
+
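+	/* pmgr bandwidth scratch/doorbell regions exposed to the DCP as disp registers */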
+ struct resource disp_bw_scratch_res;
+ struct resource disp_bw_doorbell_res;
+ u32 disp_bw_scratch_index;
+ u32 disp_bw_scratch_offset;
+ u32 disp_bw_doorbell_index;
+ u32 disp_bw_doorbell_offset;
+
+ u32 index;
+
+ /* Bitmap of memory descriptors used for mappings made by the DCP */
+ DECLARE_BITMAP(memdesc_map, DCP_MAX_MAPPINGS);
+
+ /* Indexed table of memory descriptors */
+ struct dcp_mem_descriptor memdesc[DCP_MAX_MAPPINGS];
+
+ struct dcp_channel ch_cmd, ch_oobcmd;
+ struct dcp_channel ch_cb, ch_oobcb, ch_async, ch_oobasync;
+
+ /* iomfb EP callback handlers */
+ const iomfb_cb_handler *cb_handlers;
+
+ /* Active chunked transfer. There can only be one at a time. */
+ struct dcp_chunks chunks;
+
+ /* Queued swap. Owned by the DCP to avoid per-swap memory allocation */
+ union {
+ struct dcp_swap_submit_req_v12_3 v12_3;
+ struct dcp_swap_submit_req_v13_3 v13_3;
+ } swap;
+
+ /* swap id of the last completed swap */
+ u32 last_swap_id;
+
+ /* Current display mode */
+ bool during_modeset;
+ bool valid_mode;
+ struct dcp_set_digital_out_mode_req mode;
+
+	/* completion signaled when 'active' becomes true */
+ struct completion start_done;
+
+ /* Is the DCP booted? */
+ bool active;
+
+ /* eDP display without DP-HDMI conversion */
+ bool main_display;
+
+ /* clear all surfaces on init */
+ bool surfaces_cleared;
+
+ /* Modes valid for the connected display */
+ struct dcp_display_mode *modes;
+ unsigned int nr_modes;
+
+ /* Attributes of the connector */
+ int connector_type;
+
+ /* Attributes of the connected display */
+ int width_mm, height_mm;
+
+ unsigned notch_height;
+
+ /* Workqueue for sending vblank events when a dcp swap is not possible */
+ struct work_struct vblank_wq;
+
+ /* List of referenced drm_framebuffers which can be unreferenced
+ * on the next successfully completed swap.
+ */
+ struct list_head swapped_out_fbs;
+
+ struct dcp_brightness brightness;
+	/* Workqueue for updating the initial brightness */
+ struct work_struct bl_register_wq;
+ struct rwlock bl_register_mutex;
+
+ /* integrated panel if present */
+ struct dcp_panel panel;
+
+ struct apple_dcp_afkep *systemep;
+ struct completion systemep_done;
+
+ struct apple_dcp_afkep *ibootep;
+
+ struct apple_dcp_afkep *dptxep;
+
+ struct dptx_port dptxport[2];
+
+ /* these fields are output port specific */
+ struct phy *phy;
+ struct mux_control *xbar;
+
+ struct gpio_desc *hdmi_hpd;
+ struct gpio_desc *hdmi_pwren;
+ struct gpio_desc *dp2hdmi_pwren;
+
+ struct rwlock hpd_mutex;
+
+ u32 dptx_phy;
+ u32 dptx_die;
+ int hdmi_hpd_irq;
+};
+
+int dcp_backlight_register(struct apple_dcp *dcp);
+bool dcp_has_panel(struct apple_dcp *dcp);
+
+#define DCP_AUDIO_MAX_CHANS 15
+
+#endif /* __APPLE_DCP_INTERNAL_H__ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/align.h>
+#include <linux/apple-mailbox.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_module.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "afk.h"
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "parser.h"
+#include "trace.h"
+
+#define APPLE_DCP_COPROC_CPU_CONTROL 0x44
+#define APPLE_DCP_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define DCP_BOOT_TIMEOUT msecs_to_jiffies(1000)
+
+static bool show_notch;
+module_param(show_notch, bool, 0644);
+MODULE_PARM_DESC(show_notch, "Use the full display height and show the notch");
+
+/* HACK: moved here to avoid circular dependency between apple_drv and dcp */
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->base.dev->event_lock, flags);
+ if (crtc->event) {
+ drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+ crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&crtc->base.dev->event_lock, flags);
+}
+
+void dcp_set_dimensions(struct apple_dcp *dcp)
+{
+ int i;
+ int width_mm = dcp->width_mm;
+ int height_mm = dcp->height_mm;
+
+ if (width_mm == 0 || height_mm == 0) {
+ width_mm = dcp->panel.width_mm;
+ height_mm = dcp->panel.height_mm;
+ }
+
+ /* Set the connector info */
+ if (dcp->connector) {
+ struct drm_connector *connector = &dcp->connector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ connector->display_info.width_mm = width_mm;
+ connector->display_info.height_mm = height_mm;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ }
+
+ /*
+ * Fix up any probed modes. Modes are created when parsing
+ * TimingElements, dimensions are calculated when parsing
+ * DisplayAttributes, and TimingElements may be sent first
+ */
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ dcp->modes[i].mode.width_mm = width_mm;
+ dcp->modes[i].mode.height_mm = height_mm;
+ }
+}
+
+bool dcp_has_panel(struct apple_dcp *dcp)
+{
+ return dcp->panel.width_mm > 0;
+}
+
+/*
+ * Helper to send a DRM vblank event. We do not know how to call
+ * swap_submit_dcp without surfaces. To avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks, send a vblank event via a workqueue.
+ */
+static void dcp_delayed_vblank(struct work_struct *work)
+{
+ struct apple_dcp *dcp;
+
+ dcp = container_of(work, struct apple_dcp, vblank_wq);
+ mdelay(5);
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void dcp_recv_msg(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_dcp *dcp = cookie;
+
+ trace_dcp_recv_msg(dcp, endpoint, message);
+
+ switch (endpoint) {
+ case IOMFB_ENDPOINT:
+ return iomfb_recv_msg(dcp, message);
+ case SYSTEM_ENDPOINT:
+ afk_receive_message(dcp->systemep, message);
+ return;
+ case DISP0_ENDPOINT:
+ afk_receive_message(dcp->ibootep, message);
+ return;
+ case DPTX_ENDPOINT:
+ afk_receive_message(dcp->dptxep, message);
+ return;
+ default:
+ WARN(endpoint, "unknown DCP endpoint %hhu", endpoint);
+ }
+}
+
+static void dcp_rtk_crashed(void *cookie)
+{
+ struct apple_dcp *dcp = cookie;
+
+ dcp->crashed = true;
+ dev_err(dcp->dev, "DCP has crashed");
+ if (dcp->connector) {
+ dcp->connector->connected = 0;
+ schedule_work(&dcp->connector->hotplug_wq);
+ }
+ complete(&dcp->start_done);
+}
+
+static int dcp_rtk_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_dcp *dcp = cookie;
+
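+	/*
+	 * A preset IOVA means the firmware expects an already-mapped buffer:
+	 * resolve it through the IOMMU and memremap the backing memory.
+	 * Otherwise allocate a coherent buffer and let the DMA API pick the
+	 * IOVA.
+	 */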
+ if (bfr->iova) {
+ struct iommu_domain *domain =
+ iommu_get_domain_for_dev(dcp->dev);
+ phys_addr_t phy_addr;
+
+ if (!domain)
+ return -ENOMEM;
+
+ // TODO: get map from device-tree
+ phy_addr = iommu_iova_to_phys(domain, bfr->iova);
+ if (!phy_addr)
+ return -ENOMEM;
+
+ // TODO: verify phy_addr, cache attribute
+ bfr->buffer = memremap(phy_addr, bfr->size, MEMREMAP_WB);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ bfr->is_mapped = true;
+ dev_info(dcp->dev,
+ "shmem_setup: iova: %lx -> pa: %lx -> iomem: %lx",
+ (uintptr_t)bfr->iova, (uintptr_t)phy_addr,
+ (uintptr_t)bfr->buffer);
+ } else {
+ bfr->buffer = dma_alloc_coherent(dcp->dev, bfr->size,
+ &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ dev_info(dcp->dev, "shmem_setup: iova: %lx, buffer: %lx",
+ (uintptr_t)bfr->iova, (uintptr_t)bfr->buffer);
+ }
+
+ return 0;
+}
+
+static void dcp_rtk_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_dcp *dcp = cookie;
+
+ if (bfr->is_mapped)
+ memunmap(bfr->buffer);
+ else
+ dma_free_coherent(dcp->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static struct apple_rtkit_ops rtkit_ops = {
+ .crashed = dcp_rtk_crashed,
+ .recv_message = dcp_recv_msg,
+ .shmem_setup = dcp_rtk_shmem_setup,
+ .shmem_destroy = dcp_rtk_shmem_destroy,
+};
+
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message)
+{
+ trace_dcp_send_msg(dcp, endpoint, message);
+ apple_rtkit_send_message(dcp->rtk, endpoint, message, NULL,
+ true);
+}
+
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct drm_plane_state *new_state;
+ struct drm_plane *plane;
+ struct drm_crtc_state *crtc_state;
+ int plane_idx, plane_count = 0;
+ bool needs_modeset;
+
+ if (dcp->crashed)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ needs_modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+ if (!needs_modeset && !dcp->connector->connected) {
+ dev_err(dcp->dev, "crtc_atomic_check: disconnected but no modeset");
+ return -EINVAL;
+ }
+
+ for_each_new_plane_in_state(state, plane, new_state, plane_idx) {
+ /* skip planes not for this crtc */
+ if (new_state->crtc != crtc)
+ continue;
+
+ plane_count += 1;
+ }
+
+ if (plane_count > DCP_MAX_PLANES) {
+ dev_err(dcp->dev, "crtc_atomic_check: Blend supports only 2 layers!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dcp_crtc_atomic_check);
+
+int dcp_get_connector_type(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return (dcp->connector_type);
+}
+EXPORT_SYMBOL_GPL(dcp_get_connector_type);
+
+#define DPTX_CONNECT_TIMEOUT msecs_to_jiffies(1000)
+
+static int dcp_dptx_connect(struct apple_dcp *dcp, u32 port)
+{
+ int ret = 0;
+
+ if (!dcp->phy) {
+ dev_warn(dcp->dev, "dcp_dptx_connect: missing phy\n");
+ return -ENODEV;
+ }
+ dev_info(dcp->dev, "%s(port=%d)\n", __func__, port);
+
+ mutex_lock(&dcp->hpd_mutex);
+ if (!dcp->dptxport[port].enabled) {
+ dev_warn(dcp->dev, "dcp_dptx_connect: dptx service for port %d not enabled\n", port);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (dcp->dptxport[port].connected)
+ goto out_unlock;
+
+ reinit_completion(&dcp->dptxport[port].linkcfg_completion);
+ dcp->dptxport[port].atcphy = dcp->phy;
+ dptxport_connect(dcp->dptxport[port].service, 0, dcp->dptx_phy, dcp->dptx_die);
+ dptxport_request_display(dcp->dptxport[port].service);
+ dcp->dptxport[port].connected = true;
+
+ mutex_unlock(&dcp->hpd_mutex);
+ ret = wait_for_completion_timeout(&dcp->dptxport[port].linkcfg_completion,
+ DPTX_CONNECT_TIMEOUT);
+ if (ret < 0)
+ dev_warn(dcp->dev, "dcp_dptx_connect: port %d link complete failed:%d\n",
+ port, ret);
+ else
+ dev_dbg(dcp->dev, "dcp_dptx_connect: waited %d ms for link\n",
+ jiffies_to_msecs(DPTX_CONNECT_TIMEOUT - ret));
+
+ usleep_range(5, 10);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dcp->hpd_mutex);
+ return ret;
+}
+
+static int dcp_dptx_disconnect(struct apple_dcp *dcp, u32 port)
+{
+ dev_info(dcp->dev, "%s(port=%d)\n", __func__, port);
+
+ mutex_lock(&dcp->hpd_mutex);
+ if (dcp->dptxport[port].enabled && dcp->dptxport[port].connected) {
+ dptxport_release_display(dcp->dptxport[port].service);
+ dcp->dptxport[port].connected = false;
+ }
+ mutex_unlock(&dcp->hpd_mutex);
+
+ return 0;
+}
+
+static irqreturn_t dcp_dp2hdmi_hpd(int irq, void *data)
+{
+ struct apple_dcp *dcp = data;
+ bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+
+ /* do nothing on disconnect and trust that dcp detects it itself.
+	 * Parallel disconnect HPDs result in drm disabling the CRTC even when
+	 * it should not.
+ * The interrupt should be changed to rising but for now the disconnect
+ * IRQs might be helpful for debugging.
+ */
+ dev_info(dcp->dev, "DP2HDMI HPD irq, connected:%d\n", connected);
+
+ if (connected)
+ dcp_dptx_connect(dcp, 0);
+
+ return IRQ_HANDLED;
+}
+
+void dcp_link(struct platform_device *pdev, struct apple_crtc *crtc,
+ struct apple_connector *connector)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ dcp->crtc = crtc;
+ dcp->connector = connector;
+}
+EXPORT_SYMBOL_GPL(dcp_link);
+
+int dcp_start(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ int ret;
+
+ init_completion(&dcp->start_done);
+
+ /* start RTKit endpoints */
+ ret = systemep_init(dcp);
+ if (ret)
+ dev_warn(dcp->dev, "Failed to start system endpoint: %d", ret);
+
+ if (dcp->phy && dcp->fw_compat >= DCP_FIRMWARE_V_13_5) {
+ ret = ibootep_init(dcp);
+ if (ret)
+ dev_warn(dcp->dev, "Failed to start IBOOT endpoint: %d",
+ ret);
+
+ ret = dptxep_init(dcp);
+ if (ret)
+ dev_warn(dcp->dev, "Failed to start DPTX endpoint: %d",
+ ret);
+ else if (dcp->dptxport[0].enabled) {
+ bool connected;
+ /* force disconnect on start - necessary if the display
+ * is already up from m1n1
+ */
+ dptxport_set_hpd(dcp->dptxport[0].service, false);
+ dptxport_release_display(dcp->dptxport[0].service);
+ usleep_range(10 * USEC_PER_MSEC, 25 * USEC_PER_MSEC);
+
+ connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+ dev_info(dcp->dev, "%s: DP2HDMI HPD connected:%d\n", __func__, connected);
+
+ // necessary on j473/j474 but not on j314c
+ if (connected)
+ dcp_dptx_connect(dcp, 0);
+ }
+ } else if (dcp->phy)
+ dev_warn(dcp->dev, "OS firmware incompatible with dptxport EP\n");
+
+ ret = iomfb_start_rtkit(dcp);
+ if (ret)
+ dev_err(dcp->dev, "Failed to start IOMFB endpoint: %d", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(dcp_start);
+
+static int dcp_enable_dp2hdmi_hpd(struct apple_dcp *dcp)
+{
+ if (dcp->hdmi_hpd_irq)
+ enable_irq(dcp->hdmi_hpd_irq);
+
+ return 0;
+}
+
+int dcp_wait_ready(struct platform_device *pdev, u64 timeout)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ int ret;
+
+ if (dcp->crashed)
+ return -ENODEV;
+ if (dcp->active)
+		return dcp_enable_dp2hdmi_hpd(dcp);
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ ret = wait_for_completion_timeout(&dcp->start_done, timeout);
+ if (ret < 0)
+ return ret;
+
+ if (dcp->crashed)
+ return -ENODEV;
+
+ if (dcp->active)
+ dcp_enable_dp2hdmi_hpd(dcp);
+
+ return dcp->active ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(dcp_wait_ready);
+
+static void __maybe_unused dcp_sleep(struct apple_dcp *dcp)
+{
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ iomfb_sleep_v12_3(dcp);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ iomfb_sleep_v13_3(dcp);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+ break;
+ }
+}
+
+void dcp_poweron(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ if (dcp->hdmi_hpd) {
+ bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+ dev_info(dcp->dev, "%s: DP2HDMI HPD connected:%d\n", __func__, connected);
+
+ if (connected)
+ dcp_dptx_connect(dcp, 0);
+ }
+
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ iomfb_poweron_v12_3(dcp);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ iomfb_poweron_v13_3(dcp);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+ break;
+ }
+}
+EXPORT_SYMBOL(dcp_poweron);
+
+void dcp_poweroff(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ iomfb_poweroff_v12_3(dcp);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ iomfb_poweroff_v13_3(dcp);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+ break;
+ }
+
+ if (dcp->hdmi_hpd) {
+ bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+ if (!connected)
+ dcp_dptx_disconnect(dcp, 0);
+ }
+}
+EXPORT_SYMBOL(dcp_poweroff);
+
+static void dcp_work_register_backlight(struct work_struct *work)
+{
+ int ret;
+ struct apple_dcp *dcp;
+
+ dcp = container_of(work, struct apple_dcp, bl_register_wq);
+
+ mutex_lock(&dcp->bl_register_mutex);
+ if (dcp->brightness.bl_dev)
+ goto out_unlock;
+
+	/* try to register the backlight device */
+ ret = dcp_backlight_register(dcp);
+ if (ret) {
+ dev_err(dcp->dev, "Unable to register backlight device\n");
+ dcp->brightness.maximum = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&dcp->bl_register_mutex);
+}
+
+static int dcp_create_piodma_iommu_dev(struct apple_dcp *dcp)
+{
+ int ret;
+ struct device_node *node = of_get_child_by_name(dcp->dev->of_node, "piodma");
+
+ if (!node)
+ return dev_err_probe(dcp->dev, -ENODEV,
+ "Failed to get piodma child DT node\n");
+
+ dcp->piodma = of_platform_device_create(node, NULL, dcp->dev);
+ if (!dcp->piodma) {
+ of_node_put(node);
+		return dev_err_probe(dcp->dev, -ENODEV, "Failed to create piodma pdev for %pOF\n", node);
+ }
+
+ ret = dma_set_mask_and_coherent(&dcp->piodma->dev, DMA_BIT_MASK(42));
+ if (ret)
+ goto err_destroy_pdev;
+
+ ret = of_dma_configure(&dcp->piodma->dev, node, true);
+ if (ret) {
+ ret = dev_err_probe(dcp->dev, ret,
+ "Failed to configure IOMMU child DMA\n");
+ goto err_destroy_pdev;
+ }
+	of_node_put(node);
+	/* clear so the shared error path below does not put the node twice */
+	node = NULL;
+
+ dcp->iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!dcp->iommu_dom) {
+ ret = -ENOMEM;
+ goto err_destroy_pdev;
+ }
+
+ ret = iommu_attach_device(dcp->iommu_dom, &dcp->piodma->dev);
+ if (ret) {
+ ret = dev_err_probe(dcp->dev, ret,
+ "Failed to attach IOMMU child domain\n");
+ goto err_free_domain;
+ }
+
+ return 0;
+err_free_domain:
+ iommu_domain_free(dcp->iommu_dom);
+err_destroy_pdev:
+ of_node_put(node);
+ of_platform_device_destroy(&dcp->piodma->dev, NULL);
+ return ret;
+}
+
+static int dcp_get_bw_scratch_reg(struct apple_dcp *dcp, u32 expected)
+{
+ struct of_phandle_args ph_args;
+ u32 addr_idx, disp_idx, offset;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dcp->dev->of_node, "apple,bw-scratch",
+ "#apple,bw-scratch-cells", 0, &ph_args);
+ if (ret < 0) {
+ dev_err(dcp->dev, "Failed to read 'apple,bw-scratch': %d\n", ret);
+ return ret;
+ }
+
+ if (ph_args.args_count != 3) {
+ dev_err(dcp->dev, "Unexpected 'apple,bw-scratch' arg count %d\n",
+ ph_args.args_count);
+ ret = -EINVAL;
+ goto err_of_node_put;
+ }
+
+ addr_idx = ph_args.args[0];
+ disp_idx = ph_args.args[1];
+ offset = ph_args.args[2];
+
+ if (disp_idx != expected || disp_idx >= MAX_DISP_REGISTERS) {
+ dev_err(dcp->dev, "Unexpected disp_reg value in 'apple,bw-scratch': %d\n",
+ disp_idx);
+ ret = -EINVAL;
+ goto err_of_node_put;
+ }
+
+ ret = of_address_to_resource(ph_args.np, addr_idx, &dcp->disp_bw_scratch_res);
+ if (ret < 0) {
+ dev_err(dcp->dev, "Failed to get 'apple,bw-scratch' resource %d from %pOF\n",
+ addr_idx, ph_args.np);
+ goto err_of_node_put;
+ }
+ if (offset > resource_size(&dcp->disp_bw_scratch_res) - 4) {
+ ret = -EINVAL;
+ goto err_of_node_put;
+ }
+
+ dcp->disp_registers[disp_idx] = &dcp->disp_bw_scratch_res;
+ dcp->disp_bw_scratch_index = disp_idx;
+ dcp->disp_bw_scratch_offset = offset;
+ ret = 0;
+
+err_of_node_put:
+ of_node_put(ph_args.np);
+ return ret;
+}
+
+static int dcp_get_bw_doorbell_reg(struct apple_dcp *dcp, u32 expected)
+{
+ struct of_phandle_args ph_args;
+ u32 addr_idx, disp_idx;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dcp->dev->of_node, "apple,bw-doorbell",
+ "#apple,bw-doorbell-cells", 0, &ph_args);
+ if (ret < 0) {
+ dev_err(dcp->dev, "Failed to read 'apple,bw-doorbell': %d\n", ret);
+ return ret;
+ }
+
+ if (ph_args.args_count != 2) {
+ dev_err(dcp->dev, "Unexpected 'apple,bw-doorbell' arg count %d\n",
+ ph_args.args_count);
+ ret = -EINVAL;
+ goto err_of_node_put;
+ }
+
+ addr_idx = ph_args.args[0];
+ disp_idx = ph_args.args[1];
+
+ if (disp_idx != expected || disp_idx >= MAX_DISP_REGISTERS) {
+ dev_err(dcp->dev, "Unexpected disp_reg value in 'apple,bw-doorbell': %d\n",
+ disp_idx);
+ ret = -EINVAL;
+ goto err_of_node_put;
+ }
+
+ ret = of_address_to_resource(ph_args.np, addr_idx, &dcp->disp_bw_doorbell_res);
+ if (ret < 0) {
+ dev_err(dcp->dev, "Failed to get 'apple,bw-doorbell' resource %d from %pOF\n",
+ addr_idx, ph_args.np);
+ goto err_of_node_put;
+ }
+ dcp->disp_bw_doorbell_index = disp_idx;
+ dcp->disp_registers[disp_idx] = &dcp->disp_bw_doorbell_res;
+ ret = 0;
+
+err_of_node_put:
+ of_node_put(ph_args.np);
+ return ret;
+}
+
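+/*
+ * MEM resource 0 is the coprocessor control block (mapped by name in
+ * dcp_comp_bind()); the remaining MEM resources plus the pmgr bandwidth
+ * scratch/doorbell regions looked up below are handed to the DCP as display
+ * registers.
+ */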
+static int dcp_get_disp_regs(struct apple_dcp *dcp)
+{
+ struct platform_device *pdev = to_platform_device(dcp->dev);
+ int count = pdev->num_resources - 1;
+ int i, ret;
+
+ if (count <= 0 || count > MAX_DISP_REGISTERS)
+ return -EINVAL;
+
+ for (i = 0; i < count; ++i) {
+ dcp->disp_registers[i] =
+ platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+ }
+
+ /* load pmgr bandwidth scratch resource and offset */
+ ret = dcp_get_bw_scratch_reg(dcp, count);
+ if (ret < 0)
+ return ret;
+ count += 1;
+
+ /* load pmgr bandwidth doorbell resource if present (only on t8103) */
+ if (of_property_present(dcp->dev->of_node, "apple,bw-doorbell")) {
+ ret = dcp_get_bw_doorbell_reg(dcp, count);
+ if (ret < 0)
+ return ret;
+ count += 1;
+ }
+
+ dcp->nr_disp_registers = count;
+ return 0;
+}
+
+#define DCP_FW_VERSION_MIN_LEN 3
+#define DCP_FW_VERSION_MAX_LEN 5
+#define DCP_FW_VERSION_STR_LEN (DCP_FW_VERSION_MAX_LEN * 4)
+
+static int dcp_read_fw_version(struct device *dev, const char *name,
+ char *version_str)
+{
+ u32 ver[DCP_FW_VERSION_MAX_LEN];
+ int len_str;
+ int len;
+
+ len = of_property_read_variable_u32_array(dev->of_node, name, ver,
+ DCP_FW_VERSION_MIN_LEN,
+ DCP_FW_VERSION_MAX_LEN);
+
+ switch (len) {
+ case 3:
+ len_str = scnprintf(version_str, DCP_FW_VERSION_STR_LEN,
+ "%d.%d.%d", ver[0], ver[1], ver[2]);
+ break;
+ case 4:
+ len_str = scnprintf(version_str, DCP_FW_VERSION_STR_LEN,
+ "%d.%d.%d.%d", ver[0], ver[1], ver[2],
+ ver[3]);
+ break;
+ case 5:
+ len_str = scnprintf(version_str, DCP_FW_VERSION_STR_LEN,
+ "%d.%d.%d.%d.%d", ver[0], ver[1], ver[2],
+ ver[3], ver[4]);
+ break;
+ default:
+ len_str = strscpy(version_str, "UNKNOWN",
+ DCP_FW_VERSION_STR_LEN);
+ if (len >= 0)
+ len = -EOVERFLOW;
+ break;
+ }
+
+ if (len_str >= DCP_FW_VERSION_STR_LEN)
+ dev_warn(dev, "'%s' truncated: '%s'\n", name, version_str);
+
+ return len;
+}
+
+static enum dcp_firmware_version dcp_check_firmware_version(struct device *dev)
+{
+ char compat_str[DCP_FW_VERSION_STR_LEN];
+ char fw_str[DCP_FW_VERSION_STR_LEN];
+ int ret;
+
+ /* firmware version is just informative */
+ dcp_read_fw_version(dev, "apple,firmware-version", fw_str);
+
+ ret = dcp_read_fw_version(dev, "apple,firmware-compat", compat_str);
+ if (ret < 0) {
+ dev_err(dev, "Could not read 'apple,firmware-compat': %d\n", ret);
+ return DCP_FIRMWARE_UNKNOWN;
+ }
+
+ if (strncmp(compat_str, "12.3.0", sizeof(compat_str)) == 0)
+ return DCP_FIRMWARE_V_12_3;
+ /*
+ * m1n1 reports firmware version 13.5 as compatible with 13.3. This is
+ * only true for the iomfb endpoint. The interface for the dptx-port
+ * endpoint changed between 13.3 and 13.5. The driver will only support
+ * firmware 13.5. Check the actual firmware version for compat version
+ * 13.3 until m1n1 reports 13.5 as "firmware-compat".
+ */
+ else if ((strncmp(compat_str, "13.3.0", sizeof(compat_str)) == 0) &&
+ (strncmp(fw_str, "13.5.0", sizeof(compat_str)) == 0))
+ return DCP_FIRMWARE_V_13_5;
+ else if (strncmp(compat_str, "13.5.0", sizeof(compat_str)) == 0)
+ return DCP_FIRMWARE_V_13_5;
+
+ dev_err(dev, "DCP firmware-compat %s (FW: %s) is not supported\n",
+ compat_str, fw_str);
+
+ return DCP_FIRMWARE_UNKNOWN;
+}
+
+static int dcp_comp_bind(struct device *dev, struct device *main, void *data)
+{
+ struct device_node *panel_np;
+ struct apple_dcp *dcp = dev_get_drvdata(dev);
+ u32 cpu_ctrl;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(42));
+ if (ret)
+ return ret;
+
+ dcp->coproc_reg = devm_platform_ioremap_resource_byname(to_platform_device(dev), "coproc");
+ if (IS_ERR(dcp->coproc_reg))
+ return PTR_ERR(dcp->coproc_reg);
+
+ of_property_read_u32(dev->of_node, "apple,dcp-index",
+ &dcp->index);
+ of_property_read_u32(dev->of_node, "apple,dptx-phy",
+ &dcp->dptx_phy);
+ of_property_read_u32(dev->of_node, "apple,dptx-die",
+ &dcp->dptx_die);
+ if (dcp->index || dcp->dptx_phy || dcp->dptx_die)
+ dev_info(dev, "DCP index:%u dptx target phy: %u dptx die: %u\n",
+ dcp->index, dcp->dptx_phy, dcp->dptx_die);
+ rw_init(&dcp->hpd_mutex, "aplhpd");
+
+ if (!show_notch)
+ ret = of_property_read_u32(dev->of_node, "apple,notch-height",
+ &dcp->notch_height);
+
+ if (dcp->notch_height > MAX_NOTCH_HEIGHT)
+ dcp->notch_height = MAX_NOTCH_HEIGHT;
+ if (dcp->notch_height > 0)
+		dev_info(dev, "Detected display with notch of %u pixels\n", dcp->notch_height);
+
+	/* initialize brightness scale to a sensible default to avoid divide by 0 */
+ dcp->brightness.scale = 65536;
+ panel_np = of_get_compatible_child(dev->of_node, "apple,panel-mini-led");
+ if (panel_np)
+ dcp->panel.has_mini_led = true;
+ else
+ panel_np = of_get_compatible_child(dev->of_node, "apple,panel");
+
+ if (panel_np) {
+ const char height_prop[2][16] = { "adj-height-mm", "height-mm" };
+
+ if (of_device_is_available(panel_np)) {
+ ret = of_property_read_u32(panel_np, "apple,max-brightness",
+ &dcp->brightness.maximum);
+ if (ret)
+ dev_err(dev, "Missing property 'apple,max-brightness'\n");
+ }
+
+ of_property_read_u32(panel_np, "width-mm", &dcp->panel.width_mm);
+ /* use adjusted height as long as the notch is hidden */
+ of_property_read_u32(panel_np, height_prop[!dcp->notch_height],
+ &dcp->panel.height_mm);
+
+ of_node_put(panel_np);
+ dcp->connector_type = DRM_MODE_CONNECTOR_eDP;
+ INIT_WORK(&dcp->bl_register_wq, dcp_work_register_backlight);
+ rw_init(&dcp->bl_register_mutex, "dcpbl");
+ } else if (of_property_match_string(dev->of_node, "apple,connector-type", "HDMI-A") >= 0)
+ dcp->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ else if (of_property_match_string(dev->of_node, "apple,connector-type", "DP") >= 0)
+ dcp->connector_type = DRM_MODE_CONNECTOR_DisplayPort;
+ else if (of_property_match_string(dev->of_node, "apple,connector-type", "USB-C") >= 0)
+ dcp->connector_type = DRM_MODE_CONNECTOR_USB;
+ else
+ dcp->connector_type = DRM_MODE_CONNECTOR_Unknown;
+
+ ret = dcp_create_piodma_iommu_dev(dcp);
+ if (ret)
+ return dev_err_probe(dev, ret,
+				     "Failed to create PIODMA iommu child device");
+
+ ret = dcp_get_disp_regs(dcp);
+ if (ret) {
+ dev_err(dev, "failed to find display registers\n");
+ return ret;
+ }
+
+ dcp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dcp->clk))
+ return dev_err_probe(dev, PTR_ERR(dcp->clk),
+ "Unable to find clock\n");
+
+ bitmap_zero(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+	// TODO: mem_desc IDs start at 1, for simplicity just skip '0' entry
+ set_bit(0, dcp->memdesc_map);
+
+ INIT_WORK(&dcp->vblank_wq, dcp_delayed_vblank);
+
+ dcp->swapped_out_fbs =
+ (struct list_head)LIST_HEAD_INIT(dcp->swapped_out_fbs);
+
+ cpu_ctrl =
+ readl_relaxed(dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+ writel_relaxed(cpu_ctrl | APPLE_DCP_COPROC_CPU_CONTROL_RUN,
+ dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+
+ dcp->rtk = devm_apple_rtkit_init(dev, dcp, "mbox", 0, &rtkit_ops);
+ if (IS_ERR(dcp->rtk))
+ return dev_err_probe(dev, PTR_ERR(dcp->rtk),
+				       "Failed to initialize RTKit");
+
+ ret = apple_rtkit_wake(dcp->rtk);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to boot RTKit: %d", ret);
+ return ret;
+}
+
+/*
+ * We need to shutdown DCP before tearing down the display subsystem. Otherwise
+ * the DCP will crash and briefly flash a green screen of death.
+ */
+static void dcp_comp_unbind(struct device *dev, struct device *main, void *data)
+{
+ struct apple_dcp *dcp = dev_get_drvdata(dev);
+
+ if (dcp->hdmi_hpd_irq)
+ disable_irq(dcp->hdmi_hpd_irq);
+
+ if (dcp && dcp->shmem)
+ iomfb_shutdown(dcp);
+
+ if (dcp->piodma) {
+ iommu_detach_device(dcp->iommu_dom, &dcp->piodma->dev);
+ iommu_domain_free(dcp->iommu_dom);
+ /* TODO: the piodma platform device has to be destroyed but
+ * doing so leads to all kind of breakage.
+ */
+ // of_platform_device_destroy(&dcp->piodma->dev, NULL);
+ dcp->piodma = NULL;
+ }
+
+ devm_clk_put(dev, dcp->clk);
+ dcp->clk = NULL;
+}
+
+static const struct component_ops dcp_comp_ops = {
+ .bind = dcp_comp_bind,
+ .unbind = dcp_comp_unbind,
+};
+
+static int dcp_platform_probe(struct platform_device *pdev)
+{
+ enum dcp_firmware_version fw_compat;
+ struct device *dev = &pdev->dev;
+ struct apple_dcp *dcp;
+ u32 mux_index;
+
+ fw_compat = dcp_check_firmware_version(dev);
+ if (fw_compat == DCP_FIRMWARE_UNKNOWN)
+ return -ENODEV;
+
+ /* Check for "apple,bw-scratch" to avoid probing appledrm with outdated
+ * device trees. This prevents replacing simpledrm and ending up without
+ * display.
+ */
+ if (!of_property_present(dev->of_node, "apple,bw-scratch"))
+ return dev_err_probe(dev, -ENODEV, "Incompatible devicetree! "
+ "Use devicetree matching this kernel.\n");
+
+ dcp = devm_kzalloc(dev, sizeof(*dcp), GFP_KERNEL);
+ if (!dcp)
+ return -ENOMEM;
+
+ dcp->fw_compat = fw_compat;
+ dcp->dev = dev;
+ dcp->hw = *(struct apple_dcp_hw_data *)of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, dcp);
+
+ dcp->phy = devm_phy_optional_get(dev, "dp-phy");
+ if (IS_ERR(dcp->phy)) {
+ dev_err(dev, "Failed to get dp-phy: %ld", PTR_ERR(dcp->phy));
+ return PTR_ERR(dcp->phy);
+ }
+ if (dcp->phy) {
+ int ret;
+ /*
+ * Request DP2HDMI related GPIOs as optional for DP-altmode
+ * compatibility. J180D misses a dp2hdmi-pwren GPIO in the
+ * template ADT. TODO: check device ADT
+ */
+ dcp->hdmi_hpd = devm_gpiod_get_optional(dev, "hdmi-hpd", GPIOD_IN);
+ if (IS_ERR(dcp->hdmi_hpd))
+ return PTR_ERR(dcp->hdmi_hpd);
+ if (dcp->hdmi_hpd) {
+ int irq = gpiod_to_irq(dcp->hdmi_hpd);
+ if (irq < 0) {
+ dev_err(dev, "failed to translate HDMI hpd GPIO to IRQ\n");
+ return irq;
+ }
+ dcp->hdmi_hpd_irq = irq;
+
+ ret = devm_request_threaded_irq(dev, dcp->hdmi_hpd_irq,
+ NULL, dcp_dp2hdmi_hpd,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN |
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "dp2hdmi-hpd-irq", dcp);
+ if (ret < 0) {
+ dev_err(dev, "failed to request HDMI hpd irq %d: %d",
+ irq, ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Power DP2HDMI on as it is required for the HPD irq.
+ * TODO: check if one is sufficient for the hpd to save power
+ * on battery powered Macbooks.
+ */
+ dcp->hdmi_pwren = devm_gpiod_get_optional(dev, "hdmi-pwren", GPIOD_OUT_HIGH);
+ if (IS_ERR(dcp->hdmi_pwren))
+ return PTR_ERR(dcp->hdmi_pwren);
+
+ dcp->dp2hdmi_pwren = devm_gpiod_get_optional(dev, "dp2hdmi-pwren", GPIOD_OUT_HIGH);
+ if (IS_ERR(dcp->dp2hdmi_pwren))
+ return PTR_ERR(dcp->dp2hdmi_pwren);
+
+ ret = of_property_read_u32(dev->of_node, "mux-index", &mux_index);
+ if (!ret) {
+ dcp->xbar = devm_mux_control_get(dev, "dp-xbar");
+ if (IS_ERR(dcp->xbar)) {
+ dev_err(dev, "Failed to get dp-xbar: %ld", PTR_ERR(dcp->xbar));
+ return PTR_ERR(dcp->xbar);
+ }
+ ret = mux_control_select(dcp->xbar, mux_index);
+ if (ret)
+ dev_warn(dev, "mux_control_select failed: %d\n", ret);
+ }
+ }
+
+ return component_add(&pdev->dev, &dcp_comp_ops);
+}
+
+#ifdef __linux__
+
+static int dcp_platform_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dcp_comp_ops);
+
+ return 0;
+}
+
+static void dcp_platform_shutdown(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dcp_comp_ops);
+}
+
+#endif
+
+static int dcp_platform_suspend(struct device *dev)
+{
+ struct apple_dcp *dcp = dev_get_drvdata(dev);
+
+ if (dcp->hdmi_hpd_irq) {
+ disable_irq(dcp->hdmi_hpd_irq);
+ dcp_dptx_disconnect(dcp, 0);
+ }
+ /*
+ * Set the device as a wakeup device, which forces its power
+ * domains to stay on. We need this as we do not support full
+ * shutdown properly yet.
+ */
+ device_set_wakeup_path(dev);
+
+ return 0;
+}
+
+static int dcp_platform_resume(struct device *dev)
+{
+ struct apple_dcp *dcp = dev_get_drvdata(dev);
+
+ if (dcp->hdmi_hpd_irq)
+ enable_irq(dcp->hdmi_hpd_irq);
+
+ if (dcp->hdmi_hpd) {
+ bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+ dev_info(dcp->dev, "resume: HPD connected:%d\n", connected);
+ if (connected)
+ dcp_dptx_connect(dcp, 0);
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(dcp_platform_pm_ops,
+ dcp_platform_suspend, dcp_platform_resume);
+
+static const struct apple_dcp_hw_data apple_dcp_hw_t6020 = {
+ .num_dptx_ports = 1,
+};
+
+static const struct apple_dcp_hw_data apple_dcp_hw_t8112 = {
+ .num_dptx_ports = 2,
+};
+
+static const struct apple_dcp_hw_data apple_dcp_hw_dcp = {
+ .num_dptx_ports = 0,
+};
+
+static const struct apple_dcp_hw_data apple_dcp_hw_dcpext = {
+ .num_dptx_ports = 2,
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,t6020-dcp", .data = &apple_dcp_hw_t6020, },
+ { .compatible = "apple,t8112-dcp", .data = &apple_dcp_hw_t8112, },
+ { .compatible = "apple,dcp", .data = &apple_dcp_hw_dcp, },
+ { .compatible = "apple,dcpext", .data = &apple_dcp_hw_dcpext, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+#ifdef __linux__
+
+static struct platform_driver apple_platform_driver = {
+ .probe = dcp_platform_probe,
+ .remove = dcp_platform_remove,
+ .shutdown = dcp_platform_shutdown,
+ .driver = {
+ .name = "apple-dcp",
+ .of_match_table = of_match,
+ .pm = pm_sleep_ptr(&dcp_platform_pm_ops),
+ },
+};
+
+drm_module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION("Apple Display Controller DRM driver");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_H__
+#define __APPLE_DCP_H__
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fourcc.h>
+
+#include "dcp-internal.h"
+#include "parser.h"
+
+struct apple_crtc {
+ struct drm_crtc base;
+ struct drm_pending_vblank_event *event;
+ bool vsync_disabled;
+
+ /* Reference to the DCP device owning this CRTC */
+ struct platform_device *dcp;
+};
+
+#define to_apple_crtc(x) container_of(x, struct apple_crtc, base)
+
+void dcp_hotplug(struct work_struct *work);
+
+struct apple_connector {
+ struct drm_connector base;
+ bool connected;
+
+ struct platform_device *dcp;
+
+ /* Workqueue for sending hotplug events to the associated device */
+ struct work_struct hotplug_wq;
+};
+
+#define to_apple_connector(x) container_of(x, struct apple_connector, base)
+
+struct apple_encoder {
+ struct drm_encoder base;
+};
+
+#define to_apple_encoder(x) container_of(x, struct apple_encoder, base)
+
+void dcp_poweroff(struct platform_device *pdev);
+void dcp_poweron(struct platform_device *pdev);
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+int dcp_get_connector_type(struct platform_device *pdev);
+void dcp_link(struct platform_device *pdev, struct apple_crtc *apple,
+ struct apple_connector *connector);
+int dcp_start(struct platform_device *pdev);
+int dcp_wait_ready(struct platform_device *pdev, u64 timeout);
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
+bool dcp_is_initialized(struct platform_device *pdev);
+void apple_crtc_vblank(struct apple_crtc *apple);
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc);
+int dcp_get_modes(struct drm_connector *connector);
+int dcp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+int dcp_crtc_atomic_modeset(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+bool dcp_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void dcp_set_dimensions(struct apple_dcp *dcp);
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message);
+
+int iomfb_start_rtkit(struct apple_dcp *dcp);
+void iomfb_shutdown(struct apple_dcp *dcp);
+/* rtkit message handler for IOMFB messages */
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message);
+
+int systemep_init(struct apple_dcp *dcp);
+int dptxep_init(struct apple_dcp *dcp);
+int ibootep_init(struct apple_dcp *dcp);
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (C) The Asahi Linux Contributors */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modeset_lock.h>
+
+#include <linux/backlight.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include "linux/jiffies.h"
+
+#include "dcp.h"
+#include "dcp-internal.h"
+
+#define MIN_BRIGHTNESS_PART1 2U
+#define MAX_BRIGHTNESS_PART1 99U
+#define MIN_BRIGHTNESS_PART2 103U
+#define MAX_BRIGHTNESS_PART2 510U
+
+/*
+ * lookup table for display brightness 2 to 99 nits
+ */
+static u32 brightness_part1[] = {
+ 0x0000000, 0x0810038, 0x0f000bd, 0x143011c,
+ 0x1850165, 0x1bc01a1, 0x1eb01d4, 0x2140200,
+ 0x2380227, 0x2590249, 0x2770269, 0x2930285,
+ 0x2ac02a0, 0x2c402b8, 0x2d902cf, 0x2ee02e4,
+ 0x30102f8, 0x314030b, 0x325031c, 0x335032d,
+ 0x345033d, 0x354034d, 0x362035b, 0x3700369,
+ 0x37d0377, 0x38a0384, 0x3960390, 0x3a2039c,
+ 0x3ad03a7, 0x3b803b3, 0x3c303bd, 0x3cd03c8,
+ 0x3d703d2, 0x3e103dc, 0x3ea03e5, 0x3f303ef,
+ 0x3fc03f8, 0x4050400, 0x40d0409, 0x4150411,
+ 0x41d0419, 0x4250421, 0x42d0429, 0x4340431,
+ 0x43c0438, 0x443043f, 0x44a0446, 0x451044d,
+ 0x4570454, 0x45e045b, 0x4640461, 0x46b0468,
+ 0x471046e, 0x4770474, 0x47d047a, 0x4830480,
+ 0x4890486, 0x48e048b, 0x4940491, 0x4990497,
+ 0x49f049c, 0x4a404a1, 0x4a904a7, 0x4ae04ac,
+ 0x4b304b1, 0x4b804b6, 0x4bd04bb, 0x4c204c0,
+ 0x4c704c5, 0x4cc04c9, 0x4d004ce, 0x4d504d3,
+ 0x4d904d7, 0x4de04dc, 0x4e204e0, 0x4e704e4,
+ 0x4eb04e9, 0x4ef04ed, 0x4f304f1, 0x4f704f5,
+ 0x4fb04f9, 0x4ff04fd, 0x5030501, 0x5070505,
+ 0x50b0509, 0x50f050d, 0x5130511, 0x5160515,
+ 0x51a0518, 0x51e051c, 0x5210520, 0x5250523,
+ 0x5290527, 0x52c052a, 0x52f052e, 0x5330531,
+ 0x5360535, 0x53a0538, 0x53d053b, 0x540053f,
+ 0x5440542, 0x5470545, 0x54a0548, 0x54d054c,
+ 0x550054f, 0x5530552, 0x5560555, 0x5590558,
+ 0x55c055b, 0x55f055e, 0x5620561, 0x5650564,
+ 0x5680567, 0x56b056a, 0x56e056d, 0x571056f,
+ 0x5740572, 0x5760575, 0x5790578, 0x57c057b,
+ 0x57f057d, 0x5810580, 0x5840583, 0x5870585,
+ 0x5890588, 0x58c058b, 0x58f058d
+};
+
+static u32 brightness_part12[] = { 0x58f058d, 0x59d058f };
+
+/*
+ * lookup table for display brightness 103.3 to 510 nits
+ */
+static u32 brightness_part2[] = {
+ 0x59d058f, 0x5b805ab, 0x5d105c5, 0x5e805dd,
+ 0x5fe05f3, 0x6120608, 0x625061c, 0x637062e,
+ 0x6480640, 0x6580650, 0x6680660, 0x677066f,
+ 0x685067e, 0x693068c, 0x6a00699, 0x6ac06a6,
+ 0x6b806b2, 0x6c406be, 0x6cf06ca, 0x6da06d5,
+ 0x6e506df, 0x6ef06ea, 0x6f906f4, 0x70206fe,
+ 0x70c0707, 0x7150710, 0x71e0719, 0x7260722,
+ 0x72f072a, 0x7370733, 0x73f073b, 0x7470743,
+ 0x74e074a, 0x7560752, 0x75d0759, 0x7640760,
+ 0x76b0768, 0x772076e, 0x7780775, 0x77f077c,
+ 0x7850782, 0x78c0789, 0x792078f, 0x7980795,
+ 0x79e079b, 0x7a407a1, 0x7aa07a7, 0x7af07ac,
+ 0x7b507b2, 0x7ba07b8, 0x7c007bd, 0x7c507c2,
+ 0x7ca07c8, 0x7cf07cd, 0x7d407d2, 0x7d907d7,
+ 0x7de07dc, 0x7e307e1, 0x7e807e5, 0x7ec07ea,
+ 0x7f107ef, 0x7f607f3, 0x7fa07f8, 0x7fe07fc
+};
+
+
+static int dcp_get_brightness(struct backlight_device *bd)
+{
+ struct apple_dcp *dcp = bl_get_data(bd);
+
+ return dcp->brightness.nits;
+}
+
+#define SCALE_FACTOR (1 << 10)
+
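+/*
+ * Linear interpolation between neighboring table entries using 10 fractional
+ * bits (SCALE_FACTOR).
+ */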
+static u32 interpolate(int val, int min, int max, u32 *tbl, size_t tbl_size)
+{
+ u32 frac;
+ u64 low, high;
+ u32 interpolated = (tbl_size - 1) * ((val - min) * SCALE_FACTOR) / (max - min);
+
+ size_t index = interpolated / SCALE_FACTOR;
+
+ if (WARN(index + 1 >= tbl_size, "invalid index %zu for brightness %u", index, val))
+ return tbl[tbl_size / 2];
+
+ frac = interpolated & (SCALE_FACTOR - 1);
+ low = tbl[index];
+ high = tbl[index + 1];
+
+ return ((frac * high) + ((SCALE_FACTOR - frac) * low)) / SCALE_FACTOR;
+}
+
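+/*
+ * Map a brightness value in nits to a DAC code: clamp at the table edges and
+ * interpolate within part 1 (2-99 nits), part 2 (103-510 nits) or the small
+ * bridge table in between.
+ */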
+static u32 calculate_dac(struct apple_dcp *dcp, int val)
+{
+ u32 dac;
+
+ if (val <= MIN_BRIGHTNESS_PART1)
+ return 16 * brightness_part1[0];
+ else if (val == MAX_BRIGHTNESS_PART1)
+ return 16 * brightness_part1[ARRAY_SIZE(brightness_part1) - 1];
+ else if (val == MIN_BRIGHTNESS_PART2)
+ return 16 * brightness_part2[0];
+ else if (val >= MAX_BRIGHTNESS_PART2)
+ return brightness_part2[ARRAY_SIZE(brightness_part2) - 1];
+
+ if (val < MAX_BRIGHTNESS_PART1) {
+ dac = interpolate(val, MIN_BRIGHTNESS_PART1, MAX_BRIGHTNESS_PART1,
+ brightness_part1, ARRAY_SIZE(brightness_part1));
+ } else if (val > MIN_BRIGHTNESS_PART2) {
+ dac = interpolate(val, MIN_BRIGHTNESS_PART2, MAX_BRIGHTNESS_PART2,
+ brightness_part2, ARRAY_SIZE(brightness_part2));
+ } else {
+ dac = interpolate(val, MAX_BRIGHTNESS_PART1, MIN_BRIGHTNESS_PART2,
+ brightness_part12, ARRAY_SIZE(brightness_part12));
+ }
+
+ return 16 * dac;
+}
+
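+/*
+ * Trigger an otherwise empty atomic commit with color_mgmt_changed set so the
+ * next flush picks up the new DAC value.
+ */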
+static int drm_crtc_set_brightness(struct apple_dcp *dcp)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc = &dcp->crtc->base;
+ int ret = 0;
+
+ DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+
+ if (!dcp->brightness.update)
+ goto done;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = &ctx;
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ crtc_state->color_mgmt_changed |= true;
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+done:
+ DRM_MODESET_LOCK_ALL_END(crtc->dev, ctx, ret);
+
+ return ret;
+}
+
+static int dcp_set_brightness(struct backlight_device *bd)
+{
+ int ret = 0;
+ struct apple_dcp *dcp = bl_get_data(bd);
+ struct drm_modeset_acquire_ctx ctx;
+ int brightness = backlight_get_brightness(bd);
+
+ DRM_MODESET_LOCK_ALL_BEGIN(dcp->crtc->base.dev, ctx, 0, ret);
+
+ dcp->brightness.dac = calculate_dac(dcp, brightness);
+ dcp->brightness.update = true;
+
+ DRM_MODESET_LOCK_ALL_END(dcp->crtc->base.dev, ctx, ret);
+
+ /*
+ * Do not actively try to change brightness if no mode is set.
+	 * TODO: should this be reflected in the backlight's power property?
+	 * Defer this, hopefully until it becomes irrelevant due to proper
+	 * drm-integrated backlight handling.
+ */
+ if (!dcp->valid_mode)
+ return 0;
+
+	/* Wait one vblank cycle in the hope that an atomic swap has already
+	 * updated the brightness */
+ drm_msleep((1001 + 23) / 24); // 42ms for 23.976 fps
+
+ return drm_crtc_set_brightness(dcp);
+}
+
+static const struct backlight_ops dcp_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = dcp_get_brightness,
+ .update_status = dcp_set_brightness,
+};
+
+int dcp_backlight_register(struct apple_dcp *dcp)
+{
+ struct device *dev = dcp->dev;
+ struct backlight_device *bl_dev;
+ struct backlight_properties props = {
+ .type = BACKLIGHT_PLATFORM,
+ .brightness = dcp->brightness.nits,
+ .scale = BACKLIGHT_SCALE_LINEAR,
+ };
+ props.max_brightness = min(dcp->brightness.maximum, MAX_BRIGHTNESS_PART2 - 1);
+
+ bl_dev = devm_backlight_device_register(dev, "apple-panel-bl", dev, dcp,
+ &dcp_backlight_ops, &props);
+ if (IS_ERR(bl_dev))
+ return PTR_ERR(bl_dev);
+
+ dcp->brightness.bl_dev = bl_dev;
+ dcp->brightness.dac = calculate_dac(dcp, dcp->brightness.nits);
+
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+
+#include "afk.h"
+#include "dcp.h"
+#include "dptxep.h"
+#include "parser.h"
+#include "trace.h"
+
+struct dcpdptx_connection_cmd {
+ __le32 unk;
+ __le32 target;
+} __attribute__((packed));
+
+struct dcpdptx_hotplug_cmd {
+ u8 _pad0[16];
+ __le32 unk;
+} __attribute__((packed));
+
+struct dptxport_apcall_link_rate {
+ __le32 retcode;
+ u8 _unk0[12];
+ __le32 link_rate;
+ u8 _unk1[12];
+} __attribute__((packed));
+
+struct dptxport_apcall_lane_count {
+ __le32 retcode;
+ u8 _unk0[12];
+ __le64 lane_count;
+ u8 _unk1[8];
+} __attribute__((packed));
+
+struct dptxport_apcall_set_active_lane_count {
+ __le32 retcode;
+ u8 _unk0[12];
+ __le64 lane_count;
+ u8 _unk1[8];
+} __packed;
+
+struct dptxport_apcall_get_support {
+ __le32 retcode;
+ u8 _unk0[12];
+ __le32 supported;
+ u8 _unk1[12];
+} __attribute__((packed));
+
+struct dptxport_apcall_max_drive_settings {
+ __le32 retcode;
+ u8 _unk0[12];
+ __le32 max_drive_settings[2];
+ u8 _unk1[8];
+};
+
+struct dptxport_apcall_drive_settings {
+ __le32 retcode;
+ u8 _unk0[12];
+ __le32 unk1;
+ __le32 unk2;
+ __le32 unk3;
+ __le32 unk4;
+ __le32 unk5;
+ __le32 unk6;
+ __le32 unk7;
+};
+
+int dptxport_validate_connection(struct apple_epic_service *service, u8 core,
+ u8 atc, u8 die)
+{
+ struct dptx_port *dptx = service->cookie;
+ struct dcpdptx_connection_cmd cmd, resp;
+ int ret;
+ u32 target = FIELD_PREP(DCPDPTX_REMOTE_PORT_CORE, core) |
+ FIELD_PREP(DCPDPTX_REMOTE_PORT_ATC, atc) |
+ FIELD_PREP(DCPDPTX_REMOTE_PORT_DIE, die) |
+ DCPDPTX_REMOTE_PORT_CONNECTED;
+
+ trace_dptxport_validate_connection(dptx, core, atc, die);
+
+ cmd.target = cpu_to_le32(target);
+ cmd.unk = cpu_to_le32(0x100);
+ ret = afk_service_call(service, 0, 12, &cmd, sizeof(cmd), 40, &resp,
+ sizeof(resp), 40);
+ if (ret)
+ return ret;
+
+ if (le32_to_cpu(resp.target) != target)
+ return -EINVAL;
+ if (le32_to_cpu(resp.unk) != 0x100)
+ return -EINVAL;
+
+ return 0;
+}
+
+int dptxport_connect(struct apple_epic_service *service, u8 core, u8 atc,
+ u8 die)
+{
+ struct dptx_port *dptx = service->cookie;
+ struct dcpdptx_connection_cmd cmd, resp;
+ u32 unk_field = 0x0; // seen as 0x100 under some conditions
+ int ret;
+ u32 target = FIELD_PREP(DCPDPTX_REMOTE_PORT_CORE, core) |
+ FIELD_PREP(DCPDPTX_REMOTE_PORT_ATC, atc) |
+ FIELD_PREP(DCPDPTX_REMOTE_PORT_DIE, die) |
+ DCPDPTX_REMOTE_PORT_CONNECTED;
+
+ trace_dptxport_connect(dptx, core, atc, die);
+
+ cmd.target = cpu_to_le32(target);
+ cmd.unk = cpu_to_le32(unk_field);
+ ret = afk_service_call(service, 0, 11, &cmd, sizeof(cmd), 24, &resp,
+ sizeof(resp), 24);
+ if (ret)
+ return ret;
+
+ if (le32_to_cpu(resp.target) != target)
+ return -EINVAL;
+ if (le32_to_cpu(resp.unk) != unk_field)
+ dev_notice(service->ep->dcp->dev, "unexpected unk field in reply: 0x%x (0x%x)\n",
+ le32_to_cpu(resp.unk), unk_field);
+
+ return 0;
+}
+
+int dptxport_request_display(struct apple_epic_service *service)
+{
+ return afk_service_call(service, 0, 6, NULL, 0, 16, NULL, 0, 16);
+}
+
+int dptxport_release_display(struct apple_epic_service *service)
+{
+ return afk_service_call(service, 0, 7, NULL, 0, 16, NULL, 0, 16);
+}
+
+int dptxport_set_hpd(struct apple_epic_service *service, bool hpd)
+{
+ struct dcpdptx_hotplug_cmd cmd, resp;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (hpd)
+ cmd.unk = cpu_to_le32(1);
+
+ ret = afk_service_call(service, 8, 8, &cmd, sizeof(cmd), 12, &resp,
+ sizeof(resp), 12);
+ if (ret)
+ return ret;
+ if (le32_to_cpu(resp.unk) != 1)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+dptxport_call_get_max_drive_settings(struct apple_epic_service *service,
+ void *reply_, size_t reply_size)
+{
+ struct dptxport_apcall_max_drive_settings *reply = reply_;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+
+ reply->retcode = cpu_to_le32(0);
+ reply->max_drive_settings[0] = cpu_to_le32(0x3);
+ reply->max_drive_settings[1] = cpu_to_le32(0x3);
+
+ return 0;
+}
+
+static int
+dptxport_call_get_drive_settings(struct apple_epic_service *service,
+ const void *request_, size_t request_size,
+ void *reply_, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ const struct dptxport_apcall_drive_settings *request = request_;
+ struct dptxport_apcall_drive_settings *reply = reply_;
+
+ if (reply_size < sizeof(*reply) || request_size < sizeof(*request))
+ return -EINVAL;
+
+ *reply = *request;
+
+ /* Clear the rest of the buffer */
+ memset(reply_ + sizeof(*reply), 0, reply_size - sizeof(*reply));
+
+	if (le32_to_cpu(reply->retcode) != 4)
+		dev_err(service->ep->dcp->dev,
+			"get_drive_settings: unexpected retcode %u\n",
+			le32_to_cpu(reply->retcode));
+
+	reply->retcode = cpu_to_le32(4); /* Should already be 4? */
+ reply->unk5 = dptx->drive_settings[0];
+ reply->unk6 = 0;
+ reply->unk7 = dptx->drive_settings[1];
+
+ return 0;
+}
+
+static int
+dptxport_call_set_drive_settings(struct apple_epic_service *service,
+ const void *request_, size_t request_size,
+ void *reply_, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ const struct dptxport_apcall_drive_settings *request = request_;
+ struct dptxport_apcall_drive_settings *reply = reply_;
+
+ if (reply_size < sizeof(*reply) || request_size < sizeof(*request))
+ return -EINVAL;
+
+ *reply = *request;
+ reply->retcode = cpu_to_le32(0);
+
+ dev_info(service->ep->dcp->dev, "set_drive_settings: %d:%d:%d:%d:%d:%d:%d\n",
+ request->unk1, request->unk2, request->unk3, request->unk4,
+ request->unk5, request->unk6, request->unk7);
+
+ dptx->drive_settings[0] = reply->unk5;
+ dptx->drive_settings[1] = reply->unk7;
+
+ return 0;
+}
+
+static int dptxport_call_get_max_link_rate(struct apple_epic_service *service,
+ void *reply_, size_t reply_size)
+{
+ struct dptxport_apcall_link_rate *reply = reply_;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+
+ reply->retcode = cpu_to_le32(0);
+ reply->link_rate = cpu_to_le32(LINK_RATE_HBR3);
+
+ return 0;
+}
+
+static int dptxport_call_get_max_lane_count(struct apple_epic_service *service,
+ void *reply_, size_t reply_size)
+{
+ struct dptxport_apcall_lane_count *reply = reply_;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+
+ reply->retcode = cpu_to_le32(0);
+ reply->lane_count = cpu_to_le64(4);
+
+ return 0;
+}
+
+static int dptxport_call_set_active_lane_count(struct apple_epic_service *service,
+ const void *data, size_t data_size,
+ void *reply_, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ const struct dptxport_apcall_set_active_lane_count *request = data;
+ struct dptxport_apcall_set_active_lane_count *reply = reply_;
+ int ret = 0;
+ int retcode = 0;
+
+ if (reply_size < sizeof(*reply))
+		return -EINVAL;
+	if (data_size < sizeof(*request))
+		return -EINVAL;
+
+	u64 lane_count = le64_to_cpu(request->lane_count);
+
+ switch (lane_count) {
+ case 0 ... 2:
+ case 4:
+ dptx->phy_ops.dp.lanes = lane_count;
+ dptx->phy_ops.dp.set_lanes = 1;
+ break;
+ default:
+ dev_err(service->ep->dcp->dev, "set_active_lane_count: invalid lane count:%llu\n", lane_count);
+ retcode = 1;
+ lane_count = 0;
+ break;
+ }
+
+ if (dptx->phy_ops.dp.set_lanes) {
+ if (dptx->atcphy) {
+ ret = phy_configure(dptx->atcphy, &dptx->phy_ops);
+ if (ret)
+ return ret;
+ }
+ dptx->phy_ops.dp.set_lanes = 0;
+ }
+
+ dptx->lane_count = lane_count;
+
+ reply->retcode = cpu_to_le32(retcode);
+ reply->lane_count = cpu_to_le64(lane_count);
+
+ if (dptx->lane_count > 0)
+ complete(&dptx->linkcfg_completion);
+
+ return ret;
+}
+
+static int dptxport_call_get_link_rate(struct apple_epic_service *service,
+ void *reply_, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ struct dptxport_apcall_link_rate *reply = reply_;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+
+ reply->retcode = cpu_to_le32(0);
+ reply->link_rate = cpu_to_le32(dptx->link_rate);
+
+ return 0;
+}
+
+static int
+dptxport_call_will_change_link_config(struct apple_epic_service *service)
+{
+ struct dptx_port *dptx = service->cookie;
+
+ dptx->phy_ops.dp.set_lanes = 0;
+ dptx->phy_ops.dp.set_rate = 0;
+ dptx->phy_ops.dp.set_voltages = 0;
+
+ return 0;
+}
+
+static int
+dptxport_call_did_change_link_config(struct apple_epic_service *service)
+{
+ /* assume the link config did change and wait a little bit */
+ mdelay(10);
+
+ return 0;
+}
+
+static int dptxport_call_set_link_rate(struct apple_epic_service *service,
+ const void *data, size_t data_size,
+ void *reply_, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ const struct dptxport_apcall_link_rate *request = data;
+ struct dptxport_apcall_link_rate *reply = reply_;
+ u32 link_rate, phy_link_rate;
+ bool phy_set_rate = false;
+ int ret;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+ if (data_size < sizeof(*request))
+ return -EINVAL;
+
+ link_rate = le32_to_cpu(request->link_rate);
+ trace_dptxport_call_set_link_rate(dptx, link_rate);
+
+ switch (link_rate) {
+ case LINK_RATE_RBR:
+ phy_link_rate = 1620;
+ phy_set_rate = true;
+ break;
+ case LINK_RATE_HBR:
+ phy_link_rate = 2700;
+ phy_set_rate = true;
+ break;
+ case LINK_RATE_HBR2:
+ phy_link_rate = 5400;
+ phy_set_rate = true;
+ break;
+ case LINK_RATE_HBR3:
+ phy_link_rate = 8100;
+ phy_set_rate = true;
+ break;
+ case 0:
+ phy_link_rate = 0;
+ phy_set_rate = true;
+ break;
+ default:
+ dev_err(service->ep->dcp->dev,
+ "DPTXPort: Unsupported link rate 0x%x requested\n",
+ link_rate);
+ link_rate = 0;
+ phy_set_rate = false;
+ break;
+ }
+
+ if (phy_set_rate) {
+ dptx->phy_ops.dp.link_rate = phy_link_rate;
+ dptx->phy_ops.dp.set_rate = 1;
+
+ if (dptx->atcphy) {
+ ret = phy_configure(dptx->atcphy, &dptx->phy_ops);
+ if (ret)
+ return ret;
+ }
+
+ //if (dptx->phy_ops.dp.set_rate)
+ dptx->link_rate = dptx->pending_link_rate = link_rate;
+
+ }
+
+ //dptx->pending_link_rate = link_rate;
+ reply->retcode = cpu_to_le32(0);
+ reply->link_rate = cpu_to_le32(link_rate);
+
+ return 0;
+}
+
+static int dptxport_call_get_supports_hpd(struct apple_epic_service *service,
+ void *reply_, size_t reply_size)
+{
+ struct dptxport_apcall_get_support *reply = reply_;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+
+ reply->retcode = cpu_to_le32(0);
+ reply->supported = cpu_to_le32(0);
+ return 0;
+}
+
+static int
+dptxport_call_get_supports_downspread(struct apple_epic_service *service,
+ void *reply_, size_t reply_size)
+{
+ struct dptxport_apcall_get_support *reply = reply_;
+
+ if (reply_size < sizeof(*reply))
+ return -EINVAL;
+
+ reply->retcode = cpu_to_le32(0);
+ reply->supported = cpu_to_le32(0);
+ return 0;
+}
+
+static int
+dptxport_call_activate(struct apple_epic_service *service,
+ const void *data, size_t data_size,
+ void *reply, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ const struct apple_dcp *dcp = service->ep->dcp;
+
+ // TODO: hack, use phy_set_mode to select the correct DCP(EXT) input
+ phy_set_mode_ext(dptx->atcphy, PHY_MODE_DP, dcp->index);
+
+ memcpy(reply, data, min(reply_size, data_size));
+ if (reply_size >= 4)
+ memset(reply, 0, 4);
+
+ return 0;
+}
+
+static int
+dptxport_call_deactivate(struct apple_epic_service *service,
+ const void *data, size_t data_size,
+ void *reply, size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+
+ /* deactivate phy */
+ phy_set_mode_ext(dptx->atcphy, PHY_MODE_INVALID, 0);
+
+ memcpy(reply, data, min(reply_size, data_size));
+ if (reply_size >= 4)
+ memset(reply, 0, 4);
+
+ return 0;
+}
+
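+/*
+ * Dispatch an incoming AP call from the DCP's DPTX service to its handler by
+ * call index; unhandled calls are echoed back with a zeroed retcode.
+ */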
+static int dptxport_call(struct apple_epic_service *service, u32 idx,
+ const void *data, size_t data_size, void *reply,
+ size_t reply_size)
+{
+ struct dptx_port *dptx = service->cookie;
+ trace_dptxport_apcall(dptx, idx, data_size);
+
+ switch (idx) {
+ case DPTX_APCALL_WILL_CHANGE_LINKG_CONFIG:
+ return dptxport_call_will_change_link_config(service);
+ case DPTX_APCALL_DID_CHANGE_LINK_CONFIG:
+ return dptxport_call_did_change_link_config(service);
+ case DPTX_APCALL_GET_MAX_LINK_RATE:
+ return dptxport_call_get_max_link_rate(service, reply,
+ reply_size);
+ case DPTX_APCALL_GET_LINK_RATE:
+ return dptxport_call_get_link_rate(service, reply, reply_size);
+ case DPTX_APCALL_SET_LINK_RATE:
+ return dptxport_call_set_link_rate(service, data, data_size,
+ reply, reply_size);
+ case DPTX_APCALL_GET_MAX_LANE_COUNT:
+ return dptxport_call_get_max_lane_count(service, reply, reply_size);
+ case DPTX_APCALL_SET_ACTIVE_LANE_COUNT:
+ return dptxport_call_set_active_lane_count(service, data, data_size,
+ reply, reply_size);
+ case DPTX_APCALL_GET_SUPPORTS_HPD:
+ return dptxport_call_get_supports_hpd(service, reply,
+ reply_size);
+ case DPTX_APCALL_GET_SUPPORTS_DOWN_SPREAD:
+ return dptxport_call_get_supports_downspread(service, reply,
+ reply_size);
+ case DPTX_APCALL_GET_MAX_DRIVE_SETTINGS:
+ return dptxport_call_get_max_drive_settings(service, reply,
+ reply_size);
+ case DPTX_APCALL_GET_DRIVE_SETTINGS:
+ return dptxport_call_get_drive_settings(service, data, data_size,
+ reply, reply_size);
+ case DPTX_APCALL_SET_DRIVE_SETTINGS:
+ return dptxport_call_set_drive_settings(service, data, data_size,
+ reply, reply_size);
+ case DPTX_APCALL_ACTIVATE:
+ return dptxport_call_activate(service, data, data_size,
+ reply, reply_size);
+ case DPTX_APCALL_DEACTIVATE:
+ return dptxport_call_deactivate(service, data, data_size,
+ reply, reply_size);
+ default:
+ /* just try to ACK and hope for the best... */
+ dev_info(service->ep->dcp->dev, "DPTXPort: acking unhandled call %u\n",
+ idx);
+ memcpy(reply, data, min(reply_size, data_size));
+ if (reply_size >= 4)
+ memset(reply, 0, 4);
+ return 0;
+ }
+}
+
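+/*
+ * EPIC service init hook: only "dcpdptx-port-epic" services of class
+ * "AppleDCPDPTXRemotePort" for units 0 and 1 are accepted and bound to a
+ * dptx_port slot.
+ */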
+static void dptxport_init(struct apple_epic_service *service, const char *name,
+ const char *class, s64 unit)
+{
+ if (strcmp(name, "dcpdptx-port-epic"))
+ return;
+ if (strcmp(class, "AppleDCPDPTXRemotePort"))
+ return;
+
+ trace_dptxport_init(service->ep->dcp, unit);
+
+ switch (unit) {
+ case 0:
+ case 1:
+ if (service->ep->dcp->dptxport[unit].enabled) {
+ dev_err(service->ep->dcp->dev,
+ "DPTXPort: unit %lld already exists\n", unit);
+ return;
+ }
+ service->ep->dcp->dptxport[unit].unit = unit;
+ service->ep->dcp->dptxport[unit].service = service;
+ service->ep->dcp->dptxport[unit].enabled = true;
+ service->cookie = (void *)&service->ep->dcp->dptxport[unit];
+ complete(&service->ep->dcp->dptxport[unit].enable_completion);
+ break;
+ default:
+ dev_err(service->ep->dcp->dev, "DPTXPort: invalid unit %lld\n",
+ unit);
+ }
+}
+
+static const struct apple_epic_service_ops dptxep_ops[] = {
+ {
+ .name = "AppleDCPDPTXRemotePort",
+ .init = dptxport_init,
+ .call = dptxport_call,
+ },
+ {}
+};
+
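+/*
+ * Bring up the DPTX AFK endpoint and wait (sharing a 1 second budget) for the
+ * firmware to announce each remote DP port.
+ */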
+int dptxep_init(struct apple_dcp *dcp)
+{
+ int ret;
+ u32 port;
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ init_completion(&dcp->dptxport[0].enable_completion);
+ init_completion(&dcp->dptxport[1].enable_completion);
+ init_completion(&dcp->dptxport[0].linkcfg_completion);
+ init_completion(&dcp->dptxport[1].linkcfg_completion);
+
+ dcp->dptxep = afk_init(dcp, DPTX_ENDPOINT, dptxep_ops);
+ if (IS_ERR(dcp->dptxep))
+ return PTR_ERR(dcp->dptxep);
+
+ ret = afk_start(dcp->dptxep);
+ if (ret)
+ return ret;
+
+ for (port = 0; port < dcp->hw.num_dptx_ports; port++) {
+ ret = wait_for_completion_timeout(&dcp->dptxport[port].enable_completion,
+ timeout);
+ if (!ret)
+ return -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+ timeout = ret;
+ }
+
+ return 0;
+}
--- /dev/null
+#ifndef __APPLE_DCP_DPTXEP_H__
+#define __APPLE_DCP_DPTXEP_H__
+
+#include <linux/phy/phy.h>
+#include <linux/mux/consumer.h>
+
+enum dptx_apcall {
+ DPTX_APCALL_ACTIVATE = 0,
+ DPTX_APCALL_DEACTIVATE = 1,
+ DPTX_APCALL_GET_MAX_DRIVE_SETTINGS = 2,
+ DPTX_APCALL_SET_DRIVE_SETTINGS = 3,
+ DPTX_APCALL_GET_DRIVE_SETTINGS = 4,
+ DPTX_APCALL_WILL_CHANGE_LINKG_CONFIG = 5,
+ DPTX_APCALL_DID_CHANGE_LINK_CONFIG = 6,
+ DPTX_APCALL_GET_MAX_LINK_RATE = 7,
+ DPTX_APCALL_GET_LINK_RATE = 8,
+ DPTX_APCALL_SET_LINK_RATE = 9,
+ DPTX_APCALL_GET_MAX_LANE_COUNT = 10,
+ DPTX_APCALL_GET_ACTIVE_LANE_COUNT = 11,
+ DPTX_APCALL_SET_ACTIVE_LANE_COUNT = 12,
+ DPTX_APCALL_GET_SUPPORTS_DOWN_SPREAD = 13,
+ DPTX_APCALL_GET_DOWN_SPREAD = 14,
+ DPTX_APCALL_SET_DOWN_SPREAD = 15,
+ DPTX_APCALL_GET_SUPPORTS_LANE_MAPPING = 16,
+ DPTX_APCALL_SET_LANE_MAP = 17,
+ DPTX_APCALL_GET_SUPPORTS_HPD = 18,
+ DPTX_APCALL_FORCE_HOTPLUG_DETECT = 19,
+ DPTX_APCALL_INACTIVE_SINK_DETECTED = 20,
+ DPTX_APCALL_SET_TILED_DISPLAY_HINTS = 21,
+ DPTX_APCALL_DEVICE_NOT_RESPONDING = 22,
+ DPTX_APCALL_DEVICE_BUSY_TIMEOUT = 23,
+ DPTX_APCALL_DEVICE_NOT_STARTED = 24,
+};
+
+#define DCPDPTX_REMOTE_PORT_CORE GENMASK(3, 0)
+#define DCPDPTX_REMOTE_PORT_ATC GENMASK(7, 4)
+#define DCPDPTX_REMOTE_PORT_DIE GENMASK(11, 8)
+#define DCPDPTX_REMOTE_PORT_CONNECTED BIT(15)
+
+enum dptx_link_rate {
+ LINK_RATE_RBR = 0x06,
+ LINK_RATE_HBR = 0x0a,
+ LINK_RATE_HBR2 = 0x14,
+ LINK_RATE_HBR3 = 0x1e,
+};
+
+struct apple_epic_service;
+
+struct dptx_port {
+ bool enabled, connected;
+ struct completion enable_completion;
+ struct completion linkcfg_completion;
+ u32 unit;
+ struct apple_epic_service *service;
+ union phy_configure_opts phy_ops;
+ struct phy *atcphy;
+ struct mux_control *mux;
+ u32 lane_count;
+ u32 link_rate, pending_link_rate;
+ u32 drive_settings[2];
+};
+
+int dptxport_validate_connection(struct apple_epic_service *service, u8 core,
+ u8 atc, u8 die);
+int dptxport_connect(struct apple_epic_service *service, u8 core, u8 atc,
+ u8 die);
+int dptxport_request_display(struct apple_epic_service *service);
+int dptxport_release_display(struct apple_epic_service *service);
+int dptxport_set_hpd(struct apple_epic_service *service, bool hpd);
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2023 */
+
+#include <linux/completion.h>
+
+#include "afk.h"
+#include "dcp.h"
+
+static void disp_service_init(struct apple_epic_service *service, const char *name,
+ const char *class, s64 unit)
+{
+}
+
+
+static const struct apple_epic_service_ops ibootep_ops[] = {
+ {
+ .name = "disp0-service",
+ .init = disp_service_init,
+ },
+ {}
+};
+
+int ibootep_init(struct apple_dcp *dcp)
+{
+ dcp->ibootep = afk_init(dcp, DISP0_ENDPOINT, ibootep_ops);
+	if (IS_ERR(dcp->ibootep))
+		return PTR_ERR(dcp->ibootep);
+
+	return afk_start(dcp->ibootep);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/align.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "iomfb_internal.h"
+#include "parser.h"
+#include "trace.h"
+
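+/*
+ * Offsets into the IOMFB shared memory region, as implied by the helpers
+ * below: AP-initiated command buffers sit at 0x00000 (in-band) and 0x08000
+ * (out-of-band); DCP-initiated async/callback rings sit at 0x40000 and above.
+ */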
+static int dcp_tx_offset(enum dcp_context_id id)
+{
+ switch (id) {
+ case DCP_CONTEXT_CB:
+ case DCP_CONTEXT_CMD:
+ return 0x00000;
+ case DCP_CONTEXT_OOBCB:
+ case DCP_CONTEXT_OOBCMD:
+ return 0x08000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dcp_channel_offset(enum dcp_context_id id)
+{
+ switch (id) {
+ case DCP_CONTEXT_ASYNC:
+ return 0x40000;
+ case DCP_CONTEXT_OOBASYNC:
+ return 0x48000;
+ case DCP_CONTEXT_CB:
+ return 0x60000;
+ case DCP_CONTEXT_OOBCB:
+ return 0x68000;
+ default:
+ return dcp_tx_offset(id);
+ }
+}
+
+static inline u64 dcpep_set_shmem(u64 dart_va)
+{
+ return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_SET_SHMEM) |
+ FIELD_PREP(IOMFB_SHMEM_FLAG, IOMFB_SHMEM_FLAG_VALUE) |
+ FIELD_PREP(IOMFB_SHMEM_DVA, dart_va);
+}
+
+static inline u64 dcpep_msg(enum dcp_context_id id, u32 length, u16 offset)
+{
+ return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_MSG) |
+ FIELD_PREP(IOMFB_MSG_CONTEXT, id) |
+ FIELD_PREP(IOMFB_MSG_OFFSET, offset) |
+ FIELD_PREP(IOMFB_MSG_LENGTH, length);
+}
+
+static inline u64 dcpep_ack(enum dcp_context_id id)
+{
+ return dcpep_msg(id, 0, 0) | IOMFB_MSG_ACK;
+}
+
+/*
+ * A channel is busy if we have sent a message that has yet to be
+ * acked. The driver must not send a message to a busy channel.
+ */
+static bool dcp_channel_busy(struct dcp_channel *ch)
+{
+ return (ch->depth != 0);
+}
+
+/*
+ * Get the context ID passed to the DCP for a command we push. The rule is
+ * simple: callback contexts are used when replying to the DCP, command
+ * contexts are used otherwise. That corresponds to a non/zero call stack
+ * depth. This rule frees the caller from tracking the call context manually.
+ */
+static enum dcp_context_id dcp_call_context(struct apple_dcp *dcp, bool oob)
+{
+ u8 depth = oob ? dcp->ch_oobcmd.depth : dcp->ch_cmd.depth;
+
+ if (depth)
+ return oob ? DCP_CONTEXT_OOBCB : DCP_CONTEXT_CB;
+ else
+ return oob ? DCP_CONTEXT_OOBCMD : DCP_CONTEXT_CMD;
+}
+
+/* Get a channel for a context */
+static struct dcp_channel *dcp_get_channel(struct apple_dcp *dcp,
+ enum dcp_context_id context)
+{
+ switch (context) {
+ case DCP_CONTEXT_CB:
+ return &dcp->ch_cb;
+ case DCP_CONTEXT_CMD:
+ return &dcp->ch_cmd;
+ case DCP_CONTEXT_OOBCB:
+ return &dcp->ch_oobcb;
+ case DCP_CONTEXT_OOBCMD:
+ return &dcp->ch_oobcmd;
+ case DCP_CONTEXT_ASYNC:
+ return &dcp->ch_async;
+ case DCP_CONTEXT_OOBASYNC:
+ return &dcp->ch_oobasync;
+ default:
+ return NULL;
+ }
+}
+
+/* Get the start of a packet: after the end of the previous packet */
+static u16 dcp_packet_start(struct dcp_channel *ch, u8 depth)
+{
+ if (depth > 0)
+ return ch->end[depth - 1];
+ else
+ return 0;
+}
+
+/* Pushes and pops the depth of the call stack with safety checks */
+static u8 dcp_push_depth(u8 *depth)
+{
+ u8 ret = (*depth)++;
+
+ WARN_ON(ret >= DCP_MAX_CALL_DEPTH);
+ return ret;
+}
+
+static u8 dcp_pop_depth(u8 *depth)
+{
+ WARN_ON((*depth) == 0);
+
+ return --(*depth);
+}
+
+/* Call a DCP function given by a tag */
+void dcp_push(struct apple_dcp *dcp, bool oob, const struct dcp_method_entry *call,
+ u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
+ void *cookie)
+{
+ enum dcp_context_id context = dcp_call_context(dcp, oob);
+ struct dcp_channel *ch = dcp_get_channel(dcp, context);
+
+ struct dcp_packet_header header = {
+ .in_len = in_len,
+ .out_len = out_len,
+
+ /* Tag is reversed due to endianness of the fourcc */
+ .tag[0] = call->tag[3],
+ .tag[1] = call->tag[2],
+ .tag[2] = call->tag[1],
+ .tag[3] = call->tag[0],
+ };
+
+ u8 depth = dcp_push_depth(&ch->depth);
+ u16 offset = dcp_packet_start(ch, depth);
+
+ void *out = dcp->shmem + dcp_tx_offset(context) + offset;
+ void *out_data = out + sizeof(header);
+ size_t data_len = sizeof(header) + in_len + out_len;
+
+ memcpy(out, &header, sizeof(header));
+
+ if (in_len > 0)
+ memcpy(out_data, data, in_len);
+
+ trace_iomfb_push(dcp, call, context, offset, depth);
+
+ ch->callbacks[depth] = cb;
+ ch->cookies[depth] = cookie;
+ ch->output[depth] = out + sizeof(header) + in_len;
+ ch->end[depth] = offset + ALIGN(data_len, DCP_PACKET_ALIGNMENT);
+
+ dcp_send_message(dcp, IOMFB_ENDPOINT,
+ dcpep_msg(context, data_len, offset));
+}
+
+/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
+int dcp_parse_tag(char tag[4])
+{
+ u32 d[3];
+ int i;
+
+ if (tag[3] != 'D')
+ return -EINVAL;
+
+ for (i = 0; i < 3; ++i) {
+ d[i] = (u32)(tag[i] - '0');
+
+ if (d[i] > 9)
+ return -EINVAL;
+ }
+
+ return d[0] + (d[1] * 10) + (d[2] * 100);
+}
+
+/* Ack a callback from the DCP */
+void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context)
+{
+ struct dcp_channel *ch = dcp_get_channel(dcp, context);
+
+ dcp_pop_depth(&ch->depth);
+ dcp_send_message(dcp, IOMFB_ENDPOINT,
+ dcpep_ack(context));
+}
+
+/*
+ * Helper to send a DRM hotplug event. The DCP is accessed from a single
+ * (RTKit) thread. To handle hotplug callbacks, we need to call
+ * drm_kms_helper_hotplug_event, which does an atomic commit (via DCP) and
+ * waits for vblank (a DCP callback). That means we deadlock if we call from
+ * the RTKit thread! Instead, move the call to another thread via a workqueue.
+ */
+void dcp_hotplug(struct work_struct *work)
+{
+ struct apple_connector *connector;
+ struct apple_dcp *dcp;
+
+ connector = container_of(work, struct apple_connector, hotplug_wq);
+
+ dcp = platform_get_drvdata(connector->dcp);
+ dev_info(dcp->dev, "%s() connected:%d valid_mode:%d nr_modes:%u\n", __func__,
+ connector->connected, dcp->valid_mode, dcp->nr_modes);
+
+ /*
+ * DCP defers link training until we set a display mode. But we set
+ * display modes from atomic_flush, so userspace needs to trigger a
+ * flush, or the CRTC gets no signal.
+ */
+ if (connector->base.state && !dcp->valid_mode && connector->connected)
+ drm_connector_set_link_status_property(&connector->base,
+ DRM_MODE_LINK_STATUS_BAD);
+
+ drm_kms_helper_connector_hotplug_event(&connector->base);
+}
+EXPORT_SYMBOL_GPL(dcp_hotplug);
+
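+/*
+ * Handle a callback (DCP -> AP RPC): look up the handler for the parsed tag,
+ * run it, and ack immediately unless the handler defers the ack by returning
+ * false.
+ */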
+static void dcpep_handle_cb(struct apple_dcp *dcp, enum dcp_context_id context,
+ void *data, u32 length, u16 offset)
+{
+ struct device *dev = dcp->dev;
+ struct dcp_packet_header *hdr = data;
+ void *in, *out;
+ int tag = dcp_parse_tag(hdr->tag);
+ struct dcp_channel *ch = dcp_get_channel(dcp, context);
+ u8 depth;
+
+ if (tag < 0 || tag >= IOMFB_MAX_CB || !dcp->cb_handlers || !dcp->cb_handlers[tag]) {
+ dev_warn(dev, "received unknown callback %c%c%c%c\n",
+ hdr->tag[3], hdr->tag[2], hdr->tag[1], hdr->tag[0]);
+ return;
+ }
+
+ in = data + sizeof(*hdr);
+ out = in + hdr->in_len;
+
+ // TODO: verify that in_len and out_len match our prototypes
+	// for now just clear the out data to have at least consistent results
+ if (hdr->out_len)
+ memset(out, 0, hdr->out_len);
+
+ depth = dcp_push_depth(&ch->depth);
+ ch->output[depth] = out;
+ ch->end[depth] = offset + ALIGN(length, DCP_PACKET_ALIGNMENT);
+
+ if (dcp->cb_handlers[tag](dcp, tag, out, in))
+ dcp_ack(dcp, context);
+}
+
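+/* Handle an ack for a call we pushed earlier and invoke its completion callback. */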
+static void dcpep_handle_ack(struct apple_dcp *dcp, enum dcp_context_id context,
+ void *data, u32 length)
+{
+ struct dcp_packet_header *header = data;
+ struct dcp_channel *ch = dcp_get_channel(dcp, context);
+ void *cookie;
+ dcp_callback_t cb;
+
+ if (!ch) {
+ dev_warn(dcp->dev, "ignoring ack on context %X\n", context);
+ return;
+ }
+
+ dcp_pop_depth(&ch->depth);
+
+ cb = ch->callbacks[ch->depth];
+ cookie = ch->cookies[ch->depth];
+
+ ch->callbacks[ch->depth] = NULL;
+ ch->cookies[ch->depth] = NULL;
+
+ if (cb)
+ cb(dcp, data + sizeof(*header) + header->in_len, cookie);
+}
+
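+/* Decode an IOMFB doorbell message and route it to the ack or callback path. */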
+static void dcpep_got_msg(struct apple_dcp *dcp, u64 message)
+{
+ enum dcp_context_id ctx_id;
+ u16 offset;
+ u32 length;
+ int channel_offset;
+ void *data;
+
+ ctx_id = FIELD_GET(IOMFB_MSG_CONTEXT, message);
+ offset = FIELD_GET(IOMFB_MSG_OFFSET, message);
+ length = FIELD_GET(IOMFB_MSG_LENGTH, message);
+
+ channel_offset = dcp_channel_offset(ctx_id);
+
+ if (channel_offset < 0) {
+ dev_warn(dcp->dev, "invalid context received %u", ctx_id);
+ return;
+ }
+
+ data = dcp->shmem + channel_offset + offset;
+
+ if (FIELD_GET(IOMFB_MSG_ACK, message))
+ dcpep_handle_ack(dcp, ctx_id, data, length);
+ else
+ dcpep_handle_cb(dcp, ctx_id, data, length, offset);
+}
+
+/*
+ * DRM specifies rectangles as start and end coordinates. DCP specifies
+ * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
+ * to a DCP rectangle.
+ */
+struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect)
+{
+ return (struct dcp_rect){ .x = rect->x1,
+ .y = rect->y1,
+ .w = drm_rect_width(rect),
+ .h = drm_rect_height(rect) };
+}
+
+u32 drm_format_to_dcp(u32 drm)
+{
+ switch (drm) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return fourcc_code('A', 'R', 'G', 'B');
+
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return fourcc_code('A', 'B', 'G', 'R');
+
+ case DRM_FORMAT_XRGB2101010:
+ return fourcc_code('r', '0', '3', 'w');
+ }
+
+ pr_warn("DRM format %X not supported in DCP\n", drm);
+	if (memcmp(req->obj, "FMOI", sizeof(req->obj)) == 0) { /* reversed "IOMF" */
+}
+
+int dcp_get_modes(struct drm_connector *connector)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+ struct platform_device *pdev = apple_connector->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ int i;
+
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ mode = drm_mode_duplicate(dev, &dcp->modes[i].mode);
+
+ if (!mode) {
+ dev_err(dev->dev, "Failed to duplicate display mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return dcp->nr_modes;
+}
+EXPORT_SYMBOL_GPL(dcp_get_modes);
+
+/* The user may own drm_display_mode, so we need to search for our copy */
+struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
+ const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ if (drm_mode_match(mode, &dcp->modes[i].mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK))
+ return &dcp->modes[i];
+ }
+
+ return NULL;
+}
+
+int dcp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+ struct platform_device *pdev = apple_connector->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return lookup_mode(dcp, mode) ? MODE_OK : MODE_BAD;
+}
+EXPORT_SYMBOL_GPL(dcp_mode_valid);
+
+int dcp_crtc_atomic_modeset(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ struct apple_dcp *dcp = platform_get_drvdata(apple_crtc->dcp);
+ struct drm_crtc_state *crtc_state;
+ int ret = -EIO;
+ bool modeset;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state)
+ return 0;
+
+ modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+
+ if (!modeset)
+ return 0;
+
+ /* ignore no mode, poweroff is handled elsewhere */
+ if (crtc_state->mode.hdisplay == 0 && crtc_state->mode.vdisplay == 0)
+ return 0;
+
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ ret = iomfb_modeset_v12_3(dcp, crtc_state);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ ret = iomfb_modeset_v13_3(dcp, crtc_state);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n",
+ dcp->fw_compat);
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dcp_crtc_atomic_modeset);
+
+bool dcp_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ struct platform_device *pdev = apple_crtc->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ /* TODO: support synthesized modes through scaling */
+ return lookup_mode(dcp, mode) != NULL;
+}
+EXPORT_SYMBOL(dcp_crtc_mode_fixup);
+
+
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	if (dcp_channel_busy(&dcp->ch_cmd)) {
+		dev_err(dcp->dev, "unexpected busy command channel\n");
+ /* HACK: issue a delayed vblank event to avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks().
+ */
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ iomfb_flush_v12_3(dcp, crtc, state);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ iomfb_flush_v13_3(dcp, crtc, state);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(dcp_flush);
+
+static void iomfb_start(struct apple_dcp *dcp)
+{
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ iomfb_start_v12_3(dcp);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ iomfb_start_v13_3(dcp);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+ break;
+ }
+}
+
+bool dcp_is_initialized(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return dcp->active;
+}
+EXPORT_SYMBOL_GPL(dcp_is_initialized);
+
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message)
+{
+ enum dcpep_type type = FIELD_GET(IOMFB_MESSAGE_TYPE, message);
+
+ if (type == IOMFB_MESSAGE_TYPE_INITIALIZED)
+ iomfb_start(dcp);
+ else if (type == IOMFB_MESSAGE_TYPE_MSG)
+ dcpep_got_msg(dcp, message);
+ else
+ dev_warn(dcp->dev, "Ignoring unknown message %llx\n", message);
+}
+
+int iomfb_start_rtkit(struct apple_dcp *dcp)
+{
+ dma_addr_t shmem_iova;
+ apple_rtkit_start_ep(dcp->rtk, IOMFB_ENDPOINT);
+
+	dcp->shmem = dma_alloc_coherent(dcp->dev, DCP_SHMEM_SIZE, &shmem_iova,
+					GFP_KERNEL);
+	if (!dcp->shmem)
+		return -ENOMEM;
+
+ dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_set_shmem(shmem_iova));
+
+ return 0;
+}
+
+void iomfb_shutdown(struct apple_dcp *dcp)
+{
+ /* We're going down */
+ dcp->active = false;
+ dcp->valid_mode = false;
+
+ switch (dcp->fw_compat) {
+ case DCP_FIRMWARE_V_12_3:
+ iomfb_shutdown_v12_3(dcp);
+ break;
+ case DCP_FIRMWARE_V_13_5:
+ iomfb_shutdown_v13_3(dcp);
+ break;
+ default:
+ WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+ break;
+ }
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCPEP_H__
+#define __APPLE_DCPEP_H__
+
+#include <linux/types.h>
+
+#include "version_utils.h"
+
+/* Fixed size of shared memory between DCP and AP */
+#define DCP_SHMEM_SIZE 0x100000
+
+/* DCP message contexts */
+enum dcp_context_id {
+ /* Callback */
+ DCP_CONTEXT_CB = 0,
+
+ /* Command */
+ DCP_CONTEXT_CMD = 2,
+
+ /* Asynchronous */
+ DCP_CONTEXT_ASYNC = 3,
+
+ /* Out-of-band callback */
+ DCP_CONTEXT_OOBCB = 4,
+
+ /* Out-of-band command */
+ DCP_CONTEXT_OOBCMD = 6,
+
+ /* Out-of-band Asynchronous */
+ DCP_CONTEXT_OOBASYNC = 7,
+
+ DCP_NUM_CONTEXTS
+};
+
+/* RTKit endpoint message types */
+enum dcpep_type {
+ /* Set shared memory */
+ IOMFB_MESSAGE_TYPE_SET_SHMEM = 0,
+
+ /* DCP is initialized */
+ IOMFB_MESSAGE_TYPE_INITIALIZED = 1,
+
+ /* Remote procedure call */
+ IOMFB_MESSAGE_TYPE_MSG = 2,
+};
+
+#define IOMFB_MESSAGE_TYPE GENMASK_ULL( 3, 0)
+
+/* Message */
+#define IOMFB_MSG_LENGTH GENMASK_ULL(63, 32)
+#define IOMFB_MSG_OFFSET GENMASK_ULL(31, 16)
+#define IOMFB_MSG_CONTEXT GENMASK_ULL(11, 8)
+#define IOMFB_MSG_ACK BIT_ULL(6)
+
+/* Set shmem */
+#define IOMFB_SHMEM_DVA GENMASK_ULL(63, 16)
+#define IOMFB_SHMEM_FLAG GENMASK_ULL( 7, 4)
+#define IOMFB_SHMEM_FLAG_VALUE 4
+
+struct dcp_packet_header {
+ char tag[4];
+ u32 in_len;
+ u32 out_len;
+} __packed;
+
+#define DCP_IS_NULL(ptr) ((ptr) ? 1 : 0)
+#define DCP_PACKET_ALIGNMENT (0x40)
+
+enum iomfb_property_id {
+ IOMFB_PROPERTY_NITS = 15, // divide by Brightness_Scale
+};
+
+#define IOMFB_BRIGHTNESS_MIN 0x10000000
+
+/* Structures used in v12.0 firmware */
+
+#define SWAP_SURFACES 4
+#define MAX_PLANES 3
+
+enum dcp_colorspace {
+ DCP_COLORSPACE_BG_SRGB = 0,
+ DCP_COLORSPACE_BG_BT2020 = 9,
+ DCP_COLORSPACE_NATIVE = 12,
+};
+
+enum dcp_xfer_func {
+ DCP_XFER_FUNC_SDR = 13,
+ DCP_XFER_FUNC_HDR = 16,
+};
+
+struct dcp_iouserclient {
+ /* Handle for the IOUserClient. macOS sets this to a kernel VA. */
+ u64 handle;
+ u32 unk;
+ u8 flag1;
+ u8 flag2;
+ u8 padding[2];
+} __packed;
+
+struct dcp_rect {
+ u32 x;
+ u32 y;
+ u32 w;
+ u32 h;
+} __packed;
+
+/*
+ * Update background color to struct dcp_swap.bg_color
+ */
+#define IOMFB_SET_BACKGROUND BIT(31)
+
+/* Information describing a plane of a planar compressed surface */
+struct dcp_plane_info {
+ u32 width;
+ u32 height;
+ u32 base;
+ u32 offset;
+ u32 stride;
+ u32 size;
+ u16 tile_size;
+ u8 tile_w;
+ u8 tile_h;
+ u32 unk[13];
+} __packed;
+
+struct dcp_component_types {
+ u8 count;
+ u8 types[7];
+} __packed;
+
+struct dcp_allocate_bandwidth_req {
+ u64 unk1;
+ u64 unk2;
+ u64 unk3;
+ u8 unk1_null;
+ u8 unk2_null;
+ u8 padding[8];
+} __packed;
+
+struct dcp_allocate_bandwidth_resp {
+ u64 unk1;
+ u64 unk2;
+ u32 ret;
+} __packed;
+
+struct dcp_rt_bandwidth {
+ u64 unk1;
+ u64 reg_scratch;
+ u64 reg_doorbell;
+ u32 unk2;
+ u32 doorbell_bit;
+ u32 padding[7];
+} __packed;
+
+struct frame_sync_props {
+ u8 unk[28];
+};
+
+struct dcp_set_frame_sync_props_req {
+ struct frame_sync_props props;
+ u8 frame_sync_props_null;
+ u8 padding[3];
+} __packed;
+
+struct dcp_set_frame_sync_props_resp {
+ struct frame_sync_props props;
+} __packed;
+
+/* Method calls */
+
+enum dcpep_method {
+ dcpep_late_init_signal,
+ dcpep_setup_video_limits,
+ dcpep_set_create_dfb,
+ dcpep_start_signal,
+ dcpep_swap_start,
+ dcpep_swap_submit,
+ dcpep_set_display_device,
+ dcpep_set_digital_out_mode,
+ dcpep_create_default_fb,
+ dcpep_set_display_refresh_properties,
+ dcpep_flush_supports_power,
+ dcpep_set_power_state,
+ dcpep_first_client_open,
+ dcpep_set_parameter_dcp,
+ dcpep_enable_disable_video_power_savings,
+ dcpep_is_main_display,
+ iomfbep_a131_pmu_service_matched,
+ iomfbep_a132_backlight_service_matched,
+ iomfbep_a358_vi_set_temperature_hint,
+ iomfbep_get_color_remap_mode,
+ iomfbep_last_client_close,
+ iomfbep_abort_swaps_dcp,
+ iomfbep_set_matrix,
+ dcpep_num_methods
+};
+
+#define IOMFB_METHOD(tag, name) [name] = { #name, tag }
+
+struct dcp_method_entry {
+ const char *name;
+ char tag[4];
+};
+
+#define IOMFB_MAX_CB (1000)
+struct apple_dcp;
+
+typedef bool (*iomfb_cb_handler)(struct apple_dcp *, int, void *, void *);
+
+/* Prototypes */
+
+struct dcp_set_digital_out_mode_req {
+ u32 color_mode_id;
+ u32 timing_mode_id;
+} __packed;
+
+struct dcp_map_buf_req {
+ u64 buffer;
+ u8 unk;
+ u8 buf_null;
+ u8 vaddr_null;
+ u8 dva_null;
+} __packed;
+
+struct dcp_map_buf_resp {
+ u64 vaddr;
+ u64 dva;
+ u32 ret;
+} __packed;
+
+struct dcp_unmap_buf_resp {
+ u64 buffer;
+ u64 vaddr;
+ u64 dva;
+ u8 unk;
+ u8 buf_null;
+} __packed;
+
+struct dcp_allocate_buffer_req {
+ u32 unk0;
+ u64 size;
+ u32 unk2;
+ u8 paddr_null;
+ u8 dva_null;
+ u8 dva_size_null;
+ u8 padding;
+} __packed;
+
+struct dcp_allocate_buffer_resp {
+ u64 paddr;
+ u64 dva;
+ u64 dva_size;
+ u32 mem_desc_id;
+} __packed;
+
+struct dcp_map_physical_req {
+ u64 paddr;
+ u64 size;
+ u32 flags;
+ u8 dva_null;
+ u8 dva_size_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_map_physical_resp {
+ u64 dva;
+ u64 dva_size;
+ u32 mem_desc_id;
+} __packed;
+
+struct dcp_swap_start_req {
+ u32 swap_id;
+ struct dcp_iouserclient client;
+ u8 swap_id_null;
+ u8 client_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_swap_start_resp {
+ u32 swap_id;
+ struct dcp_iouserclient client;
+ u32 ret;
+} __packed;
+
+struct dcp_get_uint_prop_req {
+ char obj[4];
+ char key[0x40];
+ u64 value;
+ u8 value_null;
+ u8 padding[3];
+} __packed;
+
+struct dcp_get_uint_prop_resp {
+ u64 value;
+ u8 ret;
+ u8 padding[3];
+} __packed;
+
+struct iomfb_sr_set_property_int_req {
+ char obj[4];
+ char key[0x40];
+ u64 value;
+ u8 value_null;
+ u8 padding[3];
+} __packed;
+
+struct iomfb_set_fx_prop_req {
+ char obj[4];
+ char key[0x40];
+ u32 value;
+} __packed;
+
+struct dcp_set_power_state_req {
+ u64 unklong;
+ u8 unkbool;
+ u8 unkint_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_set_power_state_resp {
+ u32 unkint;
+ u32 ret;
+} __packed;
+
+struct dcp_set_dcpav_prop_chunk_req {
+ char data[0x1000];
+ u32 offset;
+ u32 length;
+} __packed;
+
+struct dcp_set_dcpav_prop_end_req {
+ char key[0x40];
+} __packed;
+
+struct dcp_set_parameter_dcp {
+ u32 param;
+ u32 value[8];
+ u32 count;
+} __packed;
+
+struct dcp_swap_complete_intent_gated {
+ u32 swap_id;
+ u8 unkBool;
+ u32 unkInt;
+ u32 width;
+ u32 height;
+} __packed;
+
+struct dcp_read_edt_data_req {
+ char key[0x40];
+ u32 count;
+ u32 value[8];
+} __packed;
+
+struct dcp_read_edt_data_resp {
+ u32 value[8];
+ u8 ret;
+} __packed;
+
+struct iomfb_property {
+ u32 id;
+ u32 value;
+} __packed;
+
+struct iomfb_get_color_remap_mode_req {
+ u32 mode;
+ u8 mode_null;
+ u8 padding[3];
+} __packed;
+
+struct iomfb_get_color_remap_mode_resp {
+ u32 mode;
+ u32 ret;
+} __packed;
+
+struct iomfb_last_client_close_req {
+ u8 unkint_null;
+ u8 padding[3];
+} __packed;
+
+struct iomfb_last_client_close_resp {
+ u32 unkint;
+} __packed;
+
+struct io_user_client {
+ u64 addr;
+ u32 unk;
+ u8 flag1;
+ u8 flag2;
+ u8 pad[2];
+} __packed;
+
+struct iomfb_abort_swaps_dcp_req {
+ struct io_user_client client;
+ u8 client_null;
+ u8 pad[3];
+} __packed;
+
+struct iomfb_abort_swaps_dcp_resp {
+ struct io_user_client client;
+ u32 ret;
+} __packed;
+
+struct iomfb_set_matrix_req {
+ u32 unk_u32; // maybe length?
+ u64 r[3];
+ u64 g[3];
+ u64 b[3];
+ u8 matrix_null;
+ u8 padding[3];
+} __packed;
+
+struct iomfb_set_matrix_resp {
+ u32 ret;
+} __packed;
+
+struct dcpep_get_tiling_state_req {
+ u32 event;
+ u32 param;
+ u32 value;
+ u8 value_null;
+ u8 padding[3];
+} __packed;
+
+struct dcpep_get_tiling_state_resp {
+ u32 value;
+ u32 ret;
+} __packed;
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#include <drm/drm_modes.h>
+#include <drm/drm_rect.h>
+
+#include "dcp-internal.h"
+
+struct apple_dcp;
+
+typedef void (*dcp_callback_t)(struct apple_dcp *, void *, void *);
+
+
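+/*
+ * Thunk macros: each expands to a small wrapper around dcp_push() that fixes
+ * the method table entry and the in/out payload sizes, giving every IOMFB RPC
+ * a typed C entry point.
+ */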
+#define DCP_THUNK_VOID(func, handle) \
+ static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+ void *cookie) \
+ { \
+ dcp_push(dcp, oob, &dcp_methods[handle], 0, 0, NULL, cb, cookie); \
+ }
+
+#define DCP_THUNK_OUT(func, handle, T) \
+ static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+ void *cookie) \
+ { \
+ dcp_push(dcp, oob, &dcp_methods[handle], 0, sizeof(T), NULL, cb, cookie); \
+ }
+
+#define DCP_THUNK_IN(func, handle, T) \
+ static void func(struct apple_dcp *dcp, bool oob, T *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, &dcp_methods[handle], sizeof(T), 0, data, cb, cookie); \
+ }
+
+#define DCP_THUNK_INOUT(func, handle, T_in, T_out) \
+ static void func(struct apple_dcp *dcp, bool oob, T_in *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, &dcp_methods[handle], sizeof(T_in), sizeof(T_out), data, \
+ cb, cookie); \
+ }
+
+#define IOMFB_THUNK_INOUT(name) \
+ static void iomfb_ ## name(struct apple_dcp *dcp, bool oob, \
+ struct iomfb_ ## name ## _req *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, &dcp_methods[iomfbep_ ## name], \
+ sizeof(struct iomfb_ ## name ## _req), \
+ sizeof(struct iomfb_ ## name ## _resp), \
+ data, cb, cookie); \
+ }
+
+/*
+ * Define type-safe trampolines. Define typedefs to enforce type-safety on the
+ * input data (so if the types don't match, gcc errors out).
+ */
+
+#define TRAMPOLINE_VOID(func, handler) \
+ static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ handler(dcp); \
+ return true; \
+ }
+
+#define TRAMPOLINE_IN(func, handler, T_in) \
+ typedef void (*callback_##handler)(struct apple_dcp *, T_in *); \
+ \
+ static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ callback_##handler cb = handler; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ cb(dcp, in); \
+ return true; \
+ }
+
+#define TRAMPOLINE_INOUT(func, handler, T_in, T_out) \
+ typedef T_out (*callback_##handler)(struct apple_dcp *, T_in *); \
+ \
+ static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ T_out *typed_out = out; \
+ callback_##handler cb = handler; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ *typed_out = cb(dcp, in); \
+ return true; \
+ }
+
+#define TRAMPOLINE_OUT(func, handler, T_out) \
+ static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ T_out *typed_out = out; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ *typed_out = handler(dcp); \
+ return true; \
+ }
+
+/* Call a DCP function given by a tag */
+void dcp_push(struct apple_dcp *dcp, bool oob, const struct dcp_method_entry *call,
+ u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
+ void *cookie);
+
+/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
+int dcp_parse_tag(char tag[4]);
+
+void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context);
+
+/*
+ * DRM specifies rectangles as start and end coordinates. DCP specifies
+ * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
+ * to a DCP rectangle.
+ */
+struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect);
+
+u32 drm_format_to_dcp(u32 drm);
+
+/* The user may own drm_display_mode, so we need to search for our copy */
+struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
+ const struct drm_display_mode *mode);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/align.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "iomfb_internal.h"
+#include "parser.h"
+#include "trace.h"
+#include "version_utils.h"
+
+/* Register defines used in bandwidth setup structure */
+#define REG_DOORBELL_BIT(idx) (2 + (idx))
+
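+/*
+ * Refcounted completion cookie: waiter and callback can each hold a
+ * reference, so whichever side drops it last frees it via
+ * release_wait_cookie().
+ */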
+struct dcp_wait_cookie {
+ struct kref refcount;
+ struct completion done;
+};
+
+static void release_wait_cookie(struct kref *ref)
+{
+ struct dcp_wait_cookie *cookie;
+ cookie = container_of(ref, struct dcp_wait_cookie, refcount);
+
+ kfree(cookie);
+}
+
+DCP_THUNK_OUT(iomfb_a131_pmu_service_matched, iomfbep_a131_pmu_service_matched, u32);
+DCP_THUNK_OUT(iomfb_a132_backlight_service_matched, iomfbep_a132_backlight_service_matched, u32);
+DCP_THUNK_OUT(iomfb_a358_vi_set_temperature_hint, iomfbep_a358_vi_set_temperature_hint, u32);
+
+IOMFB_THUNK_INOUT(set_matrix);
+IOMFB_THUNK_INOUT(get_color_remap_mode);
+IOMFB_THUNK_INOUT(last_client_close);
+IOMFB_THUNK_INOUT(abort_swaps_dcp);
+
+DCP_THUNK_INOUT(dcp_swap_submit, dcpep_swap_submit,
+ struct DCP_FW_NAME(dcp_swap_submit_req),
+ struct DCP_FW_NAME(dcp_swap_submit_resp));
+
+DCP_THUNK_INOUT(dcp_swap_start, dcpep_swap_start, struct dcp_swap_start_req,
+ struct dcp_swap_start_resp);
+
+DCP_THUNK_INOUT(dcp_set_power_state, dcpep_set_power_state,
+ struct dcp_set_power_state_req,
+ struct dcp_set_power_state_resp);
+
+DCP_THUNK_INOUT(dcp_set_digital_out_mode, dcpep_set_digital_out_mode,
+ struct dcp_set_digital_out_mode_req, u32);
+
+DCP_THUNK_INOUT(dcp_set_display_device, dcpep_set_display_device, u32, u32);
+
+DCP_THUNK_OUT(dcp_set_display_refresh_properties,
+ dcpep_set_display_refresh_properties, u32);
+
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+DCP_THUNK_INOUT(dcp_late_init_signal, dcpep_late_init_signal, u32, u32);
+#else
+DCP_THUNK_OUT(dcp_late_init_signal, dcpep_late_init_signal, u32);
+#endif
+DCP_THUNK_IN(dcp_flush_supports_power, dcpep_flush_supports_power, u32);
+DCP_THUNK_OUT(dcp_create_default_fb, dcpep_create_default_fb, u32);
+DCP_THUNK_OUT(dcp_start_signal, dcpep_start_signal, u32);
+DCP_THUNK_VOID(dcp_setup_video_limits, dcpep_setup_video_limits);
+DCP_THUNK_VOID(dcp_set_create_dfb, dcpep_set_create_dfb);
+DCP_THUNK_VOID(dcp_first_client_open, dcpep_first_client_open);
+
+DCP_THUNK_INOUT(dcp_set_parameter_dcp, dcpep_set_parameter_dcp,
+ struct dcp_set_parameter_dcp, u32);
+
+DCP_THUNK_INOUT(dcp_enable_disable_video_power_savings,
+ dcpep_enable_disable_video_power_savings, u32, int);
+
+DCP_THUNK_OUT(dcp_is_main_display, dcpep_is_main_display, u32);
+
+/* DCP callback handlers */
+static void dcpep_cb_nop(struct apple_dcp *dcp)
+{
+ /* No operation */
+}
+
+static u8 dcpep_cb_true(struct apple_dcp *dcp)
+{
+ return true;
+}
+
+static u8 dcpep_cb_false(struct apple_dcp *dcp)
+{
+ return false;
+}
+
+static u32 dcpep_cb_zero(struct apple_dcp *dcp)
+{
+ return 0;
+}
+
+static void dcpep_cb_swap_complete(struct apple_dcp *dcp,
+ struct DCP_FW_NAME(dc_swap_complete_resp) *resp)
+{
+ trace_iomfb_swap_complete(dcp, resp->swap_id);
+ dcp->last_swap_id = resp->swap_id;
+
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+/* special */
+static void complete_vi_set_temperature_hint(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ // ack D100 cb_match_pmu_service
+ dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static bool iomfbep_cb_match_pmu_service(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+ trace_iomfb_callback(dcp, tag, __func__);
+ iomfb_a358_vi_set_temperature_hint(dcp, false,
+ complete_vi_set_temperature_hint,
+ NULL);
+
+ // return false for deferred ACK
+ return false;
+}
+
+static void complete_pmu_service_matched(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_channel *ch = &dcp->ch_cb;
+ u8 *succ = ch->output[ch->depth - 1];
+
+ *succ = true;
+
+ // ack D206 cb_match_pmu_service_2
+ dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static bool iomfbep_cb_match_pmu_service_2(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+ trace_iomfb_callback(dcp, tag, __func__);
+
+ iomfb_a131_pmu_service_matched(dcp, false, complete_pmu_service_matched,
+ out);
+
+ // return false for deferred ACK
+ return false;
+}
+
+static void complete_backlight_service_matched(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_channel *ch = &dcp->ch_cb;
+ u8 *succ = ch->output[ch->depth - 1];
+
+ *succ = true;
+
+ // ack D206 cb_match_backlight_service
+ dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static bool iomfbep_cb_match_backlight_service(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+ trace_iomfb_callback(dcp, tag, __func__);
+
+ if (!dcp_has_panel(dcp)) {
+ u8 *succ = out;
+ *succ = true;
+ return true;
+ }
+
+ iomfb_a132_backlight_service_matched(dcp, false, complete_backlight_service_matched, out);
+
+ // return false for deferred ACK
+ return false;
+}
+
+static void iomfb_cb_pr_publish(struct apple_dcp *dcp, struct iomfb_property *prop)
+{
+ switch (prop->id) {
+ case IOMFB_PROPERTY_NITS:
+ {
+ if (dcp_has_panel(dcp)) {
+ dcp->brightness.nits = prop->value / dcp->brightness.scale;
+ /* notify backlight device of the initial brightness */
+ if (!dcp->brightness.bl_dev && dcp->brightness.maximum > 0)
+ schedule_work(&dcp->bl_register_wq);
+ trace_iomfb_brightness(dcp, prop->value);
+ }
+ break;
+ }
+ default:
+ dev_dbg(dcp->dev, "pr_publish: id: %d = %u\n", prop->id, prop->value);
+ }
+}
+
+static struct dcp_get_uint_prop_resp
+dcpep_cb_get_uint_prop(struct apple_dcp *dcp, struct dcp_get_uint_prop_req *req)
+{
+ struct dcp_get_uint_prop_resp resp = (struct dcp_get_uint_prop_resp){
+ .value = 0
+ };
+
+ if (dcp->panel.has_mini_led &&
+	    memcmp(req->obj, "SUMP", sizeof(req->obj)) == 0) { /* reversed "PMUS" */
+ if (strncmp(req->key, "Temperature", sizeof(req->key)) == 0) {
+ /*
+ * TODO: value from j314c, find out if it is temperature in
+ * centigrade C and which temperature sensor reports it
+ */
+ resp.value = 3029;
+ resp.ret = true;
+ }
+ }
+
+ return resp;
+}
+
+static u8 iomfbep_cb_sr_set_property_int(struct apple_dcp *dcp,
+ struct iomfb_sr_set_property_int_req *req)
+{
+ if (memcmp(req->obj, "FMOI", sizeof(req->obj)) == 0) { /* "IOMF */
+ if (strncmp(req->key, "Brightness_Scale", sizeof(req->key)) == 0) {
+ if (!req->value_null)
+ dcp->brightness.scale = req->value;
+ }
+ }
+
+ return 1;
+}
+
+static void iomfbep_cb_set_fx_prop(struct apple_dcp *dcp, struct iomfb_set_fx_prop_req *req)
+{
+	// TODO: trace this, see if there are properties which need to be used later
+}
+
+/*
+ * Callback to map a buffer allocated with allocate_buf for PIODMA usage.
+ * PIODMA is separate from the main DCP and uses its own IOVA space on a dedicated
+ * stream of the display DART, rather than the expected DCP DART.
+ */
+static struct dcp_map_buf_resp dcpep_cb_map_piodma(struct apple_dcp *dcp,
+ struct dcp_map_buf_req *req)
+{
+ struct dcp_mem_descriptor *memdesc;
+ struct sg_table *map;
+ ssize_t ret;
+
+ if (req->buffer >= ARRAY_SIZE(dcp->memdesc))
+ goto reject;
+
+ memdesc = &dcp->memdesc[req->buffer];
+ map = &memdesc->map;
+
+ if (!map->sgl)
+ goto reject;
+
+ /* use the piodma iommu domain to map against the right IOMMU */
+ ret = iommu_map_sgtable(dcp->iommu_dom, memdesc->dva, map,
+ IOMMU_READ | IOMMU_WRITE);
+
+ /* HACK: expect size to be 16K aligned since the iommu API only maps
+ * full pages
+ */
+ if (ret < 0 || ret != ALIGN(memdesc->size, SZ_16K)) {
+ dev_err(dcp->dev, "iommu_map_sgtable() returned %zd instead of expected buffer size of %zu\n", ret, memdesc->size);
+ goto reject;
+ }
+
+ return (struct dcp_map_buf_resp){ .dva = memdesc->dva };
+
+reject:
+		dev_err(dcp->dev, "denying map of invalid buffer %llx for piodma\n",
+ req->buffer);
+ return (struct dcp_map_buf_resp){ .ret = EINVAL };
+}
+
+static void dcpep_cb_unmap_piodma(struct apple_dcp *dcp,
+ struct dcp_unmap_buf_resp *resp)
+{
+ struct dcp_mem_descriptor *memdesc;
+
+ if (resp->buffer >= ARRAY_SIZE(dcp->memdesc)) {
+ dev_warn(dcp->dev, "unmap request for out of range buffer %llu",
+ resp->buffer);
+ return;
+ }
+
+ memdesc = &dcp->memdesc[resp->buffer];
+
+ if (!memdesc->buf) {
+ dev_warn(dcp->dev,
+ "unmap for non-mapped buffer %llu iova:0x%08llx",
+ resp->buffer, resp->dva);
+ return;
+ }
+
+ if (memdesc->dva != resp->dva) {
+ dev_warn(dcp->dev, "unmap buffer %llu address mismatch "
+ "memdesc.dva:%llx dva:%llx", resp->buffer,
+ memdesc->dva, resp->dva);
+ return;
+ }
+
+ /* use the piodma iommu domain to unmap from the right IOMMU */
+ iommu_unmap(dcp->iommu_dom, memdesc->dva, memdesc->size);
+}
+
+/*
+ * Allocate an IOVA-contiguous buffer mapped to the DCP. The buffer need not
+ * be physically contiguous, but we save the sgtable in case the buffer later
+ * needs to be mapped for PIODMA.
+ */
+static struct dcp_allocate_buffer_resp
+dcpep_cb_allocate_buffer(struct apple_dcp *dcp,
+ struct dcp_allocate_buffer_req *req)
+{
+ struct dcp_allocate_buffer_resp resp = { 0 };
+ struct dcp_mem_descriptor *memdesc;
+ size_t size;
+ u32 id;
+
+ resp.dva_size = ALIGN(req->size, 4096);
+ resp.mem_desc_id =
+ find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+
+ if (resp.mem_desc_id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring");
+ resp.dva_size = 0;
+ resp.mem_desc_id = 0;
+ return resp;
+ }
+ id = resp.mem_desc_id;
+ set_bit(id, dcp->memdesc_map);
+
+ memdesc = &dcp->memdesc[id];
+
+ memdesc->size = resp.dva_size;
+ /* HACK: align size to 16K since the iommu API only maps full pages */
+ size = ALIGN(resp.dva_size, SZ_16K);
+ memdesc->buf = dma_alloc_coherent(dcp->dev, size,
+ &memdesc->dva, GFP_KERNEL);
+
+ dma_get_sgtable(dcp->dev, &memdesc->map, memdesc->buf, memdesc->dva,
+ size);
+ resp.dva = memdesc->dva;
+
+ return resp;
+}
+
+static u8 dcpep_cb_release_mem_desc(struct apple_dcp *dcp, u32 *mem_desc_id)
+{
+ struct dcp_mem_descriptor *memdesc;
+ u32 id = *mem_desc_id;
+
+ if (id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev,
+ "unmap request for out of range mem_desc_id %u", id);
+ return 0;
+ }
+
+ if (!test_and_clear_bit(id, dcp->memdesc_map)) {
+ dev_warn(dcp->dev, "unmap request for unused mem_desc_id %u",
+ id);
+ return 0;
+ }
+
+ memdesc = &dcp->memdesc[id];
+ if (memdesc->buf) {
+ dma_free_coherent(dcp->dev, memdesc->size, memdesc->buf,
+ memdesc->dva);
+
+ memdesc->buf = NULL;
+ memset(&memdesc->map, 0, sizeof(memdesc->map));
+ } else {
+ memdesc->reg = 0;
+ }
+
+ memdesc->size = 0;
+
+ return 1;
+}
+
+/* Validate that the specified region is a display register */
+static bool is_disp_register(struct apple_dcp *dcp, u64 start, u64 end)
+{
+ int i;
+
+ for (i = 0; i < dcp->nr_disp_registers; ++i) {
+ struct resource *r = dcp->disp_registers[i];
+
+ if ((start >= r->start) && (end <= r->end))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Map contiguous physical memory into the DCP's address space. The firmware
+ * uses this to map the display registers we advertise in
+ * sr_map_device_memory_with_index, so we bounds-check against those regions
+ * to guard against a malicious coprocessor.
+ */
+static struct dcp_map_physical_resp
+dcpep_cb_map_physical(struct apple_dcp *dcp, struct dcp_map_physical_req *req)
+{
+ int size = ALIGN(req->size, 4096);
+ dma_addr_t dva;
+ u32 id;
+
+ if (!is_disp_register(dcp, req->paddr, req->paddr + size - 1)) {
+ dev_err(dcp->dev, "refusing to map phys address %llx size %llx",
+ req->paddr, req->size);
+ return (struct dcp_map_physical_resp){};
+ }
+
+ id = find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+ set_bit(id, dcp->memdesc_map);
+ dcp->memdesc[id].size = size;
+ dcp->memdesc[id].reg = req->paddr;
+
+ dva = dma_map_resource(dcp->dev, req->paddr, size, DMA_BIDIRECTIONAL, 0);
+ WARN_ON(dva == DMA_MAPPING_ERROR);
+
+ return (struct dcp_map_physical_resp){
+ .dva_size = size,
+ .mem_desc_id = id,
+ .dva = dva,
+ };
+}
+
+static u64 dcpep_cb_get_frequency(struct apple_dcp *dcp)
+{
+ return clk_get_rate(dcp->clk);
+}
+
+static struct DCP_FW_NAME(dcp_map_reg_resp) dcpep_cb_map_reg(struct apple_dcp *dcp,
+ struct DCP_FW_NAME(dcp_map_reg_req) *req)
+{
+ if (req->index >= dcp->nr_disp_registers) {
+ dev_warn(dcp->dev, "attempted to read invalid reg index %u",
+ req->index);
+
+ return (struct DCP_FW_NAME(dcp_map_reg_resp)){ .ret = 1 };
+ } else {
+ struct resource *rsrc = dcp->disp_registers[req->index];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ dma_addr_t dva = dma_map_resource(dcp->dev, rsrc->start, resource_size(rsrc),
+ DMA_BIDIRECTIONAL, 0);
+ WARN_ON(dva == DMA_MAPPING_ERROR);
+#endif
+
+ return (struct DCP_FW_NAME(dcp_map_reg_resp)){
+ .addr = rsrc->start,
+ .length = resource_size(rsrc),
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ .dva = dva,
+#endif
+ };
+ }
+}
+
+static struct dcp_read_edt_data_resp
+dcpep_cb_read_edt_data(struct apple_dcp *dcp, struct dcp_read_edt_data_req *req)
+{
+ return (struct dcp_read_edt_data_resp){
+ .value[0] = req->value[0],
+ .ret = 0,
+ };
+}
+
+static void iomfbep_cb_enable_backlight_message_ap_gated(struct apple_dcp *dcp,
+ u8 *enabled)
+{
+ /*
+	 * Update backlight brightness on the next swap; on non mini-LED
+	 * displays the DCP seems to set an invalid iDAC value after coming
+	 * out of DPMS.
+ * syslog: "[BrightnessLCD.cpp:743][AFK]nitsToDBV: iDAC out of range"
+ */
+ dcp->brightness.update = true;
+}
+
+/* Chunked data transfer for property dictionaries */
+static u8 dcpep_cb_prop_start(struct apple_dcp *dcp, u32 *length)
+{
+ if (dcp->chunks.data != NULL) {
+ dev_warn(dcp->dev, "ignoring spurious transfer start\n");
+ return false;
+ }
+
+ dcp->chunks.length = *length;
+ dcp->chunks.data = devm_kzalloc(dcp->dev, *length, GFP_KERNEL);
+
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "failed to allocate chunks\n");
+ return false;
+ }
+
+ return true;
+}
+
+static u8 dcpep_cb_prop_chunk(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_chunk_req *req)
+{
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "ignoring spurious chunk\n");
+ return false;
+ }
+
+ if (req->offset + req->length > dcp->chunks.length) {
+ dev_warn(dcp->dev, "ignoring overflowing chunk\n");
+ return false;
+ }
+
+ memcpy(dcp->chunks.data + req->offset, req->data, req->length);
+ return true;
+}
+
+static bool dcpep_process_chunks(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_end_req *req)
+{
+ struct dcp_parse_ctx ctx;
+ int ret;
+
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "ignoring spurious end\n");
+ return false;
+ }
+
+ /* used just as opaque pointer for tracing */
+ ctx.dcp = dcp;
+
+ ret = parse(dcp->chunks.data, dcp->chunks.length, &ctx);
+
+ if (ret) {
+ dev_warn(dcp->dev, "bad header on dcpav props\n");
+ return false;
+ }
+
+ if (!strcmp(req->key, "TimingElements")) {
+ dcp->modes = enumerate_modes(&ctx, &dcp->nr_modes,
+ dcp->width_mm, dcp->height_mm,
+ dcp->notch_height);
+
+ if (IS_ERR(dcp->modes)) {
+ dev_warn(dcp->dev, "failed to parse modes\n");
+ dcp->modes = NULL;
+ dcp->nr_modes = 0;
+ return false;
+ }
+ if (dcp->nr_modes == 0)
+ dev_warn(dcp->dev, "TimingElements without valid modes!\n");
+ } else if (!strcmp(req->key, "DisplayAttributes")) {
+ ret = parse_display_attributes(&ctx, &dcp->width_mm,
+ &dcp->height_mm);
+
+ if (ret) {
+ dev_warn(dcp->dev, "failed to parse display attribs\n");
+ return false;
+ }
+
+ dcp_set_dimensions(dcp);
+ }
+
+ return true;
+}
+
+static u8 dcpep_cb_prop_end(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_end_req *req)
+{
+ u8 resp = dcpep_process_chunks(dcp, req);
+
+ /* Reset for the next transfer */
+ devm_kfree(dcp->dev, dcp->chunks.data);
+ dcp->chunks.data = NULL;
+
+ return resp;
+}
+
+/* Boot sequence */
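+/*
+ * The firmware's boot_1 callback kicks off a chain of asynchronous calls:
+ * boot_1 -> boot_1_5 -> boot_2 -> boot_3 -> boot_4 -> boot_5 -> boot_done,
+ * which finally acknowledges the deferred callback on the CB channel.
+ */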
+static void boot_done(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_channel *ch = &dcp->ch_cb;
+ u8 *succ = ch->output[ch->depth - 1];
+ dev_dbg(dcp->dev, "boot done");
+
+ *succ = true;
+ dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static void boot_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_set_display_refresh_properties(dcp, false, boot_done, NULL);
+}
+
+static void boot_4(struct apple_dcp *dcp, void *out, void *cookie)
+{
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u32 v_true = 1;
+ dcp_late_init_signal(dcp, false, &v_true, boot_5, NULL);
+#else
+ dcp_late_init_signal(dcp, false, boot_5, NULL);
+#endif
+}
+
+static void boot_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ u32 v_true = true;
+
+ dcp_flush_supports_power(dcp, false, &v_true, boot_4, NULL);
+}
+
+static void boot_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_setup_video_limits(dcp, false, boot_3, NULL);
+}
+
+static void boot_1_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_create_default_fb(dcp, false, boot_2, NULL);
+}
+
+/* Use special function signature to defer the ACK */
+static bool dcpep_cb_boot_1(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+ trace_iomfb_callback(dcp, tag, __func__);
+ dcp_set_create_dfb(dcp, false, boot_1_5, NULL);
+ return false;
+}
+
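+/* Bandwidth allocation request; echo the (unknown) values back and report success. */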
+static struct dcp_allocate_bandwidth_resp dcpep_cb_allocate_bandwidth(struct apple_dcp *dcp,
+ struct dcp_allocate_bandwidth_req *req)
+{
+ return (struct dcp_allocate_bandwidth_resp){
+ .unk1 = req->unk1,
+ .unk2 = req->unk2,
+ .ret = 1,
+ };
+}
+
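+/*
+ * Report the realtime bandwidth scratch and doorbell registers to the
+ * firmware, derived from the platform-described register indices and offsets.
+ */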
+static struct dcp_rt_bandwidth dcpep_cb_rt_bandwidth(struct apple_dcp *dcp)
+{
+ struct dcp_rt_bandwidth rt_bw = (struct dcp_rt_bandwidth){
+ .reg_scratch = 0,
+ .reg_doorbell = 0,
+ .doorbell_bit = 0,
+ };
+
+ if (dcp->disp_bw_scratch_index) {
+ u32 offset = dcp->disp_bw_scratch_offset;
+ u32 index = dcp->disp_bw_scratch_index;
+ rt_bw.reg_scratch = dcp->disp_registers[index]->start + offset;
+ }
+
+ if (dcp->disp_bw_doorbell_index) {
+ u32 index = dcp->disp_bw_doorbell_index;
+ rt_bw.reg_doorbell = dcp->disp_registers[index]->start;
+ rt_bw.doorbell_bit = REG_DOORBELL_BIT(dcp->index);
+ /*
+ * This is most certainly not padding. t8103-dcp crashes without
+ * setting this immediately during modeset on 12.3 and 13.5
+ * firmware.
+ */
+ rt_bw.padding[3] = 0x4;
+ }
+
+ return rt_bw;
+}
+
+static struct dcp_set_frame_sync_props_resp
+dcpep_cb_set_frame_sync_props(struct apple_dcp *dcp,
+ struct dcp_set_frame_sync_props_req *req)
+{
+ return (struct dcp_set_frame_sync_props_resp){};
+}
+
+/* Callback to get the current time as milliseconds since the UNIX epoch */
+static u64 dcpep_cb_get_time(struct apple_dcp *dcp)
+{
+ return ktime_to_ms(ktime_get_real());
+}
+
+struct dcp_swap_cookie {
+ struct kref refcount;
+ struct completion done;
+ u32 swap_id;
+};
+
+static void release_swap_cookie(struct kref *ref)
+{
+ struct dcp_swap_cookie *cookie;
+ cookie = container_of(ref, struct dcp_swap_cookie, refcount);
+
+ kfree(cookie);
+}
+
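+/*
+ * Completion handler for the clearing swap submitted on the poweroff path:
+ * wake the waiter and release framebuffers whose swap-out has completed.
+ */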
+static void dcp_swap_cleared(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct DCP_FW_NAME(dcp_swap_submit_resp) *resp = data;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (cookie) {
+ struct dcp_swap_cookie *info = cookie;
+ complete(&info->done);
+ kref_put(&info->refcount, release_swap_cookie);
+ }
+
+ if (resp->ret) {
+ dev_err(dcp->dev, "swap_clear failed! status %u\n", resp->ret);
+ dcp_drm_crtc_vblank(dcp->crtc);
+ return;
+ }
+
+ while (!list_empty(&dcp->swapped_out_fbs)) {
+ struct dcp_fb_reference *entry;
+ entry = list_first_entry(&dcp->swapped_out_fbs,
+ struct dcp_fb_reference, head);
+ if (entry->swap_id == dcp->last_swap_id)
+ break;
+ if (entry->fb)
+ drm_framebuffer_put(entry->fb);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+}
+
+static void dcp_swap_clear_started(struct apple_dcp *dcp, void *data,
+ void *cookie)
+{
+ struct dcp_swap_start_resp *resp = data;
+ dev_dbg(dcp->dev, "%s swap_id: %u", __func__, resp->swap_id);
+ DCP_FW_UNION(dcp->swap).swap.swap_id = resp->swap_id;
+
+ if (cookie) {
+ struct dcp_swap_cookie *info = cookie;
+ info->swap_id = resp->swap_id;
+ }
+
+ dcp_swap_submit(dcp, false, &DCP_FW_UNION(dcp->swap), dcp_swap_cleared, cookie);
+}
+
+static void dcp_on_final(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+static void dcp_on_set_power_state(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_set_power_state_req req = {
+ .unklong = 1,
+ };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ dcp_set_power_state(dcp, false, &req, dcp_on_final, cookie);
+}
+
+static void dcp_on_set_parameter(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_set_parameter_dcp param = {
+ .param = 14,
+ .value = { 0 },
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ .count = 3,
+#else
+ .count = 1,
+#endif
+ };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ dcp_set_parameter_dcp(dcp, false, &param, dcp_on_set_power_state, cookie);
+}
+
+void DCP_FW_NAME(iomfb_poweron)(struct apple_dcp *dcp)
+{
+ struct dcp_wait_cookie *cookie;
+ int ret;
+ u32 handle;
+ dev_err(dcp->dev, "dcp_poweron() starting\n");
+
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ if (dcp->main_display) {
+ handle = 0;
+ dcp_set_display_device(dcp, false, &handle, dcp_on_set_power_state,
+ cookie);
+ } else {
+ handle = 2;
+ dcp_set_display_device(dcp, false, &handle,
+ dcp_on_set_parameter, cookie);
+ }
+ ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(500));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "wait for power timed out");
+
+ kref_put(&cookie->refcount, release_wait_cookie);
+
+ /* Force a brightness update after poweron, to restore the brightness */
+ dcp->brightness.update = true;
+}
+
+static void complete_set_powerstate(struct apple_dcp *dcp, void *out,
+ void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+static void last_client_closed_poff(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_set_power_state_req power_req = {
+ .unklong = 0,
+ };
+ dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate,
+ cookie);
+}
+
+static void aborted_swaps_dcp_poff(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct iomfb_last_client_close_req last_client_req = {};
+ iomfb_last_client_close(dcp, false, &last_client_req,
+ last_client_closed_poff, cookie);
+}
+
+void DCP_FW_NAME(iomfb_poweroff)(struct apple_dcp *dcp)
+{
+ int ret, swap_id;
+ struct iomfb_abort_swaps_dcp_req abort_req = {
+ .client = {
+ .flag2 = 1,
+ },
+ };
+ struct dcp_swap_cookie *cookie;
+ struct dcp_wait_cookie *poff_cookie;
+ struct dcp_swap_start_req swap_req = { 0 };
+ struct DCP_FW_NAME(dcp_swap_submit_req) *swap = &DCP_FW_UNION(dcp->swap);
+
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ // clear surfaces
+ memset(swap, 0, sizeof(*swap));
+
+ swap->swap.swap_enabled =
+ swap->swap.swap_completed = IOMFB_SET_BACKGROUND | 0x7;
+ swap->swap.bg_color = 0xFF000000;
+
+ /*
+ * Turn off the backlight. This matters because the DCP's idea of
+ * backlight brightness gets desynced after a power change, and it
+ * needs to be told it's going to turn off so it will consider the
+ * subsequent update on poweron an actual change and restore the
+ * brightness.
+ */
+ if (dcp_has_panel(dcp)) {
+ swap->swap.bl_unk = 1;
+ swap->swap.bl_value = 0;
+ swap->swap.bl_power = 0;
+ }
+
+ for (int l = 0; l < SWAP_SURFACES; l++)
+ swap->surf_null[l] = true;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ for (int l = 0; l < 5; l++)
+ swap->surf2_null[l] = true;
+ swap->unkU32Ptr_null = true;
+ swap->unkU32out_null = true;
+#endif
+
+ dcp_swap_start(dcp, false, &swap_req, dcp_swap_clear_started, cookie);
+
+ ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(50));
+ swap_id = cookie->swap_id;
+ kref_put(&cookie->refcount, release_swap_cookie);
+ if (ret <= 0) {
+ dcp->crashed = true;
+ return;
+ }
+
+ dev_dbg(dcp->dev, "%s: clear swap submitted: %u", __func__, swap_id);
+
+ poff_cookie = kzalloc(sizeof(*poff_cookie), GFP_KERNEL);
+ if (!poff_cookie)
+ return;
+ init_completion(&poff_cookie->done);
+ kref_init(&poff_cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&poff_cookie->refcount);
+
+ iomfb_abort_swaps_dcp(dcp, false, &abort_req,
+ aborted_swaps_dcp_poff, poff_cookie);
+ ret = wait_for_completion_timeout(&poff_cookie->done,
+ msecs_to_jiffies(1000));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "setPowerState(0) timeout %u ms", 1000);
+ else if (ret > 0)
+ dev_dbg(dcp->dev,
+ "setPowerState(0) finished with %d ms to spare",
+ jiffies_to_msecs(ret));
+
+ kref_put(&poff_cookie->refcount, release_wait_cookie);
+ dev_dbg(dcp->dev, "%s: setPowerState(0) done", __func__);
+
+ dev_err(dcp->dev, "dcp_poweroff() done\n");
+}
+
+static void last_client_closed_sleep(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_set_power_state_req power_req = {
+ .unklong = 0,
+ };
+ dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate, cookie);
+}
+
+static void aborted_swaps_dcp_sleep(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct iomfb_last_client_close_req req = { 0 };
+ iomfb_last_client_close(dcp, false, &req, last_client_closed_sleep, cookie);
+}
+
+void DCP_FW_NAME(iomfb_sleep)(struct apple_dcp *dcp)
+{
+ int ret;
+ struct iomfb_abort_swaps_dcp_req req = {
+ .client = {
+ .flag2 = 1,
+ },
+ };
+
+ struct dcp_wait_cookie *cookie;
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ iomfb_abort_swaps_dcp(dcp, false, &req, aborted_swaps_dcp_sleep,
+ cookie);
+ ret = wait_for_completion_timeout(&cookie->done,
+ msecs_to_jiffies(1000));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "setDCPPower(0) timeout %u ms", 1000);
+
+ kref_put(&cookie->refcount, release_wait_cookie);
+ dev_dbg(dcp->dev, "%s: setDCPPower(0) done", __func__);
+
+ dev_err(dcp->dev, "dcp_sleep() done\n");
+}
+
+static void dcpep_cb_hotplug(struct apple_dcp *dcp, u64 *connected)
+{
+ struct apple_connector *connector = dcp->connector;
+
+ /* DCP issues hotplug_gated callbacks after SetPowerState() calls on
+ * devices with an integrated display (MacBooks, iMacs). This must not
+ * result in connector state changes on the DRM side since some
+ * applications won't enable a CRTC whose connector is in the
+ * disconnected state; Weston after DPMS off is one example.
+ * dcp_is_main_display() returns true on devices with an integrated
+ * display, so ignore the hotplug_gated() callbacks there.
+ */
+ if (dcp->main_display)
+ return;
+
+ if (dcp->during_modeset) {
+ dev_info(dcp->dev,
+ "cb_hotplug() ignored during modeset connected:%llu\n",
+ *connected);
+ return;
+ }
+
+ dev_info(dcp->dev, "cb_hotplug() connected:%llu, valid_mode:%d\n",
+ *connected, dcp->valid_mode);
+
+ /* Hotplug invalidates mode. DRM doesn't always handle this. */
+ if (!(*connected)) {
+ dcp->valid_mode = false;
+ /* after unplug swap will not complete until the next
+ * set_digital_out_mode */
+ schedule_work(&dcp->vblank_wq);
+ }
+
+ if (connector && connector->connected != !!(*connected)) {
+ connector->connected = !!(*connected);
+ dcp->valid_mode = false;
+ schedule_work(&connector->hotplug_wq);
+ }
+}
+
+static void
+dcpep_cb_swap_complete_intent_gated(struct apple_dcp *dcp,
+ struct dcp_swap_complete_intent_gated *info)
+{
+ trace_iomfb_swap_complete_intent_gated(dcp, info->swap_id,
+ info->width, info->height);
+}
+
+static void
+dcpep_cb_abort_swap_ap_gated(struct apple_dcp *dcp, u32 *swap_id)
+{
+ trace_iomfb_abort_swap_ap_gated(dcp, *swap_id);
+}
+
+static struct dcpep_get_tiling_state_resp
+dcpep_cb_get_tiling_state(struct apple_dcp *dcp,
+ struct dcpep_get_tiling_state_req *req)
+{
+ return (struct dcpep_get_tiling_state_resp){
+ .value = 0,
+ .ret = 1,
+ };
+}
+
+static u8 dcpep_cb_create_backlight_service(struct apple_dcp *dcp)
+{
+ return dcp_has_panel(dcp);
+}
+
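+/*
+ * Trampolines wrap the typed callback handlers above so they can be invoked
+ * from the generic callback dispatcher; IN/OUT/INOUT indicates whether the
+ * callback consumes an input payload, produces an output payload, or both.
+ */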
+TRAMPOLINE_VOID(trampoline_nop, dcpep_cb_nop);
+TRAMPOLINE_OUT(trampoline_true, dcpep_cb_true, u8);
+TRAMPOLINE_OUT(trampoline_false, dcpep_cb_false, u8);
+TRAMPOLINE_OUT(trampoline_zero, dcpep_cb_zero, u32);
+TRAMPOLINE_IN(trampoline_swap_complete, dcpep_cb_swap_complete,
+ struct DCP_FW_NAME(dc_swap_complete_resp));
+TRAMPOLINE_INOUT(trampoline_get_uint_prop, dcpep_cb_get_uint_prop,
+ struct dcp_get_uint_prop_req, struct dcp_get_uint_prop_resp);
+TRAMPOLINE_IN(trampoline_set_fx_prop, iomfbep_cb_set_fx_prop,
+ struct iomfb_set_fx_prop_req)
+TRAMPOLINE_INOUT(trampoline_map_piodma, dcpep_cb_map_piodma,
+ struct dcp_map_buf_req, struct dcp_map_buf_resp);
+TRAMPOLINE_IN(trampoline_unmap_piodma, dcpep_cb_unmap_piodma,
+ struct dcp_unmap_buf_resp);
+TRAMPOLINE_INOUT(trampoline_sr_set_property_int, iomfbep_cb_sr_set_property_int,
+ struct iomfb_sr_set_property_int_req, u8);
+TRAMPOLINE_INOUT(trampoline_allocate_buffer, dcpep_cb_allocate_buffer,
+ struct dcp_allocate_buffer_req,
+ struct dcp_allocate_buffer_resp);
+TRAMPOLINE_INOUT(trampoline_map_physical, dcpep_cb_map_physical,
+ struct dcp_map_physical_req, struct dcp_map_physical_resp);
+TRAMPOLINE_INOUT(trampoline_release_mem_desc, dcpep_cb_release_mem_desc, u32,
+ u8);
+TRAMPOLINE_INOUT(trampoline_map_reg, dcpep_cb_map_reg,
+ struct DCP_FW_NAME(dcp_map_reg_req),
+ struct DCP_FW_NAME(dcp_map_reg_resp));
+TRAMPOLINE_INOUT(trampoline_read_edt_data, dcpep_cb_read_edt_data,
+ struct dcp_read_edt_data_req, struct dcp_read_edt_data_resp);
+TRAMPOLINE_INOUT(trampoline_prop_start, dcpep_cb_prop_start, u32, u8);
+TRAMPOLINE_INOUT(trampoline_prop_chunk, dcpep_cb_prop_chunk,
+ struct dcp_set_dcpav_prop_chunk_req, u8);
+TRAMPOLINE_INOUT(trampoline_prop_end, dcpep_cb_prop_end,
+ struct dcp_set_dcpav_prop_end_req, u8);
+TRAMPOLINE_INOUT(trampoline_allocate_bandwidth, dcpep_cb_allocate_bandwidth,
+ struct dcp_allocate_bandwidth_req, struct dcp_allocate_bandwidth_resp);
+TRAMPOLINE_OUT(trampoline_rt_bandwidth, dcpep_cb_rt_bandwidth,
+ struct dcp_rt_bandwidth);
+TRAMPOLINE_INOUT(trampoline_set_frame_sync_props, dcpep_cb_set_frame_sync_props,
+ struct dcp_set_frame_sync_props_req,
+ struct dcp_set_frame_sync_props_resp);
+TRAMPOLINE_OUT(trampoline_get_frequency, dcpep_cb_get_frequency, u64);
+TRAMPOLINE_OUT(trampoline_get_time, dcpep_cb_get_time, u64);
+TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug, u64);
+TRAMPOLINE_IN(trampoline_swap_complete_intent_gated,
+ dcpep_cb_swap_complete_intent_gated,
+ struct dcp_swap_complete_intent_gated);
+TRAMPOLINE_IN(trampoline_abort_swap_ap_gated, dcpep_cb_abort_swap_ap_gated, u32);
+TRAMPOLINE_IN(trampoline_enable_backlight_message_ap_gated,
+ iomfbep_cb_enable_backlight_message_ap_gated, u8);
+TRAMPOLINE_IN(trampoline_pr_publish, iomfb_cb_pr_publish,
+ struct iomfb_property);
+TRAMPOLINE_INOUT(trampoline_get_tiling_state, dcpep_cb_get_tiling_state,
+ struct dcpep_get_tiling_state_req, struct dcpep_get_tiling_state_resp);
+TRAMPOLINE_OUT(trampoline_create_backlight_service, dcpep_cb_create_backlight_service, u8);
+
+/*
+ * Callback for swap requests. If a swap failed, we'll never get a swap
+ * complete event so we need to fake a vblank event early to avoid a hang.
+ */
+
+static void dcp_swapped(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct DCP_FW_NAME(dcp_swap_submit_resp) *resp = data;
+
+ if (resp->ret) {
+ dev_err(dcp->dev, "swap failed! status %u\n", resp->ret);
+ dcp_drm_crtc_vblank(dcp->crtc);
+ return;
+ }
+
+ while (!list_empty(&dcp->swapped_out_fbs)) {
+ struct dcp_fb_reference *entry;
+ entry = list_first_entry(&dcp->swapped_out_fbs,
+ struct dcp_fb_reference, head);
+ if (entry->swap_id == dcp->last_swap_id)
+ break;
+ if (entry->fb)
+ drm_framebuffer_put(entry->fb);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+}
+
+static void dcp_swap_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_start_resp *resp = data;
+
+ DCP_FW_UNION(dcp->swap).swap.swap_id = resp->swap_id;
+
+ trace_iomfb_swap_submit(dcp, resp->swap_id);
+ dcp_swap_submit(dcp, false, &DCP_FW_UNION(dcp->swap), dcp_swapped, NULL);
+}
+
+/* Helpers to modeset and swap, used to flush */
+static void do_swap(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_start_req start_req = { 0 };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (dcp->connector && dcp->connector->connected)
+ dcp_swap_start(dcp, false, &start_req, dcp_swap_started, NULL);
+ else
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void complete_set_digital_out_mode(struct apple_dcp *dcp, void *data,
+ void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+int DCP_FW_NAME(iomfb_modeset)(struct apple_dcp *dcp,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dcp_display_mode *mode;
+ struct dcp_wait_cookie *cookie;
+ struct dcp_color_mode *cmode = NULL;
+ int ret;
+
+ mode = lookup_mode(dcp, &crtc_state->mode);
+ if (!mode) {
+ dev_err(dcp->dev, "no match for " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->mode));
+ return -EIO;
+ }
+
+ dev_info(dcp->dev,
+ "set_digital_out_mode(color:%d timing:%d) " DRM_MODE_FMT "\n",
+ mode->color_mode_id, mode->timing_mode_id,
+ DRM_MODE_ARG(&crtc_state->mode));
+ if (mode->color_mode_id == mode->sdr_rgb.id)
+ cmode = &mode->sdr_rgb;
+ else if (mode->color_mode_id == mode->sdr_444.id)
+ cmode = &mode->sdr_444;
+ else if (mode->color_mode_id == mode->sdr.id)
+ cmode = &mode->sdr;
+ else if (mode->color_mode_id == mode->best.id)
+ cmode = &mode->best;
+ if (cmode)
+ dev_info(dcp->dev,
+ "set_digital_out_mode() color mode depth:%hhu format:%u "
+ "colorimetry:%u eotf:%u range:%u\n", cmode->depth,
+ cmode->format, cmode->colorimetry, cmode->eotf,
+ cmode->range);
+
+ dcp->mode = (struct dcp_set_digital_out_mode_req){
+ .color_mode_id = mode->color_mode_id,
+ .timing_mode_id = mode->timing_mode_id
+ };
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie) {
+ return -ENOMEM;
+ }
+
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ dcp->during_modeset = true;
+
+ dcp_set_digital_out_mode(dcp, false, &dcp->mode,
+ complete_set_digital_out_mode, cookie);
+
+ /*
+ * The DCP firmware has an internal timeout of ~8 seconds for
+ * modesets. Add an extra 500 ms on top of that to be reasonably sure the
+ * modeset call has returned before giving up on the wait.
+ */
+ dev_dbg(dcp->dev, "%s - wait for modeset", __func__);
+ ret = wait_for_completion_timeout(&cookie->done,
+ msecs_to_jiffies(8500));
+
+ kref_put(&cookie->refcount, release_wait_cookie);
+ dcp->during_modeset = false;
+ dev_info(dcp->dev, "set_digital_out_mode finished:%d\n", ret);
+
+ if (ret == 0) {
+ dev_info(dcp->dev, "set_digital_out_mode timed out\n");
+ return -EIO;
+ } else if (ret < 0) {
+ dev_info(dcp->dev,
+ "waiting on set_digital_out_mode failed:%d\n", ret);
+ return -EIO;
+
+ } else if (ret > 0) {
+ dev_dbg(dcp->dev,
+ "set_digital_out_mode finished with %d to spare\n",
+ jiffies_to_msecs(ret));
+ }
+ dcp->valid_mode = true;
+
+ return 0;
+}
+
+void DCP_FW_NAME(iomfb_flush)(struct apple_dcp *dcp, struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_state, *old_state;
+ struct drm_crtc_state *crtc_state;
+ struct DCP_FW_NAME(dcp_swap_submit_req) *req = &DCP_FW_UNION(dcp->swap);
+ int plane_idx, l;
+ int has_surface = 0;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ /* Reset to defaults */
+ memset(req, 0, sizeof(*req));
+ for (l = 0; l < SWAP_SURFACES; l++)
+ req->surf_null[l] = true;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ for (l = 0; l < 5; l++)
+ req->surf2_null[l] = true;
+ req->unkU32Ptr_null = true;
+ req->unkU32out_null = true;
+#endif
+
+ /*
+ * Clear all surfaces on startup. The boot framebuffer in surface 0
+ * sticks around.
+ */
+ if (!dcp->surfaces_cleared) {
+ req->swap.swap_enabled = IOMFB_SET_BACKGROUND | 0x7;
+ req->swap.bg_color = 0xFF000000;
+ dcp->surfaces_cleared = true;
+ }
+
+ // Surface 0 has limitations at least on t600x.
+ l = 1;
+ for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_gem_dma_object *obj;
+ struct drm_rect src_rect;
+ bool is_premultiplied = false;
+
+ /* skip planes not for this crtc */
+ if (old_state->crtc != crtc && new_state->crtc != crtc)
+ continue;
+
+ WARN_ON(l >= SWAP_SURFACES);
+
+ req->swap.swap_enabled |= BIT(l);
+
+ if (old_state->fb && fb != old_state->fb) {
+ /*
+ * Race condition between a framebuffer unbind getting
+ * swapped out and GEM unreferencing a framebuffer. If
+ * we lose the race, the display gets IOVA faults and
+ * the DCP crashes. We need to extend the lifetime of
+ * the drm_framebuffer (and hence the GEM object) until
+ * after we get a swap complete for the swap unbinding
+ * it.
+ */
+ struct dcp_fb_reference *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->fb = old_state->fb;
+ entry->swap_id = dcp->last_swap_id;
+ list_add_tail(&entry->head,
+ &dcp->swapped_out_fbs);
+ }
+ drm_framebuffer_get(old_state->fb);
+ }
+
+ if (!new_state->fb) {
+ l += 1;
+ continue;
+ }
+ req->surf_null[l] = false;
+ has_surface = 1;
+
+ /*
+ * DCP doesn't support XBGR8 / XRGB8 natively. Blending as
+ * pre-multiplied alpha with a black background can be used as a
+ * workaround for the bottommost plane.
+ */
+ if (fb->format->format == DRM_FORMAT_XRGB8888 ||
+ fb->format->format == DRM_FORMAT_XBGR8888)
+ is_premultiplied = true;
+
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+
+ req->swap.src_rect[l] = drm_to_dcp_rect(&src_rect);
+ req->swap.dst_rect[l] = drm_to_dcp_rect(&new_state->dst);
+
+ if (dcp->notch_height > 0)
+ req->swap.dst_rect[l].y += dcp->notch_height;
+
+ /* The obvious helper call drm_fb_dma_get_gem_addr() adjusts the
+ * address for source x/y offsets. Since IOMFB supports a source
+ * position directly, prefer that and pass the unadjusted address.
+ */
+ obj = drm_fb_dma_get_gem_obj(fb, 0);
+ if (obj)
+ req->surf_iova[l] = obj->dma_addr + fb->offsets[0];
+
+ req->surf[l] = (struct DCP_FW_NAME(dcp_surface)){
+ .is_premultiplied = is_premultiplied,
+ .format = drm_format_to_dcp(fb->format->format),
+ .xfer_func = DCP_XFER_FUNC_SDR,
+ .colorspace = DCP_COLORSPACE_NATIVE,
+ .stride = fb->pitches[0],
+ .width = fb->width,
+ .height = fb->height,
+ .buf_size = fb->height * fb->pitches[0],
+ .surface_id = req->swap.surf_ids[l],
+
+ /* Only used for compressed or multiplanar surfaces */
+ .pix_size = 1,
+ .pel_w = 1,
+ .pel_h = 1,
+ .has_comp = 1,
+ .has_planes = 1,
+ };
+
+ l += 1;
+ }
+
+ if (!has_surface && !crtc_state->color_mgmt_changed) {
+ if (crtc_state->enable && crtc_state->active &&
+ !crtc_state->planes_changed) {
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ /* Set black background */
+ req->swap.swap_enabled |= IOMFB_SET_BACKGROUND;
+ req->swap.bg_color = 0xFF000000;
+ req->clear = 1;
+ }
+
+ /* These fields should be set together */
+ req->swap.swap_completed = req->swap.swap_enabled;
+
+ /* update brightness if changed */
+ if (dcp_has_panel(dcp) && dcp->brightness.update) {
+ req->swap.bl_unk = 1;
+ req->swap.bl_value = dcp->brightness.dac;
+ req->swap.bl_power = 0x40;
+ dcp->brightness.update = false;
+ }
+
+ if (crtc_state->color_mgmt_changed && crtc_state->ctm) {
+ struct iomfb_set_matrix_req mat;
+ struct drm_color_ctm *ctm = (struct drm_color_ctm *)crtc_state->ctm->data;
+
+ mat.unk_u32 = 9;
+ mat.r[0] = ctm->matrix[0];
+ mat.r[1] = ctm->matrix[1];
+ mat.r[2] = ctm->matrix[2];
+ mat.g[0] = ctm->matrix[3];
+ mat.g[1] = ctm->matrix[4];
+ mat.g[2] = ctm->matrix[5];
+ mat.b[0] = ctm->matrix[6];
+ mat.b[1] = ctm->matrix[7];
+ mat.b[2] = ctm->matrix[8];
+
+ iomfb_set_matrix(dcp, false, &mat, do_swap, NULL);
+ } else
+ do_swap(dcp, NULL, NULL);
+}
+
+static void res_is_main_display(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct apple_connector *connector;
+ int result = *(int *)out;
+ dev_info(dcp->dev, "DCP is_main_display: %d\n", result);
+
+ dcp->main_display = result != 0;
+
+ connector = dcp->connector;
+ if (connector) {
+ connector->connected = dcp->nr_modes > 0;
+ schedule_work(&connector->hotplug_wq);
+ }
+
+ dcp->active = true;
+ complete(&dcp->start_done);
+}
+
+static void init_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_is_main_display(dcp, false, res_is_main_display, NULL);
+}
+
+static void init_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_first_client_open(dcp, false, init_3, NULL);
+}
+
+static void init_1(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ u32 val = 0;
+ dcp_enable_disable_video_power_savings(dcp, false, &val, init_2, NULL);
+}
+
+static void dcp_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct iomfb_get_color_remap_mode_req color_remap =
+ (struct iomfb_get_color_remap_mode_req){
+ .mode = 6,
+ };
+
+ dev_info(dcp->dev, "DCP booted\n");
+
+ iomfb_get_color_remap_mode(dcp, false, &color_remap, init_1, cookie);
+}
+
+void DCP_FW_NAME(iomfb_shutdown)(struct apple_dcp *dcp)
+{
+ struct dcp_set_power_state_req req = {
+ /* defaults are ok */
+ };
+
+ dcp_set_power_state(dcp, false, &req, NULL, NULL);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+/*
+ * This file is intended to be included multiple times with DCP_FW_VER
+ * defined to declare DCP firmware version dependent structs.
+ */
+
+#ifdef DCP_FW_VER
+
+#include <drm/drm_crtc.h>
+
+#include <linux/types.h>
+
+#include "iomfb.h"
+#include "version_utils.h"
+
+struct DCP_FW_NAME(dcp_swap) {
+ u64 ts1;
+ u64 ts2;
+ u64 unk_10[6];
+ u64 flags1;
+ u64 flags2;
+
+ u32 swap_id;
+
+ u32 surf_ids[SWAP_SURFACES];
+ struct dcp_rect src_rect[SWAP_SURFACES];
+ u32 surf_flags[SWAP_SURFACES];
+ u32 surf_unk[SWAP_SURFACES];
+ struct dcp_rect dst_rect[SWAP_SURFACES];
+ u32 swap_enabled;
+ u32 swap_completed;
+
+ u32 bg_color;
+ u8 unk_110[0x1b8];
+ u32 unk_2c8;
+ u8 unk_2cc[0x14];
+ u32 unk_2e0;
+#if DCP_FW_VER < DCP_FW_VERSION(13, 2, 0)
+ u16 unk_2e2;
+#else
+ u8 unk_2e2[3];
+#endif
+ u64 bl_unk;
+ u32 bl_value; // min value is 0x10000000
+ u8 bl_power; // constant 0x40 for on
+ u8 unk_2f3[0x2d];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u8 unk_320[0x13f];
+ u64 unk_1;
+#endif
+} __packed;
+
+/* Information describing a surface */
+struct DCP_FW_NAME(dcp_surface) {
+ u8 is_tiled;
+ u8 is_tearing_allowed;
+ u8 is_premultiplied;
+ u32 plane_cnt;
+ u32 plane_cnt2;
+ u32 format; /* DCP fourcc */
+ u32 ycbcr_matrix;
+ u8 xfer_func;
+ u8 colorspace;
+ u32 stride;
+ u16 pix_size;
+ u8 pel_w;
+ u8 pel_h;
+ u32 offset;
+ u32 width;
+ u32 height;
+ u32 buf_size;
+ u64 protection_opts;
+ u32 surface_id;
+ struct dcp_component_types comp_types[MAX_PLANES];
+ u64 has_comp;
+ struct dcp_plane_info planes[MAX_PLANES];
+ u64 has_planes;
+ u32 compression_info[MAX_PLANES][13];
+ u64 has_compr_info;
+ u32 unk_num;
+ u32 unk_denom;
+#if DCP_FW_VER < DCP_FW_VERSION(13, 2, 0)
+ u8 padding[7];
+#else
+ u8 padding[47];
+#endif
+} __packed;
+
+/* Prototypes */
+
+struct DCP_FW_NAME(dcp_swap_submit_req) {
+ struct DCP_FW_NAME(dcp_swap) swap;
+ struct DCP_FW_NAME(dcp_surface) surf[SWAP_SURFACES];
+ u64 surf_iova[SWAP_SURFACES];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u64 unk_u64_a[SWAP_SURFACES];
+ struct DCP_FW_NAME(dcp_surface) surf2[5];
+ u64 surf2_iova[5];
+#endif
+ u8 unkbool;
+ u64 unkdouble;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u64 unkU64;
+ u8 unkbool2;
+#endif
+ u32 clear; // or maybe switch to default fb?
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u32 unkU32Ptr;
+#endif
+ u8 swap_null;
+ u8 surf_null[SWAP_SURFACES];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u8 surf2_null[5];
+#endif
+ u8 unkoutbool_null;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u8 unkU32Ptr_null;
+ u8 unkU32out_null;
+#endif
+ u8 padding[1];
+} __packed;
+
+struct DCP_FW_NAME(dcp_swap_submit_resp) {
+ u8 unkoutbool;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u32 unkU32out;
+#endif
+ u32 ret;
+ u8 padding[3];
+} __packed;
+
+struct DCP_FW_NAME(dc_swap_complete_resp) {
+ u32 swap_id;
+ u8 unkbool;
+ u64 swap_data;
+#if DCP_FW_VER < DCP_FW_VERSION(13, 2, 0)
+ u8 swap_info[0x6c4];
+#else
+ u8 swap_info[0x6c5];
+#endif
+ u32 unkint;
+ u8 swap_info_null;
+} __packed;
+
+struct DCP_FW_NAME(dcp_map_reg_req) {
+ char obj[4];
+ u32 index;
+ u32 flags;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u8 unk_u64_null;
+#endif
+ u8 addr_null;
+ u8 length_null;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u8 padding[1];
+#else
+ u8 padding[2];
+#endif
+} __packed;
+
+struct DCP_FW_NAME(dcp_map_reg_resp) {
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+ u64 dva;
+#endif
+ u64 addr;
+ u64 length;
+ u32 ret;
+} __packed;
+
+
+struct apple_dcp;
+
+int DCP_FW_NAME(iomfb_modeset)(struct apple_dcp *dcp,
+ struct drm_crtc_state *crtc_state);
+void DCP_FW_NAME(iomfb_flush)(struct apple_dcp *dcp, struct drm_crtc *crtc, struct drm_atomic_state *state);
+void DCP_FW_NAME(iomfb_poweron)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_poweroff)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_sleep)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_start)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_shutdown)(struct apple_dcp *dcp);
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#include "iomfb_v12_3.h"
+#include "iomfb_v13_3.h"
+#include "version_utils.h"
+
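+/*
+ * RPC method table for v12.3 firmware. The four-character codes are the
+ * firmware's method selectors and differ between firmware versions, so each
+ * supported version carries its own table.
+ */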
+static const struct dcp_method_entry dcp_methods[dcpep_num_methods] = {
+ IOMFB_METHOD("A000", dcpep_late_init_signal),
+ IOMFB_METHOD("A029", dcpep_setup_video_limits),
+ IOMFB_METHOD("A131", iomfbep_a131_pmu_service_matched),
+ IOMFB_METHOD("A132", iomfbep_a132_backlight_service_matched),
+ IOMFB_METHOD("A357", dcpep_set_create_dfb),
+ IOMFB_METHOD("A358", iomfbep_a358_vi_set_temperature_hint),
+ IOMFB_METHOD("A401", dcpep_start_signal),
+ IOMFB_METHOD("A407", dcpep_swap_start),
+ IOMFB_METHOD("A408", dcpep_swap_submit),
+ IOMFB_METHOD("A410", dcpep_set_display_device),
+ IOMFB_METHOD("A411", dcpep_is_main_display),
+ IOMFB_METHOD("A412", dcpep_set_digital_out_mode),
+ IOMFB_METHOD("A422", iomfbep_set_matrix),
+ IOMFB_METHOD("A426", iomfbep_get_color_remap_mode),
+ IOMFB_METHOD("A439", dcpep_set_parameter_dcp),
+ IOMFB_METHOD("A443", dcpep_create_default_fb),
+ IOMFB_METHOD("A447", dcpep_enable_disable_video_power_savings),
+ IOMFB_METHOD("A454", dcpep_first_client_open),
+ IOMFB_METHOD("A455", iomfbep_last_client_close),
+ IOMFB_METHOD("A460", dcpep_set_display_refresh_properties),
+ IOMFB_METHOD("A463", dcpep_flush_supports_power),
+ IOMFB_METHOD("A464", iomfbep_abort_swaps_dcp),
+ IOMFB_METHOD("A468", dcpep_set_power_state),
+};
+
+#define DCP_FW v12_3
+#define DCP_FW_VER DCP_FW_VERSION(12, 3, 0)
+
+#include "iomfb_template.c"
+
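+/*
+ * Callback handlers for v12.3 firmware, indexed by the firmware's callback
+ * number. Callbacks we do not need to act on are answered with the trivial
+ * nop/true/false/zero trampolines.
+ */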
+static const iomfb_cb_handler cb_handlers[IOMFB_MAX_CB] = {
+ [0] = trampoline_true, /* did_boot_signal */
+ [1] = trampoline_true, /* did_power_on_signal */
+ [2] = trampoline_nop, /* will_power_off_signal */
+ [3] = trampoline_rt_bandwidth,
+ [100] = iomfbep_cb_match_pmu_service,
+ [101] = trampoline_zero, /* get_display_default_stride */
+ [102] = trampoline_nop, /* set_number_property */
+ [103] = trampoline_nop, /* set_boolean_property */
+ [106] = trampoline_nop, /* remove_property */
+ [107] = trampoline_true, /* create_provider_service */
+ [108] = trampoline_true, /* create_product_service */
+ [109] = trampoline_true, /* create_pmu_service */
+ [110] = trampoline_true, /* create_iomfb_service */
+ [111] = trampoline_create_backlight_service,
+ [116] = dcpep_cb_boot_1,
+ [117] = trampoline_false, /* is_dark_boot */
+ [118] = trampoline_false, /* is_dark_boot / is_waking_from_hibernate */
+ [120] = trampoline_read_edt_data,
+ [122] = trampoline_prop_start,
+ [123] = trampoline_prop_chunk,
+ [124] = trampoline_prop_end,
+ [201] = trampoline_map_piodma,
+ [202] = trampoline_unmap_piodma,
+ [206] = iomfbep_cb_match_pmu_service_2,
+ [207] = iomfbep_cb_match_backlight_service,
+ [208] = trampoline_get_time,
+ [211] = trampoline_nop, /* update_backlight_factor_prop */
+ [300] = trampoline_pr_publish,
+ [401] = trampoline_get_uint_prop,
+ [404] = trampoline_nop, /* sr_set_uint_prop */
+ [406] = trampoline_set_fx_prop,
+ [408] = trampoline_get_frequency,
+ [411] = trampoline_map_reg,
+ [413] = trampoline_true, /* sr_set_property_dict */
+ [414] = trampoline_sr_set_property_int,
+ [415] = trampoline_true, /* sr_set_property_bool */
+ [451] = trampoline_allocate_buffer,
+ [452] = trampoline_map_physical,
+ [456] = trampoline_release_mem_desc,
+ [552] = trampoline_true, /* set_property_dict_0 */
+ [561] = trampoline_true, /* set_property_dict */
+ [563] = trampoline_true, /* set_property_int */
+ [565] = trampoline_true, /* set_property_bool */
+ [567] = trampoline_true, /* set_property_str */
+ [574] = trampoline_zero, /* power_up_dart */
+ [576] = trampoline_hotplug,
+ [577] = trampoline_nop, /* powerstate_notify */
+ [582] = trampoline_true, /* create_default_fb_surface */
+ [584] = trampoline_nop, /* IOMobileFramebufferAP::clear_default_surface */
+ [588] = trampoline_nop, /* resize_default_fb_surface_gated */
+ [589] = trampoline_swap_complete,
+ [591] = trampoline_swap_complete_intent_gated,
+ [592] = trampoline_abort_swap_ap_gated,
+ [593] = trampoline_enable_backlight_message_ap_gated,
+ [594] = trampoline_nop, /* IOMobileFramebufferAP::setSystemConsoleMode */
+ [596] = trampoline_false, /* IOMobileFramebufferAP::isDFBAllocated */
+ [597] = trampoline_false, /* IOMobileFramebufferAP::preserveContents */
+ [598] = trampoline_nop, /* find_swap_function_gated */
+};
+
+void DCP_FW_NAME(iomfb_start)(struct apple_dcp *dcp)
+{
+ dcp->cb_handlers = cb_handlers;
+
+ dcp_start_signal(dcp, false, dcp_started, NULL);
+}
+
+#undef DCP_FW_VER
+#undef DCP_FW
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef __APPLE_IOMFB_V12_3_H__
+#define __APPLE_IOMFB_V12_3_H__
+
+#include "version_utils.h"
+
+#define DCP_FW v12_3
+#define DCP_FW_VER DCP_FW_VERSION(12, 3, 0)
+
+#include "iomfb_template.h"
+
+#undef DCP_FW_VER
+#undef DCP_FW
+
+#endif /* __APPLE_IOMFB_V12_3_H__ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#include "iomfb_v12_3.h"
+#include "iomfb_v13_3.h"
+#include "version_utils.h"
+
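+/* RPC method table for v13.3 firmware; selectors are shifted relative to v12.3. */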
+static const struct dcp_method_entry dcp_methods[dcpep_num_methods] = {
+ IOMFB_METHOD("A000", dcpep_late_init_signal),
+ IOMFB_METHOD("A029", dcpep_setup_video_limits),
+ IOMFB_METHOD("A131", iomfbep_a131_pmu_service_matched),
+ IOMFB_METHOD("A132", iomfbep_a132_backlight_service_matched),
+ IOMFB_METHOD("A373", dcpep_set_create_dfb),
+ IOMFB_METHOD("A374", iomfbep_a358_vi_set_temperature_hint),
+ IOMFB_METHOD("A401", dcpep_start_signal),
+ IOMFB_METHOD("A407", dcpep_swap_start),
+ IOMFB_METHOD("A408", dcpep_swap_submit),
+ IOMFB_METHOD("A410", dcpep_set_display_device),
+ IOMFB_METHOD("A411", dcpep_is_main_display),
+ IOMFB_METHOD("A412", dcpep_set_digital_out_mode),
+ IOMFB_METHOD("A422", iomfbep_set_matrix),
+ IOMFB_METHOD("A426", iomfbep_get_color_remap_mode),
+ IOMFB_METHOD("A441", dcpep_set_parameter_dcp),
+ IOMFB_METHOD("A445", dcpep_create_default_fb),
+ IOMFB_METHOD("A449", dcpep_enable_disable_video_power_savings),
+ IOMFB_METHOD("A456", dcpep_first_client_open),
+ IOMFB_METHOD("A457", iomfbep_last_client_close),
+ IOMFB_METHOD("A463", dcpep_set_display_refresh_properties),
+ IOMFB_METHOD("A466", dcpep_flush_supports_power),
+ IOMFB_METHOD("A467", iomfbep_abort_swaps_dcp),
+ IOMFB_METHOD("A472", dcpep_set_power_state),
+};
+
+#define DCP_FW v13_3
+#define DCP_FW_VER DCP_FW_VERSION(13, 3, 0)
+
+#include "iomfb_template.c"
+
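+/* Callback handlers for v13.3 firmware; the numbering differs slightly from v12.3. */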
+static const iomfb_cb_handler cb_handlers[IOMFB_MAX_CB] = {
+ [0] = trampoline_true, /* did_boot_signal */
+ [1] = trampoline_true, /* did_power_on_signal */
+ [2] = trampoline_nop, /* will_power_off_signal */
+ [3] = trampoline_rt_bandwidth,
+ [6] = trampoline_set_frame_sync_props,
+ [100] = iomfbep_cb_match_pmu_service,
+ [101] = trampoline_zero, /* get_display_default_stride */
+ [102] = trampoline_nop, /* set_number_property */
+ [103] = trampoline_nop, /* trigger_user_cal_loader */
+ [104] = trampoline_nop, /* set_boolean_property */
+ [107] = trampoline_nop, /* remove_property */
+ [108] = trampoline_true, /* create_provider_service */
+ [109] = trampoline_true, /* create_product_service */
+ [110] = trampoline_true, /* create_pmu_service */
+ [111] = trampoline_true, /* create_iomfb_service */
+ [112] = trampoline_create_backlight_service,
+ [113] = trampoline_true, /* create_nvram_service? */
+ [114] = trampoline_get_tiling_state,
+ [115] = trampoline_false, /* set_tiling_state */
+ [120] = dcpep_cb_boot_1,
+ [121] = trampoline_false, /* is_dark_boot */
+ [122] = trampoline_false, /* is_dark_boot / is_waking_from_hibernate */
+ [124] = trampoline_read_edt_data,
+ [126] = trampoline_prop_start,
+ [127] = trampoline_prop_chunk,
+ [128] = trampoline_prop_end,
+ [129] = trampoline_allocate_bandwidth,
+ [201] = trampoline_map_piodma,
+ [202] = trampoline_unmap_piodma,
+ [206] = iomfbep_cb_match_pmu_service_2,
+ [207] = iomfbep_cb_match_backlight_service,
+ [208] = trampoline_nop, /* update_backlight_factor_prop */
+ [209] = trampoline_get_time,
+ [300] = trampoline_pr_publish,
+ [401] = trampoline_get_uint_prop,
+ [404] = trampoline_nop, /* sr_set_uint_prop */
+ [406] = trampoline_set_fx_prop,
+ [408] = trampoline_get_frequency,
+ [411] = trampoline_map_reg,
+ [413] = trampoline_true, /* sr_set_property_dict */
+ [414] = trampoline_sr_set_property_int,
+ [415] = trampoline_true, /* sr_set_property_bool */
+ [451] = trampoline_allocate_buffer,
+ [452] = trampoline_map_physical,
+ [456] = trampoline_release_mem_desc,
+ [552] = trampoline_true, /* set_property_dict_0 */
+ [561] = trampoline_true, /* set_property_dict */
+ [563] = trampoline_true, /* set_property_int */
+ [565] = trampoline_true, /* set_property_bool */
+ [567] = trampoline_true, /* set_property_str */
+ [574] = trampoline_zero, /* power_up_dart */
+ [576] = trampoline_hotplug,
+ [577] = trampoline_nop, /* powerstate_notify */
+ [582] = trampoline_true, /* create_default_fb_surface */
+ [584] = trampoline_nop, /* IOMobileFramebufferAP::clear_default_surface */
+ [588] = trampoline_nop, /* resize_default_fb_surface_gated */
+ [589] = trampoline_swap_complete,
+ [591] = trampoline_swap_complete_intent_gated,
+ [592] = trampoline_abort_swap_ap_gated,
+ [593] = trampoline_enable_backlight_message_ap_gated,
+ [594] = trampoline_nop, /* IOMobileFramebufferAP::setSystemConsoleMode */
+ [596] = trampoline_false, /* IOMobileFramebufferAP::isDFBAllocated */
+ [597] = trampoline_false, /* IOMobileFramebufferAP::preserveContents */
+ [598] = trampoline_nop, /* find_swap_function_gated */
+};
+
+void DCP_FW_NAME(iomfb_start)(struct apple_dcp *dcp)
+{
+ dcp->cb_handlers = cb_handlers;
+
+ dcp_start_signal(dcp, false, dcp_started, NULL);
+}
+
+#undef DCP_FW_VER
+#undef DCP_FW
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef __APPLE_IOMFB_V13_3_H__
+#define __APPLE_IOMFB_V13_3_H__
+
+#include "version_utils.h"
+
+#define DCP_FW v13_3
+#define DCP_FW_VER DCP_FW_VERSION(13, 3, 0)
+
+#include "iomfb_template.h"
+
+#undef DCP_FW_VER
+#undef DCP_FW
+
+#endif /* __APPLE_IOMFB_V13_3_H__ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <sound/pcm.h> // for sound format masks
+
+#include "parser.h"
+#include "trace.h"
+
+#define DCP_PARSE_HEADER 0xd3
+
+enum dcp_parse_type {
+ DCP_TYPE_DICTIONARY = 1,
+ DCP_TYPE_ARRAY = 2,
+ DCP_TYPE_INT64 = 4,
+ DCP_TYPE_STRING = 9,
+ DCP_TYPE_BLOB = 10,
+ DCP_TYPE_BOOL = 11
+};
+
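+/*
+ * Every serialized value starts with a packed 32-bit tag: 24 bits of size
+ * (element count for dictionaries/arrays, byte count for strings and blobs),
+ * 5 bits of type, 2 reserved padding bits and a "last" flag.
+ */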
+struct dcp_parse_tag {
+ unsigned int size : 24;
+ enum dcp_parse_type type : 5;
+ unsigned int padding : 2;
+ bool last : 1;
+} __packed;
+
+static const void *parse_bytes(struct dcp_parse_ctx *ctx, size_t count)
+{
+ const void *ptr = ctx->blob + ctx->pos;
+
+ if (ctx->pos + count > ctx->len)
+ return ERR_PTR(-EINVAL);
+
+ ctx->pos += count;
+ return ptr;
+}
+
+static const u32 *parse_u32(struct dcp_parse_ctx *ctx)
+{
+ return parse_bytes(ctx, sizeof(u32));
+}
+
+static const struct dcp_parse_tag *parse_tag(struct dcp_parse_ctx *ctx)
+{
+ const struct dcp_parse_tag *tag;
+
+ /* Align to 32-bits */
+ ctx->pos = round_up(ctx->pos, 4);
+
+ tag = parse_bytes(ctx, sizeof(struct dcp_parse_tag));
+
+ if (IS_ERR(tag))
+ return tag;
+
+ if (tag->padding)
+ return ERR_PTR(-EINVAL);
+
+ return tag;
+}
+
+static const struct dcp_parse_tag *parse_tag_of_type(struct dcp_parse_ctx *ctx,
+ enum dcp_parse_type type)
+{
+ const struct dcp_parse_tag *tag = parse_tag(ctx);
+
+ if (IS_ERR(tag))
+ return tag;
+
+ if (tag->type != type)
+ return ERR_PTR(-EINVAL);
+
+ return tag;
+}
+
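+/* Recursively skip a value (including its children) without interpreting it */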
+static int skip(struct dcp_parse_ctx *handle)
+{
+ const struct dcp_parse_tag *tag = parse_tag(handle);
+ int ret = 0;
+ int i;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ switch (tag->type) {
+ case DCP_TYPE_DICTIONARY:
+ for (i = 0; i < tag->size; ++i) {
+ ret |= skip(handle); /* key */
+ ret |= skip(handle); /* value */
+ }
+
+ return ret;
+
+ case DCP_TYPE_ARRAY:
+ for (i = 0; i < tag->size; ++i)
+ ret |= skip(handle);
+
+ return ret;
+
+ case DCP_TYPE_INT64:
+ handle->pos += sizeof(s64);
+ return 0;
+
+ case DCP_TYPE_STRING:
+ case DCP_TYPE_BLOB:
+ handle->pos += tag->size;
+ return 0;
+
+ case DCP_TYPE_BOOL:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int skip_pair(struct dcp_parse_ctx *handle)
+{
+ int ret;
+
+ ret = skip(handle);
+ if (ret)
+ return ret;
+
+ return skip(handle);
+}
+
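+/*
+ * Consume the next value only if it is a string equal to "specimen";
+ * otherwise return false and leave the value for the caller to parse.
+ */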
+static bool consume_string(struct dcp_parse_ctx *ctx, const char *specimen)
+{
+ const struct dcp_parse_tag *tag;
+ const char *key;
+ ctx->pos = round_up(ctx->pos, 4);
+
+ if (ctx->pos + sizeof(*tag) + strlen(specimen) - 1 > ctx->len)
+ return false;
+ tag = ctx->blob + ctx->pos;
+ key = ctx->blob + ctx->pos + sizeof(*tag);
+ if (tag->padding)
+ return false;
+
+ if (tag->type != DCP_TYPE_STRING ||
+ tag->size != strlen(specimen) ||
+ strncmp(key, specimen, tag->size))
+ return false;
+
+ skip(ctx);
+ return true;
+}
+
+/* Caller must free the result */
+static char *parse_string(struct dcp_parse_ctx *handle)
+{
+ const struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_STRING);
+ const char *in;
+ char *out;
+
+ if (IS_ERR(tag))
+ return (void *)tag;
+
+ in = parse_bytes(handle, tag->size);
+ if (IS_ERR(in))
+ return (void *)in;
+
+ out = kmalloc(tag->size + 1, GFP_KERNEL);
+ if (!out)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(out, in, tag->size);
+ out[tag->size] = '\0';
+ return out;
+}
+
+static int parse_int(struct dcp_parse_ctx *handle, s64 *value)
+{
+ const void *tag = parse_tag_of_type(handle, DCP_TYPE_INT64);
+ const s64 *in;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ in = parse_bytes(handle, sizeof(s64));
+
+ if (IS_ERR(in))
+ return PTR_ERR(in);
+
+ memcpy(value, in, sizeof(*value));
+ return 0;
+}
+
+static int parse_bool(struct dcp_parse_ctx *handle, bool *b)
+{
+ const struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_BOOL);
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ *b = !!tag->size;
+ return 0;
+}
+
+static int parse_blob(struct dcp_parse_ctx *handle, size_t size, u8 const **blob)
+{
+ const struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_BLOB);
+ const u8 *out;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ if (tag->size < size)
+ return -EINVAL;
+
+ out = parse_bytes(handle, tag->size);
+
+ if (IS_ERR(out))
+ return PTR_ERR(out);
+
+ *blob = out;
+ return 0;
+}
+
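+/*
+ * Container iteration: iterator_begin() consumes the dictionary or array tag
+ * and records its element count; the foreach macros below then visit each
+ * entry (for dictionaries the count is the number of key/value pairs).
+ */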
+struct iterator {
+ struct dcp_parse_ctx *handle;
+ u32 idx, len;
+};
+
+static int iterator_begin(struct dcp_parse_ctx *handle, struct iterator *it,
+ bool dict)
+{
+ const struct dcp_parse_tag *tag;
+ enum dcp_parse_type type = dict ? DCP_TYPE_DICTIONARY : DCP_TYPE_ARRAY;
+
+ *it = (struct iterator) {
+ .handle = handle,
+ .idx = 0
+ };
+
+ tag = parse_tag_of_type(it->handle, type);
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ it->len = tag->size;
+ return 0;
+}
+
+#define dcp_parse_foreach_in_array(handle, it) \
+ for (iterator_begin(handle, &it, false); it.idx < it.len; ++it.idx)
+#define dcp_parse_foreach_in_dict(handle, it) \
+ for (iterator_begin(handle, &it, true); it.idx < it.len; ++it.idx)
+
+int parse(const void *blob, size_t size, struct dcp_parse_ctx *ctx)
+{
+ const u32 *header;
+
+ *ctx = (struct dcp_parse_ctx) {
+ .blob = blob,
+ .len = size,
+ .pos = 0,
+ };
+
+ header = parse_u32(ctx);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ if (*header != DCP_PARSE_HEADER)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int parse_dimension(struct dcp_parse_ctx *handle, struct dimension *dim)
+{
+ struct iterator it;
+ int ret = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "Active"))
+ ret = parse_int(it.handle, &dim->active);
+ else if (!strcmp(key, "Total"))
+ ret = parse_int(it.handle, &dim->total);
+ else if (!strcmp(key, "FrontPorch"))
+ ret = parse_int(it.handle, &dim->front_porch);
+ else if (!strcmp(key, "SyncWidth"))
+ ret = parse_int(it.handle, &dim->sync_width);
+ else if (!strcmp(key, "PreciseSyncRate"))
+ ret = parse_int(it.handle, &dim->precise_sync_rate);
+ else
+ skip(it.handle);
+
+ if (!IS_ERR_OR_NULL(key))
+ kfree(key);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct color_mode {
+ s64 colorimetry;
+ s64 depth;
+ s64 dynamic_range;
+ s64 eotf;
+ s64 id;
+ s64 pixel_encoding;
+ s64 score;
+};
+
+static int fill_color_mode(struct dcp_color_mode *color,
+ struct color_mode *cmode)
+{
+ if (color->score >= cmode->score)
+ return 0;
+
+ if (cmode->colorimetry < 0 || cmode->colorimetry >= DCP_COLORIMETRY_COUNT)
+ return -EINVAL;
+ if (cmode->depth < 8 || cmode->depth > 12)
+ return -EINVAL;
+ if (cmode->dynamic_range < 0 || cmode->dynamic_range >= DCP_COLOR_YCBCR_RANGE_COUNT)
+ return -EINVAL;
+ if (cmode->eotf < 0 || cmode->eotf >= DCP_EOTF_COUNT)
+ return -EINVAL;
+ if (cmode->pixel_encoding < 0 || cmode->pixel_encoding >= DCP_COLOR_FORMAT_COUNT)
+ return -EINVAL;
+
+ color->score = cmode->score;
+ color->id = cmode->id;
+ color->eotf = cmode->eotf;
+ color->format = cmode->pixel_encoding;
+ color->colorimetry = cmode->colorimetry;
+ color->range = cmode->dynamic_range;
+ color->depth = cmode->depth;
+
+ return 0;
+}
+
+static int parse_color_modes(struct dcp_parse_ctx *handle,
+ struct dcp_display_mode *out)
+{
+ struct iterator outer_it;
+ int ret = 0;
+ out->sdr_444.score = -1;
+ out->sdr_rgb.score = -1;
+ out->sdr.score = -1;
+ out->best.score = -1;
+
+ dcp_parse_foreach_in_array(handle, outer_it) {
+ struct iterator it;
+ bool is_virtual = true;
+ struct color_mode cmode = { .score = -1, .id = -1 };
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "Colorimetry"))
+ ret = parse_int(it.handle, &cmode.colorimetry);
+ else if (!strcmp(key, "Depth"))
+ ret = parse_int(it.handle, &cmode.depth);
+ else if (!strcmp(key, "DynamicRange"))
+ ret = parse_int(it.handle, &cmode.dynamic_range);
+ else if (!strcmp(key, "EOTF"))
+ ret = parse_int(it.handle, &cmode.eotf);
+ else if (!strcmp(key, "ID"))
+ ret = parse_int(it.handle, &cmode.id);
+ else if (!strcmp(key, "IsVirtual"))
+ ret = parse_bool(it.handle, &is_virtual);
+ else if (!strcmp(key, "PixelEncoding"))
+ ret = parse_int(it.handle, &cmode.pixel_encoding);
+ else if (!strcmp(key, "Score"))
+ ret = parse_int(it.handle, &cmode.score);
+ else
+ skip(it.handle);
+
+ if (!IS_ERR_OR_NULL(key))
+ kfree(key);
+
+ if (ret)
+ return ret;
+ }
+
+ /* Skip virtual or partial entries */
+ if (is_virtual || cmode.score < 0 || cmode.id < 0)
+ continue;
+
+ trace_iomfb_color_mode(handle->dcp, cmode.id, cmode.score,
+ cmode.depth, cmode.colorimetry,
+ cmode.eotf, cmode.dynamic_range,
+ cmode.pixel_encoding);
+
+ if (cmode.eotf == DCP_EOTF_SDR_GAMMA) {
+ if (cmode.pixel_encoding == DCP_COLOR_FORMAT_RGB &&
+ cmode.depth <= 10)
+ fill_color_mode(&out->sdr_rgb, &cmode);
+ else if (cmode.pixel_encoding == DCP_COLOR_FORMAT_YCBCR444 &&
+ cmode.depth <= 10)
+ fill_color_mode(&out->sdr_444, &cmode);
+ fill_color_mode(&out->sdr, &cmode);
+ }
+ fill_color_mode(&out->best, &cmode);
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate the pixel clock for a mode given the 16:16 fixed-point refresh
+ * rate. The pixel clock is the refresh rate times the pixel count. DRM
+ * specifies the clock in kHz. The intermediate result may overflow a u32, so
+ * use a u64 where required.
+ */
+static u32 calculate_clock(struct dimension *horiz, struct dimension *vert)
+{
+ u32 pixels = horiz->total * vert->total;
+ u64 clock = mul_u32_u32(pixels, vert->precise_sync_rate);
+
+ return DIV_ROUND_CLOSEST_ULL(clock >> 16, 1000);
+}
+
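+/*
+ * Parse one TimingElements entry into a drm_display_mode plus the matching
+ * DCP timing and color mode IDs. Entries we cannot or do not want to use
+ * (virtual modes, no usable color mode, above the bandwidth limit) are
+ * rejected with -EINVAL, which callers treat as "skip this mode".
+ */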
+static int parse_mode(struct dcp_parse_ctx *handle,
+ struct dcp_display_mode *out, s64 *score, int width_mm,
+ int height_mm, unsigned notch_height)
+{
+ int ret = 0;
+ struct iterator it;
+ struct dimension horiz, vert;
+ s64 id = -1;
+ s64 best_color_mode = -1;
+ bool is_virtual = false;
+ struct drm_display_mode *mode = &out->mode;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (is_virtual)
+ skip(it.handle);
+ else if (!strcmp(key, "HorizontalAttributes"))
+ ret = parse_dimension(it.handle, &horiz);
+ else if (!strcmp(key, "VerticalAttributes"))
+ ret = parse_dimension(it.handle, &vert);
+ else if (!strcmp(key, "ColorModes"))
+ ret = parse_color_modes(it.handle, out);
+ else if (!strcmp(key, "ID"))
+ ret = parse_int(it.handle, &id);
+ else if (!strcmp(key, "IsVirtual"))
+ ret = parse_bool(it.handle, &is_virtual);
+ else if (!strcmp(key, "Score"))
+ ret = parse_int(it.handle, score);
+ else
+ skip(it.handle);
+
+ if (!IS_ERR_OR_NULL(key))
+ kfree(key);
+
+ if (ret) {
+ trace_iomfb_parse_mode_fail(id, &horiz, &vert, best_color_mode, is_virtual, *score);
+ return ret;
+ }
+ }
+ if (out->sdr_rgb.score >= 0)
+ best_color_mode = out->sdr_rgb.id;
+ else if (out->sdr_444.score >= 0)
+ best_color_mode = out->sdr_444.id;
+ else if (out->sdr.score >= 0)
+ best_color_mode = out->sdr.id;
+ else if (out->best.score >= 0)
+ best_color_mode = out->best.id;
+
+ trace_iomfb_parse_mode_success(id, &horiz, &vert, best_color_mode,
+ is_virtual, *score);
+
+ /*
+ * Reject modes without valid color mode.
+ */
+ if (best_color_mode < 0)
+ return -EINVAL;
+
+ /*
+ * We need to skip virtual modes. In some cases, virtual modes are "too
+ * big" for the monitor and can cause breakage. It is unclear why the
+ * DCP reports these modes at all. Treat as a recoverable error.
+ */
+ if (is_virtual)
+ return -EINVAL;
+
+ /*
+ * HACK:
+ * Ignore the 120 Hz mode on j314/j316 (identified by resolution).
+ * DCP limits normal swaps to 60 Hz anyway and the 120 Hz mode might
+ * cause choppiness with X11.
+ * Just downscoring it and thus making the 60 Hz mode the preferred mode
+ * seems not enough for some user space.
+ */
+ if (vert.precise_sync_rate >> 16 == 120 &&
+ ((horiz.active == 3024 && vert.active == 1964) ||
+ (horiz.active == 3456 && vert.active == 2234)))
+ return -EINVAL;
+
+ /*
+ * HACK: reject modes with a pixel clock above 926,484.480 kHz (the
+ * bandwidth limit reported by DCP). This allows 4k at 100 Hz and 5k at
+ * 60 Hz but not much beyond; DSC setup seems to require additional steps.
+ */
+ if (calculate_clock(&horiz, &vert) > 926484) {
+ pr_info("dcp: rejecting mode %lldx%lld@%lld.%03lld (pixel clk:%d)\n",
+ horiz.active, vert.active, vert.precise_sync_rate >> 16,
+ ((1000 * vert.precise_sync_rate) >> 16) % 1000,
+ calculate_clock(&horiz, &vert));
+ return -EINVAL;
+ }
+
+ vert.active -= notch_height;
+ vert.sync_width += notch_height;
+
+ /* From here we must succeed. Start filling out the mode. */
+ *mode = (struct drm_display_mode) {
+ .type = DRM_MODE_TYPE_DRIVER,
+ .clock = calculate_clock(&horiz, &vert),
+
+ .vdisplay = vert.active,
+ .vsync_start = vert.active + vert.front_porch,
+ .vsync_end = vert.active + vert.front_porch + vert.sync_width,
+ .vtotal = vert.total,
+
+ .hdisplay = horiz.active,
+ .hsync_start = horiz.active + horiz.front_porch,
+ .hsync_end = horiz.active + horiz.front_porch +
+ horiz.sync_width,
+ .htotal = horiz.total,
+
+ .width_mm = width_mm,
+ .height_mm = height_mm,
+ };
+
+ drm_mode_set_name(mode);
+
+ out->timing_mode_id = id;
+ out->color_mode_id = best_color_mode;
+
+ trace_iomfb_timing_mode(handle->dcp, id, *score, horiz.active,
+ vert.active, vert.precise_sync_rate,
+ best_color_mode);
+
+ return 0;
+}
+
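+/*
+ * Parse the TimingElements array into an array of dcp_display_mode entries.
+ * The highest scoring mode is flagged as DRM's preferred mode. The returned
+ * array is heap-allocated and owned by the caller.
+ */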
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+ unsigned int *count, int width_mm,
+ int height_mm, unsigned notch_height)
+{
+ struct iterator it;
+ int ret;
+ struct dcp_display_mode *mode, *modes;
+ struct dcp_display_mode *best_mode = NULL;
+ s64 score, best_score = -1;
+
+ ret = iterator_begin(handle, &it, false);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Start with a worst case allocation */
+ modes = kmalloc_array(it.len, sizeof(*modes), GFP_KERNEL);
+ *count = 0;
+
+ if (!modes)
+ return ERR_PTR(-ENOMEM);
+
+ for (; it.idx < it.len; ++it.idx) {
+ mode = &modes[*count];
+ ret = parse_mode(it.handle, mode, &score, width_mm, height_mm, notch_height);
+
+ /* Errors for a single mode are recoverable -- just skip it. */
+ if (ret)
+ continue;
+
+ /* Process a successful mode */
+ (*count)++;
+
+ if (score > best_score) {
+ best_score = score;
+ best_mode = mode;
+ }
+ }
+
+ if (best_mode != NULL)
+ best_mode->mode.type |= DRM_MODE_TYPE_PREFERRED;
+
+ return modes;
+}
+
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+ int *height_mm)
+{
+ int ret = 0;
+ struct iterator it;
+ s64 width_cm = 0, height_cm = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "MaxHorizontalImageSize"))
+ ret = parse_int(it.handle, &width_cm);
+ else if (!strcmp(key, "MaxVerticalImageSize"))
+ ret = parse_int(it.handle, &height_cm);
+ else
+ skip(it.handle);
+
+ if (!IS_ERR_OR_NULL(key))
+ kfree(key);
+
+ if (ret)
+ return ret;
+ }
+
+ /* 1cm = 10mm */
+ *width_mm = 10 * width_cm;
+ *height_mm = 10 * height_cm;
+
+ return 0;
+}
+
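+/*
+ * Parse an EPIC service init message. On success the caller owns the
+ * *name and *class strings and must kfree() them; on any failure both
+ * are left pointing at ERR_PTR values.
+ */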
+int parse_epic_service_init(struct dcp_parse_ctx *handle, const char **name,
+ const char **class, s64 *unit)
+{
+ int ret = 0;
+ struct iterator it;
+ bool parsed_unit = false;
+ bool parsed_name = false;
+ bool parsed_class = false;
+
+ *name = ERR_PTR(-ENOENT);
+ *class = ERR_PTR(-ENOENT);
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ break;
+ }
+
+ if (!strcmp(key, "EPICName")) {
+ *name = parse_string(it.handle);
+ if (IS_ERR(*name))
+ ret = PTR_ERR(*name);
+ else
+ parsed_name = true;
+ } else if (!strcmp(key, "EPICProviderClass")) {
+ *class = parse_string(it.handle);
+ if (IS_ERR(*class))
+ ret = PTR_ERR(*class);
+ else
+ parsed_class = true;
+ } else if (!strcmp(key, "EPICUnit")) {
+ ret = parse_int(it.handle, unit);
+ if (!ret)
+ parsed_unit = true;
+ } else {
+ skip(it.handle);
+ }
+
+ kfree(key);
+ if (ret)
+ break;
+ }
+
+ if (!parsed_unit || !parsed_name || !parsed_class)
+ ret = -ENOENT;
+
+ if (ret) {
+ if (!IS_ERR(*name)) {
+ kfree(*name);
+ *name = ERR_PTR(ret);
+ }
+ if (!IS_ERR(*class)) {
+ kfree(*class);
+ *class = ERR_PTR(ret);
+ }
+ }
+
+ return ret;
+}
+
+int parse_sample_rate_bit(struct dcp_parse_ctx *handle, unsigned int *ratebit)
+{
+ s64 rate;
+ int ret = parse_int(handle, &rate);
+
+ if (ret)
+ return ret;
+
+ *ratebit = snd_pcm_rate_to_rate_bit(rate);
+ if (*ratebit == SNDRV_PCM_RATE_KNOT) {
+ /*
+ * The rate wasn't recognized, and unless we supply
+ * a supplementary constraint, the SNDRV_PCM_RATE_KNOT bit
+ * will allow any rate. So clear it.
+ */
+ *ratebit = 0;
+ }
+
+ return 0;
+}
+
+int parse_sample_fmtbit(struct dcp_parse_ctx *handle, u64 *fmtbit)
+{
+ s64 sample_size;
+ int ret = parse_int(handle, &sample_size);
+
+ if (ret)
+ return ret;
+
+ switch (sample_size) {
+ case 16:
+ *fmtbit = SNDRV_PCM_FMTBIT_S16;
+ break;
+ case 20:
+ *fmtbit = SNDRV_PCM_FMTBIT_S20;
+ break;
+ case 24:
+ *fmtbit = SNDRV_PCM_FMTBIT_S24;
+ break;
+ case 32:
+ *fmtbit = SNDRV_PCM_FMTBIT_S32;
+ break;
+ default:
+ *fmtbit = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static struct {
+ const char *label;
+ u8 type;
+} chan_position_names[] = {
+ { "Front Left", SNDRV_CHMAP_FL },
+ { "Front Right", SNDRV_CHMAP_FR },
+ { "Rear Left", SNDRV_CHMAP_RL },
+ { "Rear Right", SNDRV_CHMAP_RR },
+ { "Front Center", SNDRV_CHMAP_FC },
+ { "Low Frequency Effects", SNDRV_CHMAP_LFE },
+ { "Rear Center", SNDRV_CHMAP_RC },
+ { "Front Left Center", SNDRV_CHMAP_FLC },
+ { "Front Right Center", SNDRV_CHMAP_FRC },
+ { "Rear Left Center", SNDRV_CHMAP_RLC },
+ { "Rear Right Center", SNDRV_CHMAP_RRC },
+ { "Front Left Wide", SNDRV_CHMAP_FLW },
+ { "Front Right Wide", SNDRV_CHMAP_FRW },
+ { "Front Left High", SNDRV_CHMAP_FLH },
+ { "Front Center High", SNDRV_CHMAP_FCH },
+ { "Front Right High", SNDRV_CHMAP_FRH },
+ { "Top Center", SNDRV_CHMAP_TC },
+};
+
+static void append_chmap(struct snd_pcm_chmap_elem *chmap, u8 type)
+{
+ if (!chmap || chmap->channels >= ARRAY_SIZE(chmap->map))
+ return;
+
+ chmap->map[chmap->channels] = type;
+ chmap->channels++;
+}
+
+static int parse_chmap(struct dcp_parse_ctx *handle, struct snd_pcm_chmap_elem *chmap)
+{
+ struct iterator it;
+ int i, ret;
+
+ if (!chmap) {
+ skip(handle);
+ return 0;
+ }
+
+ chmap->channels = 0;
+
+ dcp_parse_foreach_in_array(handle, it) {
+ for (i = 0; i < ARRAY_SIZE(chan_position_names); i++)
+ if (consume_string(it.handle, chan_position_names[i].label))
+ break;
+
+ if (i == ARRAY_SIZE(chan_position_names)) {
+ ret = skip(it.handle);
+ if (ret)
+ return ret;
+
+ append_chmap(chmap, SNDRV_CHMAP_UNKNOWN);
+ continue;
+ }
+
+ append_chmap(chmap, chan_position_names[i].type);
+ }
+
+ return 0;
+}
+
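+/*
+ * Parse one AudioChannelLayoutElements entry, optionally reporting its
+ * ActiveChannelCount through *nchans_out and its ChannelLayout through
+ * *chmap.
+ */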
+static int parse_chan_layout_element(struct dcp_parse_ctx *handle,
+ unsigned int *nchans_out,
+ struct snd_pcm_chmap_elem *chmap)
+{
+ struct iterator it;
+ int ret;
+ s64 nchans = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ if (consume_string(it.handle, "ActiveChannelCount"))
+ ret = parse_int(it.handle, &nchans);
+ else if (consume_string(it.handle, "ChannelLayout"))
+ ret = parse_chmap(it.handle, chmap);
+ else
+ ret = skip_pair(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ if (nchans_out)
+ *nchans_out = nchans;
+
+ return 0;
+}
+
+static int parse_nchans_mask(struct dcp_parse_ctx *handle, unsigned int *mask)
+{
+ struct iterator it;
+ int ret;
+
+ *mask = 0;
+
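+ /* Bit n of the mask means an n-channel layout is available. */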
+ dcp_parse_foreach_in_array(handle, it) {
+ unsigned int nchans;
+
+ ret = parse_chan_layout_element(it.handle, &nchans, NULL);
+ if (ret)
+ return ret;
+ *mask |= 1 << nchans;
+ }
+
+ return 0;
+}
+
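+/*
+ * Check one AVEP element against the caller's sieve. Returns 1 if its
+ * rates, formats and channel counts all intersect the sieve
+ * (accumulating them into *hits if given), 0 if it does not match, or
+ * a negative error code on parse failure.
+ */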
+static int parse_avep_element(struct dcp_parse_ctx *handle,
+ struct dcp_sound_format_mask *sieve,
+ struct dcp_sound_format_mask *hits)
+{
+ struct dcp_sound_format_mask mask = {0, 0, 0};
+ struct iterator it;
+ int ret;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ if (consume_string(handle, "StreamSampleRate"))
+ ret = parse_sample_rate_bit(it.handle, &mask.rates);
+ else if (consume_string(handle, "SampleSize"))
+ ret = parse_sample_fmtbit(it.handle, &mask.formats);
+ else if (consume_string(handle, "AudioChannelLayoutElements"))
+ ret = parse_nchans_mask(it.handle, &mask.nchans);
+ else
+ ret = skip_pair(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ trace_avep_sound_mode(handle->dcp, mask.rates, mask.formats, mask.nchans);
+
+ if (!(mask.rates & sieve->rates) || !(mask.formats & sieve->formats) ||
+ !(mask.nchans & sieve->nchans))
+ return 0;
+
+ if (hits) {
+ hits->rates |= mask.rates;
+ hits->formats |= mask.formats;
+ hits->nchans |= mask.nchans;
+ }
+
+ return 1;
+}
+
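+/*
+ * Extract from a single AVEP element the channel map of the layout
+ * whose channel count matches selected_nchans, along with the
+ * ElementData cookie.
+ */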
+static int parse_mode_in_avep_element(struct dcp_parse_ctx *handle,
+ unsigned int selected_nchans,
+ struct snd_pcm_chmap_elem *chmap,
+ struct dcp_sound_cookie *cookie)
+{
+ struct iterator it;
+ struct dcp_parse_ctx save_handle;
+ int ret;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ if (consume_string(it.handle, "AudioChannelLayoutElements")) {
+ struct iterator inner_it;
+ unsigned int nchans;
+
+ dcp_parse_foreach_in_array(it.handle, inner_it) {
+ save_handle = *it.handle;
+ ret = parse_chan_layout_element(inner_it.handle,
+ &nchans, NULL);
+ if (ret)
+ return ret;
+
+ if (nchans != selected_nchans)
+ continue;
+
+ /*
+ * Now that we know this layout matches the
+ * selected channel number, reread the element
+ * and fill in the channel map.
+ */
+ *inner_it.handle = save_handle;
+ ret = parse_chan_layout_element(inner_it.handle,
+ NULL, chmap);
+ if (ret)
+ return ret;
+ }
+ } else if (consume_string(it.handle, "ElementData")) {
+ const u8 *blob;
+
+ ret = parse_blob(it.handle, sizeof(*cookie), &blob);
+ if (ret)
+ return ret;
+
+ if (cookie)
+ memcpy(cookie, blob, sizeof(*cookie));
+ } else {
+ ret = skip_pair(it.handle);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
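+/*
+ * Walk all AVEP elements and OR the rate, format and channel masks of
+ * every element that passes the sieve into *hits.
+ */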
+int parse_sound_constraints(struct dcp_parse_ctx *handle,
+ struct dcp_sound_format_mask *sieve,
+ struct dcp_sound_format_mask *hits)
+{
+ int ret;
+ struct iterator it;
+
+ if (hits) {
+ hits->rates = 0;
+ hits->formats = 0;
+ hits->nchans = 0;
+ }
+
+ dcp_parse_foreach_in_array(handle, it) {
+ ret = parse_avep_element(it.handle, sieve, hits);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(parse_sound_constraints);
+
+int parse_sound_mode(struct dcp_parse_ctx *handle,
+ struct dcp_sound_format_mask *sieve,
+ struct snd_pcm_chmap_elem *chmap,
+ struct dcp_sound_cookie *cookie)
+{
+ struct dcp_parse_ctx save_handle;
+ struct iterator it;
+ int ret;
+
+ dcp_parse_foreach_in_array(handle, it) {
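+ /*
+ * Remember the parse position so a matching element can be
+ * re-parsed below for its channel map and cookie.
+ */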
+ save_handle = *it.handle;
+ ret = parse_avep_element(it.handle, sieve, NULL);
+
+ if (!ret)
+ continue;
+
+ if (ret < 0)
+ return ret;
+
+ ret = parse_mode_in_avep_element(&save_handle, __ffs(sieve->nchans),
+ chmap, cookie);
+ if (ret < 0)
+ return ret;
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(parse_sound_mode);
+
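+/*
+ * Parse a "Display (Event Forward)" system log entry carrying the
+ * current brightness in millinits together with the iDAC value and a
+ * timestamp. Returns -EINVAL if the entry is not such an event or is
+ * incomplete.
+ */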
+int parse_system_log_mnits(struct dcp_parse_ctx *handle, struct dcp_system_ev_mnits *entry)
+{
+ struct iterator it;
+ int ret = 0;
+ s64 mnits = -1;
+ s64 idac = -1;
+ s64 timestamp = -1;
+ bool type_match = false;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ } else if (!strcmp(key, "mNits")) {
+ ret = parse_int(it.handle, &mnits);
+ } else if (!strcmp(key, "iDAC")) {
+ ret = parse_int(it.handle, &idac);
+ } else if (!strcmp(key, "logEvent")) {
+ const char *value = parse_string(it.handle);
+ if (!IS_ERR_OR_NULL(value)) {
+ type_match = strcmp(value, "Display (Event Forward)") == 0;
+ kfree(value);
+ }
+ } else if (!strcmp(key, "timestamp")) {
+ ret = parse_int(it.handle, &timestamp);
+ } else {
+ skip(it.handle);
+ }
+
+ if (!IS_ERR_OR_NULL(key))
+ kfree(key);
+
+ if (ret) {
+ pr_err("dcp parser: failed to parse mNits sys event\n");
+ return ret;
+ }
+ }
+
+ if (!type_match || mnits < 0 || idac < 0 || timestamp < 0)
+ return -EINVAL;
+
+ entry->millinits = mnits;
+ entry->idac = idac;
+ entry->timestamp = timestamp;
+
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_PARSER_H__
+#define __APPLE_DCP_PARSER_H__
+
+/* For mode parsing */
+#include <drm/drm_modes.h>
+
+struct apple_dcp;
+
+struct dcp_parse_ctx {
+ struct apple_dcp *dcp;
+ const void *blob;
+ u32 pos, len;
+};
+
+enum dcp_color_eotf {
+ DCP_EOTF_SDR_GAMMA = 0, // "SDR gamma"
+ DCP_EOTF_HDR_GAMMA = 1, // "HDR gamma"
+ DCP_EOTF_ST_2084 = 2, // "ST 2084 (PQ)"
+ DCP_EOTF_BT_2100 = 3, // "BT.2100 (HLG)"
+ DCP_EOTF_COUNT
+};
+
+enum dcp_color_format {
+ DCP_COLOR_FORMAT_RGB = 0, // "RGB"
+ DCP_COLOR_FORMAT_YCBCR420 = 1, // "YUV 4:2:0"
+ DCP_COLOR_FORMAT_YCBCR422 = 3, // "YUV 4:2:2"
+ DCP_COLOR_FORMAT_YCBCR444 = 2, // "YUV 4:4:4"
+ DCP_COLOR_FORMAT_DV_NATIVE = 4, // "DolbyVision (native)"
+ DCP_COLOR_FORMAT_DV_HDMI = 5, // "DolbyVision (HDMI)"
+ DCP_COLOR_FORMAT_YCBCR422_DP = 6, // "YCbCr 4:2:2 (DP tunnel)"
+ DCP_COLOR_FORMAT_YCBCR422_HDMI = 7, // "YCbCr 4:2:2 (HDMI tunnel)"
+ DCP_COLOR_FORMAT_DV_LL_YCBCR422 = 8, // "DolbyVision LL YCbCr 4:2:2"
+ DCP_COLOR_FORMAT_DV_LL_YCBCR422_DP = 9, // "DolbyVision LL YCbCr 4:2:2 (DP)"
+ DCP_COLOR_FORMAT_DV_LL_YCBCR422_HDMI = 10, // "DolbyVision LL YCbCr 4:2:2 (HDMI)"
+ DCP_COLOR_FORMAT_DV_LL_YCBCR444 = 11, // "DolbyVision LL YCbCr 4:4:4"
+ DCP_COLOR_FORMAT_DV_LL_RGB422 = 12, // "DolbyVision LL RGB 4:2:2"
+ DCP_COLOR_FORMAT_GRGB_BLUE_422 = 13, // "GRGB as YCbCr422 (Even line blue)"
+ DCP_COLOR_FORMAT_GRGB_RED_422 = 14, // "GRGB as YCbCr422 (Even line red)"
+ DCP_COLOR_FORMAT_COUNT
+};
+
+enum dcp_colorimetry {
+ DCP_COLORIMETRY_BT601 = 0, // "SMPTE 170M/BT.601"
+ DCP_COLORIMETRY_BT709 = 1, // "BT.701"
+ DCP_COLORIMETRY_XVYCC_601 = 2, // "xvYCC601"
+ DCP_COLORIMETRY_XVYCC_709 = 3, // "xvYCC709"
+ DCP_COLORIMETRY_SYCC_601 = 4, // "sYCC601"
+ DCP_COLORIMETRY_ADOBE_YCC_601 = 5, // "AdobeYCC601"
+ DCP_COLORIMETRY_BT2020_CYCC = 6, // "BT.2020 (c)"
+ DCP_COLORIMETRY_BT2020_YCC = 7, // "BT.2020 (nc)"
+ DCP_COLORIMETRY_VSVDB = 8, // "DolbyVision VSVDB"
+ DCP_COLORIMETRY_BT2020_RGB = 9, // "BT.2020 (RGB)"
+ DCP_COLORIMETRY_SRGB = 10, // "sRGB"
+ DCP_COLORIMETRY_SCRGB = 11, // "scRGB"
+ DCP_COLORIMETRY_SCRGB_FIXED = 12, // "scRGBfixed"
+ DCP_COLORIMETRY_ADOBE_RGB = 13, // "AdobeRGB"
+ DCP_COLORIMETRY_DCI_P3_RGB_D65 = 14, // "DCI-P3 (D65)"
+ DCP_COLORIMETRY_DCI_P3_RGB_THEATER = 15, // "DCI-P3 (Theater)"
+ DCP_COLORIMETRY_RGB = 16, // "Default RGB"
+ DCP_COLORIMETRY_COUNT
+};
+
+enum dcp_color_range {
+ DCP_COLOR_YCBCR_RANGE_FULL = 0,
+ DCP_COLOR_YCBCR_RANGE_LIMITED = 1,
+ DCP_COLOR_YCBCR_RANGE_COUNT
+};
+
+struct dcp_color_mode {
+ s64 score;
+ u32 id;
+ enum dcp_color_eotf eotf;
+ enum dcp_color_format format;
+ enum dcp_colorimetry colorimetry;
+ enum dcp_color_range range;
+ u8 depth;
+};
+
+/*
+ * Represents a single display mode. These mode objects are populated at
+ * runtime based on the TimingElements dictionary sent by the DCP.
+ */
+struct dcp_display_mode {
+ struct drm_display_mode mode;
+ u32 color_mode_id;
+ u32 timing_mode_id;
+ struct dcp_color_mode sdr_rgb;
+ struct dcp_color_mode sdr_444;
+ struct dcp_color_mode sdr;
+ struct dcp_color_mode best;
+};
+
+struct dimension {
+ s64 total, front_porch, sync_width, active;
+ s64 precise_sync_rate;
+};
+
+int parse(const void *blob, size_t size, struct dcp_parse_ctx *ctx);
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+ unsigned int *count, int width_mm,
+ int height_mm, unsigned notch_height);
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+ int *height_mm);
+int parse_epic_service_init(struct dcp_parse_ctx *handle, const char **name,
+ const char **class, s64 *unit);
+
+struct dcp_sound_format_mask {
+ u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ unsigned int rates; /* SNDRV_PCM_RATE_* */
+ unsigned int nchans;
+};
+
+struct dcp_sound_cookie {
+ u8 data[24];
+};
+
+struct snd_pcm_chmap_elem;
+int parse_sound_constraints(struct dcp_parse_ctx *handle,
+ struct dcp_sound_format_mask *sieve,
+ struct dcp_sound_format_mask *hits);
+int parse_sound_mode(struct dcp_parse_ctx *handle,
+ struct dcp_sound_format_mask *sieve,
+ struct snd_pcm_chmap_elem *chmap,
+ struct dcp_sound_cookie *cookie);
+
+struct dcp_system_ev_mnits {
+ u32 timestamp;
+ u32 millinits;
+ u32 idac;
+};
+
+int parse_system_log_mnits(struct dcp_parse_ctx *handle,
+ struct dcp_system_ev_mnits *entry);
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#include <linux/completion.h>
+
+#include "afk.h"
+#include "dcp.h"
+#include "parser.h"
+
+static bool enable_verbose_logging;
+module_param(enable_verbose_logging, bool, 0644);
+MODULE_PARM_DESC(enable_verbose_logging, "Enable DCP firmware verbose logging");
+
+/*
+ * Serialized setProperty("gAFKConfigLogMask", 0xffff) IPC call which
+ * will set the DCP firmware log level to the most verbose setting
+ */
+#define SYSTEM_SET_PROPERTY 0x43
+static const u8 setprop_gAFKConfigLogMask_ffff[] = {
+ 0x14, 0x00, 0x00, 0x00, 0x67, 0x41, 0x46, 0x4b, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x73,
+ 0x6b, 0x00, 0x00, 0x00, 0xd3, 0x00, 0x00, 0x00, 0x40, 0x00,
+ 0x00, 0x84, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+struct systemep_work {
+ struct apple_epic_service *service;
+ struct work_struct work;
+};
+
+static void system_log_work(struct work_struct *work_)
+{
+ struct systemep_work *work =
+ container_of(work_, struct systemep_work, work);
+
+ afk_send_command(work->service, SYSTEM_SET_PROPERTY,
+ setprop_gAFKConfigLogMask_ffff,
+ sizeof(setprop_gAFKConfigLogMask_ffff), NULL,
+ sizeof(setprop_gAFKConfigLogMask_ffff), NULL);
+ complete(&work->service->ep->dcp->systemep_done);
+ kfree(work);
+}
+
+static void system_init(struct apple_epic_service *service, const char *name,
+ const char *class, s64 unit)
+{
+ struct systemep_work *work;
+
+ if (!enable_verbose_logging)
+ return;
+
+ /*
+ * We're called from the service message handler thread and can't
+ * dispatch a blocking message from there.
+ */
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+
+ work->service = service;
+ INIT_WORK(&work->work, system_log_work);
+ schedule_work(&work->work);
+}
+
+static void powerlog_init(struct apple_epic_service *service, const char *name,
+ const char *class, s64 unit)
+{
+}
+
+static int powerlog_report(struct apple_epic_service *service, enum epic_subtype type,
+ const void *data, size_t data_size)
+{
+ struct dcp_system_ev_mnits mnits;
+ struct dcp_parse_ctx parse_ctx;
+ struct apple_dcp *dcp = service->ep->dcp;
+ int ret;
+
+ dev_dbg(dcp->dev, "systemep[ch:%u]: report type:%02x len:%zu\n",
+ service->channel, type, data_size);
+
+ if (type != EPIC_SUBTYPE_STD_SERVICE)
+ return 0;
+
+ ret = parse(data, data_size, &parse_ctx);
+ if (ret) {
+ dev_warn(service->ep->dcp->dev, "systemep: failed to parse report: %d\n", ret);
+ return ret;
+ }
+
+ ret = parse_system_log_mnits(&parse_ctx, &mnits);
+ if (ret) {
+ /* Ignore parse errors in case the DCP sends unknown log events. */
+ dev_dbg(dcp->dev, "systemep: failed to parse mNits event: %d\n", ret);
+ return 0;
+ }
+
+ dev_dbg(dcp->dev, "systemep: mNits event: Nits: %u.%03u, iDAC: %u\n",
+ mnits.millinits / 1000, mnits.millinits % 1000, mnits.idac);
+
+ dcp->brightness.nits = mnits.millinits / 1000;
+
+ return 0;
+}
+
+static const struct apple_epic_service_ops systemep_ops[] = {
+ {
+ .name = "system",
+ .init = system_init,
+ },
+ {
+ .name = "powerlog-service",
+ .init = powerlog_init,
+ .report = powerlog_report,
+ },
+ {}
+};
+
+int systemep_init(struct apple_dcp *dcp)
+{
+ init_completion(&dcp->systemep_done);
+
+ dcp->systemep = afk_init(dcp, SYSTEM_ENDPOINT, systemep_ops);
+ afk_start(dcp->systemep);
+
+ if (!enable_verbose_logging)
+ return 0;
+
+ /*
+ * Timeouts aren't really fatal here: in the worst case we just weren't
+ * able to enable additional debug prints inside the DCP.
+ */
+ if (!wait_for_completion_timeout(&dcp->systemep_done,
+ msecs_to_jiffies(MSEC_PER_SEC)))
+ dev_err(dcp->dev, "systemep: couldn't enable verbose logs\n");
+
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Tracepoints for Apple DCP driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (C) The Asahi Linux Contributors */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dcp
+
+#if !defined(_TRACE_DCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCP_H
+
+#include "afk.h"
+#include "dptxep.h"
+#include "dcp-internal.h"
+#include "parser.h"
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#define show_dcp_endpoint(ep) \
+ __print_symbolic(ep, { SYSTEM_ENDPOINT, "system" }, \
+ { TEST_ENDPOINT, "test" }, \
+ { DCP_EXPERT_ENDPOINT, "dcpexpert" }, \
+ { DISP0_ENDPOINT, "disp0" }, \
+ { DPTX_ENDPOINT, "dptxport" }, \
+ { HDCP_ENDPOINT, "hdcp" }, \
+ { REMOTE_ALLOC_ENDPOINT, "remotealloc" }, \
+ { IOMFB_ENDPOINT, "iomfb" })
+#define print_epic_type(etype) \
+ __print_symbolic(etype, { EPIC_TYPE_NOTIFY, "notify" }, \
+ { EPIC_TYPE_COMMAND, "command" }, \
+ { EPIC_TYPE_REPLY, "reply" }, \
+ { EPIC_TYPE_NOTIFY_ACK, "notify-ack" })
+
+#define print_epic_category(ecat) \
+ __print_symbolic(ecat, { EPIC_CAT_REPORT, "report" }, \
+ { EPIC_CAT_NOTIFY, "notify" }, \
+ { EPIC_CAT_REPLY, "reply" }, \
+ { EPIC_CAT_COMMAND, "command" })
+
+#define show_dptxport_apcall(idx) \
+ __print_symbolic( \
+ idx, { DPTX_APCALL_ACTIVATE, "activate" }, \
+ { DPTX_APCALL_DEACTIVATE, "deactivate" }, \
+ { DPTX_APCALL_GET_MAX_DRIVE_SETTINGS, \
+ "get_max_drive_settings" }, \
+ { DPTX_APCALL_SET_DRIVE_SETTINGS, "set_drive_settings" }, \
+ { DPTX_APCALL_GET_DRIVE_SETTINGS, "get_drive_settings" }, \
+ { DPTX_APCALL_WILL_CHANGE_LINKG_CONFIG, \
+ "will_change_link_config" }, \
+ { DPTX_APCALL_DID_CHANGE_LINK_CONFIG, \
+ "did_change_link_config" }, \
+ { DPTX_APCALL_GET_MAX_LINK_RATE, "get_max_link_rate" }, \
+ { DPTX_APCALL_GET_LINK_RATE, "get_link_rate" }, \
+ { DPTX_APCALL_SET_LINK_RATE, "set_link_rate" }, \
+ { DPTX_APCALL_GET_MAX_LANE_COUNT, \
+ "get_max_lane_count" }, \
+ { DPTX_APCALL_GET_ACTIVE_LANE_COUNT, \
+ "get_active_lane_count" }, \
+ { DPTX_APCALL_SET_ACTIVE_LANE_COUNT, \
+ "set_active_lane_count" }, \
+ { DPTX_APCALL_GET_SUPPORTS_DOWN_SPREAD, \
+ "get_supports_downspread" }, \
+ { DPTX_APCALL_GET_DOWN_SPREAD, "get_downspread" }, \
+ { DPTX_APCALL_SET_DOWN_SPREAD, "set_downspread" }, \
+ { DPTX_APCALL_GET_SUPPORTS_LANE_MAPPING, \
+ "get_supports_lane_mapping" }, \
+ { DPTX_APCALL_SET_LANE_MAP, "set_lane_map" }, \
+ { DPTX_APCALL_GET_SUPPORTS_HPD, "get_supports_hpd" }, \
+ { DPTX_APCALL_FORCE_HOTPLUG_DETECT, "force_hotplug_detect" }, \
+ { DPTX_APCALL_INACTIVE_SINK_DETECTED, \
+ "inactive_sink_detected" }, \
+ { DPTX_APCALL_SET_TILED_DISPLAY_HINTS, \
+ "set_tiled_display_hints" }, \
+ { DPTX_APCALL_DEVICE_NOT_RESPONDING, \
+ "device_not_responding" }, \
+ { DPTX_APCALL_DEVICE_BUSY_TIMEOUT, "device_busy_timeout" }, \
+ { DPTX_APCALL_DEVICE_NOT_STARTED, "device_not_started" })
+
+TRACE_EVENT(dcp_recv_msg,
+ TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+ TP_ARGS(dcp, endpoint, message),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u8, endpoint)
+ __field(u64, message)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->endpoint = endpoint;
+ __entry->message = message;),
+
+ TP_printk("%s: endpoint 0x%x (%s): received message 0x%016llx",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(dcp_send_msg,
+ TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+ TP_ARGS(dcp, endpoint, message),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u8, endpoint)
+ __field(u64, message)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->endpoint = endpoint;
+ __entry->message = message;),
+
+ TP_printk("%s: endpoint 0x%x (%s): will send message 0x%016llx",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(
+ afk_getbuf, TP_PROTO(struct apple_dcp_afkep *ep, u16 size, u16 tag),
+ TP_ARGS(ep, size, tag),
+
+ TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev))
+ __field(u8, endpoint) __field(u16, size)
+ __field(u16, tag)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(ep->dcp->dev));
+ __entry->endpoint = ep->endpoint; __entry->size = size;
+ __entry->tag = tag;),
+
+ TP_printk(
+ "%s: endpoint 0x%x (%s): get buffer with size 0x%x and tag 0x%x",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->size,
+ __entry->tag));
+
+DECLARE_EVENT_CLASS(afk_rwptr_template,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+ TP_ARGS(ep, rptr, wptr),
+
+ TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev))
+ __field(u8, endpoint) __field(u32, rptr)
+ __field(u32, wptr)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(ep->dcp->dev));
+ __entry->endpoint = ep->endpoint;
+ __entry->rptr = rptr; __entry->wptr = wptr;),
+
+ TP_printk("%s: endpoint 0x%x (%s): rptr 0x%x, wptr 0x%x",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->rptr,
+ __entry->wptr));
+
+DEFINE_EVENT(afk_rwptr_template, afk_recv_rwptr_pre,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+ TP_ARGS(ep, rptr, wptr));
+DEFINE_EVENT(afk_rwptr_template, afk_recv_rwptr_post,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+ TP_ARGS(ep, rptr, wptr));
+DEFINE_EVENT(afk_rwptr_template, afk_send_rwptr_pre,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+ TP_ARGS(ep, rptr, wptr));
+DEFINE_EVENT(afk_rwptr_template, afk_send_rwptr_post,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+ TP_ARGS(ep, rptr, wptr));
+
+TRACE_EVENT(
+ afk_recv_qe,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 magic, u32 size),
+ TP_ARGS(ep, rptr, magic, size),
+
+ TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev))
+ __field(u8, endpoint) __field(u32, rptr)
+ __field(u32, magic)
+ __field(u32, size)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(ep->dcp->dev));
+ __entry->endpoint = ep->endpoint; __entry->rptr = rptr;
+ __entry->magic = magic; __entry->size = size;),
+
+ TP_printk("%s: endpoint 0x%x (%s): QE rptr 0x%x, magic 0x%x, size 0x%x",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->rptr,
+ __entry->magic, __entry->size));
+
+TRACE_EVENT(
+ afk_recv_handle,
+ TP_PROTO(struct apple_dcp_afkep *ep, u32 channel, u32 type,
+ u32 data_size, struct epic_hdr *ehdr,
+ struct epic_sub_hdr *eshdr),
+ TP_ARGS(ep, channel, type, data_size, ehdr, eshdr),
+
+ TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev)) __field(
+ u8, endpoint) __field(u32, channel) __field(u32, type)
+ __field(u32, data_size) __field(u8, category)
+ __field(u16, subtype)
+ __field(u16, tag)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(ep->dcp->dev));
+ __entry->endpoint = ep->endpoint;
+ __entry->channel = channel; __entry->type = type;
+ __entry->data_size = data_size;
+ __entry->category = eshdr->category,
+ __entry->subtype = le16_to_cpu(eshdr->type),
+ __entry->tag = le16_to_cpu(eshdr->tag)),
+
+ TP_printk(
+ "%s: endpoint 0x%x (%s): channel 0x%x, type 0x%x (%s), data_size 0x%x, category: 0x%x (%s), subtype: 0x%x, seq: 0x%x",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->channel,
+ __entry->type, print_epic_type(__entry->type),
+ __entry->data_size, __entry->category,
+ print_epic_category(__entry->category), __entry->subtype,
+ __entry->tag));
+
+TRACE_EVENT(iomfb_callback,
+ TP_PROTO(struct apple_dcp *dcp, int tag, const char *name),
+ TP_ARGS(dcp, tag, name),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dcp->dev))
+ __field(int, tag)
+ __field(const char *, name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dcp->dev));
+ __entry->tag = tag; __entry->name = name;
+ ),
+
+ TP_printk("%s: Callback D%03d %s", __get_str(devname), __entry->tag,
+ __entry->name));
+
+TRACE_EVENT(iomfb_push,
+ TP_PROTO(struct apple_dcp *dcp,
+ const struct dcp_method_entry *method, int context,
+ int offset, int depth),
+ TP_ARGS(dcp, method, context, offset, depth),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dcp->dev))
+ __string(name, method->name)
+ __field(int, context)
+ __field(int, offset)
+ __field(int, depth)),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dcp->dev));
+ __assign_str(name, method->name);
+ __entry->context = context; __entry->offset = offset;
+ __entry->depth = depth;
+ ),
+
+ TP_printk("%s: Method %s: context %u, offset %u, depth %u",
+ __get_str(devname), __get_str(name), __entry->context,
+ __entry->offset, __entry->depth));
+
+TRACE_EVENT(iomfb_swap_submit,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%d",
+ __entry->dcp,
+ __entry->swap_id)
+);
+
+TRACE_EVENT(iomfb_swap_complete,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%d",
+ __entry->dcp,
+ __entry->swap_id
+ )
+);
+
+TRACE_EVENT(iomfb_swap_complete_intent_gated,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id, u32 width, u32 height),
+ TP_ARGS(dcp, swap_id, width, height),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ __field(u32, width)
+ __field(u32, height)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ __entry->height = height;
+ __entry->width = width;
+ ),
+ TP_printk("dcp=%llx, swap_id=%u %ux%u",
+ __entry->dcp,
+ __entry->swap_id,
+ __entry->width,
+ __entry->height
+ )
+);
+
+TRACE_EVENT(iomfb_abort_swap_ap_gated,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%u",
+ __entry->dcp,
+ __entry->swap_id
+ )
+);
+
+DECLARE_EVENT_CLASS(iomfb_parse_mode_template,
+ TP_PROTO(s64 id, struct dimension *horiz, struct dimension *vert, s64 best_color_mode, bool is_virtual, s64 score),
+ TP_ARGS(id, horiz, vert, best_color_mode, is_virtual, score),
+
+ TP_STRUCT__entry(__field(s64, id)
+ __field_struct(struct dimension, horiz)
+ __field_struct(struct dimension, vert)
+ __field(s64, best_color_mode)
+ __field(bool, is_virtual)
+ __field(s64, score)),
+
+ TP_fast_assign(__entry->id = id;
+ __entry->horiz = *horiz;
+ __entry->vert = *vert;
+ __entry->best_color_mode = best_color_mode;
+ __entry->is_virtual = is_virtual;
+ __entry->score = score;),
+
+ TP_printk("id: %lld, best_color_mode: %lld, resolution:%lldx%lld virtual: %d, score: %lld",
+ __entry->id, __entry->best_color_mode,
+ __entry->horiz.active, __entry->vert.active,
+ __entry->is_virtual, __entry->score));
+
+DEFINE_EVENT(iomfb_parse_mode_template, iomfb_parse_mode_success,
+ TP_PROTO(s64 id, struct dimension *horiz, struct dimension *vert, s64 best_color_mode, bool is_virtual, s64 score),
+ TP_ARGS(id, horiz, vert, best_color_mode, is_virtual, score));
+
+DEFINE_EVENT(iomfb_parse_mode_template, iomfb_parse_mode_fail,
+ TP_PROTO(s64 id, struct dimension *horiz, struct dimension *vert, s64 best_color_mode, bool is_virtual, s64 score),
+ TP_ARGS(id, horiz, vert, best_color_mode, is_virtual, score));
+
+TRACE_EVENT(dptxport_init, TP_PROTO(struct apple_dcp *dcp, u64 unit),
+ TP_ARGS(dcp, unit),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u64, unit)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->unit = unit;),
+
+ TP_printk("%s: dptxport unit %lld initialized", __get_str(devname),
+ __entry->unit));
+
+TRACE_EVENT(
+ dptxport_apcall,
+ TP_PROTO(struct dptx_port *dptx, int idx, size_t len),
+ TP_ARGS(dptx, idx, len),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+ __field(u32, unit) __field(int, idx) __field(size_t, len)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dptx->service->ep->dcp->dev));
+ __entry->unit = dptx->unit; __entry->idx = idx; __entry->len = len;),
+
+ TP_printk("%s: dptx%d: AP Call %d (%s) with len %zu", __get_str(devname),
+ __entry->unit,
+ __entry->idx, show_dptxport_apcall(__entry->idx), __entry->len));
+
+TRACE_EVENT(
+ dptxport_validate_connection,
+ TP_PROTO(struct dptx_port *dptx, u8 core, u8 atc, u8 die),
+ TP_ARGS(dptx, core, atc, die),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+ __field(u32, unit) __field(u8, core) __field(u8, atc) __field(u8, die)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dptx->service->ep->dcp->dev));
+ __entry->unit = dptx->unit; __entry->core = core; __entry->atc = atc; __entry->die = die;),
+
+ TP_printk("%s: dptx%d: core %d, atc %d, die %d", __get_str(devname),
+ __entry->unit, __entry->core, __entry->atc, __entry->die));
+
+TRACE_EVENT(
+ dptxport_connect,
+ TP_PROTO(struct dptx_port *dptx, u8 core, u8 atc, u8 die),
+ TP_ARGS(dptx, core, atc, die),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+ __field(u32, unit) __field(u8, core) __field(u8, atc) __field(u8, die)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dptx->service->ep->dcp->dev));
+ __entry->unit = dptx->unit; __entry->core = core; __entry->atc = atc; __entry->die = die;),
+
+ TP_printk("%s: dptx%d: core %d, atc %d, die %d", __get_str(devname),
+ __entry->unit, __entry->core, __entry->atc, __entry->die));
+
+TRACE_EVENT(
+ dptxport_call_set_link_rate,
+ TP_PROTO(struct dptx_port *dptx, u32 link_rate),
+ TP_ARGS(dptx, link_rate),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+ __field(u32, unit)
+ __field(u32, link_rate)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dptx->service->ep->dcp->dev));
+ __entry->unit = dptx->unit;
+ __entry->link_rate = link_rate;),
+
+ TP_printk("%s: dptx%d: link rate 0x%x", __get_str(devname), __entry->unit,
+ __entry->link_rate));
+
+TRACE_EVENT(iomfb_brightness,
+ TP_PROTO(struct apple_dcp *dcp, u32 nits),
+ TP_ARGS(dcp, nits),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, nits)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->nits = nits;
+ ),
+ TP_printk("dcp=%llx, nits=%u (raw=0x%05x)",
+ __entry->dcp,
+ __entry->nits >> 16,
+ __entry->nits
+ )
+);
+
+#define show_eotf(eotf) \
+ __print_symbolic(eotf, { 0, "SDR gamma"}, \
+ { 1, "HDR gamma"}, \
+ { 2, "ST 2084 (PQ)"}, \
+ { 3, "BT.2100 (HLG)"}, \
+ { 4, "unexpected"})
+
+#define show_encoding(enc) \
+ __print_symbolic(enc, { 0, "RGB"}, \
+ { 1, "YUV 4:2:0"}, \
+ { 3, "YUV 4:2:2"}, \
+ { 2, "YUV 4:4:4"}, \
+ { 4, "DolbyVision (native)"}, \
+ { 5, "DolbyVision (HDMI)"}, \
+ { 6, "YCbCr 4:2:2 (DP tunnel)"}, \
+ { 7, "YCbCr 4:2:2 (HDMI tunnel)"}, \
+ { 8, "DolbyVision LL YCbCr 4:2:2"}, \
+ { 9, "DolbyVision LL YCbCr 4:2:2 (DP)"}, \
+ {10, "DolbyVision LL YCbCr 4:2:2 (HDMI)"}, \
+ {11, "DolbyVision LL YCbCr 4:4:4"}, \
+ {12, "DolbyVision LL RGB 4:2:2"}, \
+ {13, "GRGB as YCbCr422 (Even line blue)"}, \
+ {14, "GRGB as YCbCr422 (Even line red)"}, \
+ {15, "unexpected"})
+
+#define show_colorimetry(col) \
+ __print_symbolic(col, { 0, "SMPTE 170M/BT.601"}, \
+ { 1, "BT.701"}, \
+ { 2, "xvYCC601"}, \
+ { 3, "xvYCC709"}, \
+ { 4, "sYCC601"}, \
+ { 5, "AdobeYCC601"}, \
+ { 6, "BT.2020 (c)"}, \
+ { 7, "BT.2020 (nc)"}, \
+ { 8, "DolbyVision VSVDB"}, \
+ { 9, "BT.2020 (RGB)"}, \
+ {10, "sRGB"}, \
+ {11, "scRGB"}, \
+ {12, "scRGBfixed"}, \
+ {13, "AdobeRGB"}, \
+ {14, "DCI-P3 (D65)"}, \
+ {15, "DCI-P3 (Theater)"}, \
+ {16, "Default RGB"}, \
+ {17, "unexpected"})
+
+#define show_range(range) \
+ __print_symbolic(range, { 0, "Full"}, \
+ { 1, "Limited"}, \
+ { 2, "unexpected"})
+
+TRACE_EVENT(iomfb_color_mode,
+ TP_PROTO(struct apple_dcp *dcp, u32 id, u32 score, u32 depth,
+ u32 colorimetry, u32 eotf, u32 range, u32 pixel_enc),
+ TP_ARGS(dcp, id, score, depth, colorimetry, eotf, range, pixel_enc),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, id)
+ __field(u32, score)
+ __field(u32, depth)
+ __field(u32, colorimetry)
+ __field(u32, eotf)
+ __field(u32, range)
+ __field(u32, pixel_enc)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->id = id;
+ __entry->score = score;
+ __entry->depth = depth;
+ __entry->colorimetry = min_t(u32, colorimetry, 17U);
+ __entry->eotf = min_t(u32, eotf, 4U);
+ __entry->range = min_t(u32, range, 2U);
+ __entry->pixel_enc = min_t(u32, pixel_enc, 15U);
+ ),
+ TP_printk("dcp=%llx, id=%u, score=%u, depth=%u, colorimetry=%s, eotf=%s, range=%s, pixel_enc=%s",
+ __entry->dcp,
+ __entry->id,
+ __entry->score,
+ __entry->depth,
+ show_colorimetry(__entry->colorimetry),
+ show_eotf(__entry->eotf),
+ show_range(__entry->range),
+ show_encoding(__entry->pixel_enc)
+ )
+);
+
+TRACE_EVENT(iomfb_timing_mode,
+ TP_PROTO(struct apple_dcp *dcp, u32 id, u32 score, u32 width,
+ u32 height, u32 clock, u32 color_mode),
+ TP_ARGS(dcp, id, score, width, height, clock, color_mode),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, id)
+ __field(u32, score)
+ __field(u32, width)
+ __field(u32, height)
+ __field(u32, clock)
+ __field(u32, color_mode)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->id = id;
+ __entry->score = score;
+ __entry->width = width;
+ __entry->height = height;
+ __entry->clock = clock;
+ __entry->color_mode = color_mode;
+ ),
+ TP_printk("dcp=%llx, id=%u, score=%u, %ux%u@%u.%u, color_mode=%u",
+ __entry->dcp,
+ __entry->id,
+ __entry->score,
+ __entry->width,
+ __entry->height,
+ __entry->clock >> 16,
+ ((__entry->clock & 0xffff) * 1000) >> 16,
+ __entry->color_mode
+ )
+);
+
+TRACE_EVENT(avep_sound_mode,
+ TP_PROTO(struct apple_dcp *dcp, u32 rates, u64 formats, unsigned int nchans),
+ TP_ARGS(dcp, rates, formats, nchans),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, rates)
+ __field(u64, formats)
+ __field(unsigned int, nchans)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->rates = rates;
+ __entry->formats = formats;
+ __entry->nchans = nchans;
+ ),
+ TP_printk("dcp=%llx, rates=%#x, formats=%#llx, nchans=%#x",
+ __entry->dcp,
+ __entry->rates,
+ __entry->formats,
+ __entry->nchans
+ )
+);
+
+#endif /* _TRACE_DCP_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef __APPLE_VERSION_UTILS_H__
+#define __APPLE_VERSION_UTILS_H__
+
+#include <linux/kernel.h>
+#include <linux/args.h>
+
+#define DCP_FW_UNION(u) (u).DCP_FW
+#define DCP_FW_SUFFIX CONCATENATE(_, DCP_FW)
+#define DCP_FW_NAME(name) CONCATENATE(name, DCP_FW_SUFFIX)
+#define DCP_FW_VERSION(x, y, z) ( ((x) << 16) | ((y) << 8) | (z) )
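+/*
+ * For example (assuming the translation unit is built with DCP_FW
+ * defined as v13_3): DCP_FW_SUFFIX expands to _v13_3, DCP_FW_NAME(foo)
+ * to foo_v13_3, and DCP_FW_VERSION(13, 3, 0) evaluates to 0x0d0300.
+ */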
+
+#endif /*__APPLE_VERSION_UTILS_H__*/
-# $OpenBSD: files.drm,v 1.61 2024/01/16 23:37:51 jsg Exp $
+# $OpenBSD: files.drm,v 1.62 2024/01/22 18:54:01 kettenis Exp $
#file dev/pci/drm/aperture.c drm
file dev/pci/drm/dma-resv.c drm
file dev/pci/drm/drm_encoder_slave.c drm
file dev/pci/drm/drm_exec.c drm
file dev/pci/drm/drm_fb_helper.c drm
+file dev/pci/drm/drm_fb_dma_helper.c drm
file dev/pci/drm/drm_fbdev_dma.c drm
file dev/pci/drm/drm_fbdev_generic.c drm
file dev/pci/drm/drm_file.c drm
file dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c amdgpu
file dev/pci/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c amdgpu
file dev/pci/drm/amd/pm/swsmu/smu_cmn.c amdgpu
+
+device apldcp
+attach apldcp at fdt
+file dev/pci/drm/apple/apldcp.c apldcp
+file dev/pci/drm/apple/afk.c apldcp
+file dev/pci/drm/apple/dcp_backlight.c apldcp
+file dev/pci/drm/apple/dptxep.c apldcp
+file dev/pci/drm/apple/ibootep.c apldcp
+file dev/pci/drm/apple/iomfb.c apldcp
+file dev/pci/drm/apple/iomfb_v12_3.c apldcp
+file dev/pci/drm/apple/iomfb_v13_3.c apldcp
+file dev/pci/drm/apple/parser.c apldcp
+file dev/pci/drm/apple/systemep.c apldcp
+
+device apldrm: drmbase, wsemuldisplaydev, rasops32
+attach apldrm at fdt
+file dev/pci/drm/apple/apldrm.c apldrm
#ifdef __LP64__
#define CONFIG_64BIT 1
#endif
+
+#if defined(SUSPEND) || defined(HIBERNATE)
+#define CONFIG_PM_SLEEP
+#endif
--- /dev/null
+/* Public domain. */
+
+#ifndef _LINUX_SOC_APPLE_RTKIT_H
+#define _LINUX_SOC_APPLE_RTKIT_H
+
+#include <linux/bitfield.h>
+
+struct apple_rtkit;
+
+struct apple_rtkit_shmem {
+ dma_addr_t iova;
+ void *buffer;
+ size_t size;
+ int is_mapped;
+};
+
+struct apple_rtkit_ops {
+ void (*crashed)(void *);
+ void (*recv_message)(void *, uint8_t, uint64_t);
+ int (*shmem_setup)(void *, struct apple_rtkit_shmem *);
+ void (*shmem_destroy)(void *, struct apple_rtkit_shmem *);
+};
+
+struct apple_rtkit *devm_apple_rtkit_init(struct device *, void *,
+ const char *, int, const struct apple_rtkit_ops *);
+
+int apple_rtkit_send_message(struct apple_rtkit *, uint8_t, uint64_t,
+ struct completion *, int);
+int apple_rtkit_start_ep(struct apple_rtkit *, uint8_t);
+int apple_rtkit_wake(struct apple_rtkit *);
+
+#endif