From: kettenis
Date: Sat, 3 Dec 2022 13:31:32 +0000 (+0000)
Subject: The device tree bindings for aplcpu(4) changed once more, recycling the
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=99901a0d4e04706587761ec917ae429af027d633;p=openbsd

The device tree bindings for aplcpu(4) changed once more, recycling the
"apple,cluster-cpufreq" compatible that was used for the first version.
Add support for the "new new" binding while retaining support for the
"old new" binding.  Hopefully nobody is using the "old" binding anymore
now that we update the m1n1+u-boot+dtb "boot firmware" automatically on
sysupgrade.

ok patrick@, tobhe@
---

diff --git a/sys/arch/arm64/dev/aplcpu.c b/sys/arch/arm64/dev/aplcpu.c
index 22d672629b9..f11f8e6b7d7 100644
--- a/sys/arch/arm64/dev/aplcpu.c
+++ b/sys/arch/arm64/dev/aplcpu.c
@@ -1,4 +1,4 @@
-/*      $OpenBSD: aplcpu.c,v 1.4 2022/10/18 15:12:13 kettenis Exp $        */
+/*      $OpenBSD: aplcpu.c,v 1.5 2022/12/03 13:31:32 kettenis Exp $        */
 /*
  * Copyright (c) 2022 Mark Kettenis <kettenis@openbsd.org>
  *
@@ -81,8 +81,6 @@ struct aplcpu_softc {
         struct ksensor sc_sensor[APLCPU_MAX_CLUSTERS];
 };
 
-struct aplcpu_softc *aplcpu_sc;
-
 int     aplcpu_match(struct device *, void *, void *);
 void    aplcpu_attach(struct device *, struct device *, void *);
 
@@ -105,7 +103,8 @@ aplcpu_match(struct device *parent, void *match, void *aux)
 {
         struct fdt_attach_args *faa = aux;
 
-        return OF_is_compatible(faa->fa_node, "apple,soc-cpufreq");
+        return OF_is_compatible(faa->fa_node, "apple,soc-cpufreq") ||
+            OF_is_compatible(faa->fa_node, "apple,cluster-cpufreq");
 }
 
 void
@@ -142,10 +141,12 @@ aplcpu_attach(struct device *parent, struct device *self, void *aux)
         sc->sc_node = faa->fa_node;
         sc->sc_nclusters = faa->fa_nreg;
 
-        if (OF_is_compatible(sc->sc_node, "apple,t8103-soc-cpufreq")) {
+        if (OF_is_compatible(sc->sc_node, "apple,t8103-soc-cpufreq") ||
+            OF_is_compatible(sc->sc_node, "apple,t8103-cluster-cpufreq")) {
                 sc->sc_cur_ps_mask = DVFS_T8103_STATUS_CUR_PS_MASK;
                 sc->sc_cur_ps_shift = DVFS_T8103_STATUS_CUR_PS_SHIFT;
-        } else if (OF_is_compatible(sc->sc_node, "apple,t8112-soc-cpufreq")) {
+        } else if (OF_is_compatible(sc->sc_node, "apple,t8112-soc-cpufreq") ||
+            OF_is_compatible(sc->sc_node, "apple,t8112-cluster-cpufreq")) {
                 sc->sc_cur_ps_mask = DVFS_T8112_STATUS_CUR_PS_MASK;
                 sc->sc_cur_ps_shift = DVFS_T8112_STATUS_CUR_PS_SHIFT;
         }
@@ -170,7 +171,6 @@ aplcpu_attach(struct device *parent, struct device *self, void *aux)
         sensordev_install(&sc->sc_sensordev);
         sensor_task_register(sc, aplcpu_refresh_sensors, 1);
 
-        aplcpu_sc = sc;
         cpu_cpuspeed = aplcpu_clockspeed;
         cpu_setperf = aplcpu_setperf;
         return;
@@ -192,14 +192,18 @@ aplcpu_opp_init(struct aplcpu_softc *sc, int node)
         uint32_t opp_hz, opp_level;
         int i, j;
 
-        if (OF_getpropintarray(node, "apple,freq-domain", freq_domain,
-            sizeof(freq_domain)) != sizeof(freq_domain))
-                return;
+        freq_domain[0] = OF_getpropint(node, "performance-domains", 0);
+        freq_domain[1] = 0;
+        if (freq_domain[0] == 0) {
+                if (OF_getpropintarray(node, "apple,freq-domain", freq_domain,
+                    sizeof(freq_domain)) != sizeof(freq_domain))
+                        return;
+                if (freq_domain[1] > APLCPU_MAX_CLUSTERS)
+                        return;
+        }
         if (freq_domain[0] != OF_getpropint(sc->sc_node, "phandle", 0))
                 return;
-        if (freq_domain[1] > APLCPU_MAX_CLUSTERS)
-                return;
-
+
         phandle = OF_getpropint(node, "operating-points-v2", 0);
         if (phandle == 0)
                 return;
@@ -289,29 +293,37 @@ aplcpu_opp_level(struct aplcpu_softc *sc, int cluster)
 int
 aplcpu_clockspeed(int *freq)
 {
-        struct aplcpu_softc *sc = aplcpu_sc;
+        struct aplcpu_softc *sc;
         struct opp_table *ot;
         uint32_t opp_hz = 0, opp_level;
-        int i, j;
+        int i, j, k;
 
         /*
          * Clusters can run at different frequencies.  We report the
          * highest frequency among all clusters.
          */
-        for (i = 0; i < sc->sc_nclusters; i++) {
-                if (sc->sc_opp_table[i] == NULL)
+        for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
+                sc = aplcpu_cd.cd_devs[i];
+                if (sc == NULL)
                         continue;
-                opp_level = aplcpu_opp_level(sc, i);
+                for (j = 0; j < sc->sc_nclusters; j++) {
+                        if (sc->sc_opp_table[j] == NULL)
+                                continue;
 
-                /* Translate P-state to frequency. */
-                ot = sc->sc_opp_table[i];
-                for (j = 0; j < ot->ot_nopp; j++) {
-                        if (ot->ot_opp[j].opp_level == opp_level)
-                                opp_hz = MAX(opp_hz, ot->ot_opp[j].opp_hz);
+                        opp_level = aplcpu_opp_level(sc, j);
+
+                        /* Translate P-state to frequency. */
+                        ot = sc->sc_opp_table[j];
+                        for (k = 0; k < ot->ot_nopp; k++) {
+                                if (ot->ot_opp[k].opp_level != opp_level)
+                                        continue;
+                                opp_hz = MAX(opp_hz, ot->ot_opp[k].opp_hz);
+                        }
                 }
         }
+
         if (opp_hz == 0)
                 return EINVAL;
@@ -322,16 +334,13 @@ aplcpu_clockspeed(int *freq)
 void
 aplcpu_setperf(int level)
 {
-        struct aplcpu_softc *sc = aplcpu_sc;
+        struct aplcpu_softc *sc;
         struct opp_table *ot;
         uint64_t min, max;
         uint64_t level_hz;
         uint32_t opp_level;
         uint64_t reg;
-        int i, j, timo;
-
-        if (sc->sc_perflevel == level)
-                return;
+        int i, j, k, timo;
 
         /*
          * We let the CPU performance level span the entire range
@@ -344,44 +353,59 @@ aplcpu_setperf(int level)
          * to come up with something more sensible given the
          * constraints of the hw.setperf sysctl interface.
          */
-        min = sc->sc_opp_hz_min;
-        max = sc->sc_opp_hz_max;
-        level_hz = min + (level * (max - min)) / 100;
+        for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
+                sc = aplcpu_cd.cd_devs[i];
+                if (sc == NULL)
+                        continue;
 
-        for (i = 0; i < sc->sc_nclusters; i++) {
-                if (sc->sc_opp_table[i] == NULL)
+                min = sc->sc_opp_hz_min;
+                max = sc->sc_opp_hz_max;
+                level_hz = min + (level * (max - min)) / 100;
+        }
+
+        for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
+                sc = aplcpu_cd.cd_devs[i];
+                if (sc == NULL)
+                        continue;
+                if (sc->sc_perflevel == level)
                         continue;
 
-                /* Translate performance level to a P-state. */
-                opp_level = 1;
-                ot = sc->sc_opp_table[i];
-                for (j = 0; j < ot->ot_nopp; j++) {
-                        if (ot->ot_opp[j].opp_hz <= level_hz &&
-                            ot->ot_opp[j].opp_level >= opp_level)
-                                opp_level = ot->ot_opp[j].opp_level;
-                }
+                for (j = 0; j < sc->sc_nclusters; j++) {
+                        if (sc->sc_opp_table[j] == NULL)
+                                continue;
+
+                        /* Translate performance level to a P-state. */
+                        opp_level = 1;
+                        ot = sc->sc_opp_table[j];
+                        for (k = 0; k < ot->ot_nopp; k++) {
+                                if (ot->ot_opp[k].opp_hz <= level_hz &&
+                                    ot->ot_opp[k].opp_level >= opp_level)
+                                        opp_level = ot->ot_opp[k].opp_level;
+                        }
 
-                /* Wait until P-state logic isn't busy. */
-                for (timo = 100; timo > 0; timo--) {
-                        reg = bus_space_read_8(sc->sc_iot, sc->sc_ioh[i],
-                            DVFS_CMD);
-                        if ((reg & DVFS_CMD_BUSY) == 0)
-                                break;
-                        delay(1);
+                        /* Wait until P-state logic isn't busy. */
+                        for (timo = 100; timo > 0; timo--) {
+                                reg = bus_space_read_8(sc->sc_iot,
+                                    sc->sc_ioh[j], DVFS_CMD);
+                                if ((reg & DVFS_CMD_BUSY) == 0)
+                                        break;
+                                delay(1);
+                        }
+                        if (reg & DVFS_CMD_BUSY)
+                                continue;
+
+                        /* Set desired P-state. */
+                        reg &= ~DVFS_CMD_PS1_MASK;
+                        reg &= ~DVFS_CMD_PS2_MASK;
+                        reg |= (opp_level << DVFS_CMD_PS1_SHIFT);
+                        reg |= (opp_level << DVFS_CMD_PS2_SHIFT);
+                        reg |= DVFS_CMD_SET;
+                        bus_space_write_8(sc->sc_iot, sc->sc_ioh[j],
+                            DVFS_CMD, reg);
                 }
-                if (reg & DVFS_CMD_BUSY)
-                        continue;
 
-                /* Set desired P-state. */
-                reg &= ~DVFS_CMD_PS1_MASK;
-                reg &= ~DVFS_CMD_PS2_MASK;
-                reg |= (opp_level << DVFS_CMD_PS1_SHIFT);
-                reg |= (opp_level << DVFS_CMD_PS2_SHIFT);
-                reg |= DVFS_CMD_SET;
-                bus_space_write_8(sc->sc_iot, sc->sc_ioh[i], DVFS_CMD, reg);
+                sc->sc_perflevel = level;
         }
-
-        sc->sc_perflevel = level;
 }
 
 void
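
Note on the pattern the new code uses: with the per-cluster "apple,cluster-cpufreq"
binding, each frequency domain may attach as its own aplcpu(4) instance, so the
driver drops the single global aplcpu_sc pointer and instead walks every attached
instance through the autoconf cfdriver.  The sketch below only illustrates that
idiom; aplcpu_foreach() is a hypothetical helper, not part of the commit (the
driver open-codes the same loop in aplcpu_clockspeed() and aplcpu_setperf()),
and struct aplcpu_softc is the softc defined in aplcpu.c.

    /*
     * Illustrative sketch only: visit every attached aplcpu(4) instance
     * through the autoconf cfdriver's device array.
     */
    #include <sys/param.h>
    #include <sys/device.h>

    struct aplcpu_softc;                /* real definition lives in aplcpu.c */
    extern struct cfdriver aplcpu_cd;   /* provided by the autoconf(9) glue */

    static void
    aplcpu_foreach(void (*fn)(struct aplcpu_softc *))   /* hypothetical helper */
    {
            struct aplcpu_softc *sc;
            int i;

            for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
                    sc = aplcpu_cd.cd_devs[i];  /* NULL if this unit never attached */
                    if (sc == NULL)
                            continue;
                    fn(sc);                     /* act on this instance's clusters */
            }
    }

Iterating aplcpu_cd.cd_devs this way keeps cpu_cpuspeed and cpu_setperf correct
whether the device tree exposes one multi-cluster node (old bindings) or one
node per cluster (new binding).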