-/* $OpenBSD: aplcpu.c,v 1.4 2022/10/18 15:12:13 kettenis Exp $ */
+/* $OpenBSD: aplcpu.c,v 1.5 2022/12/03 13:31:32 kettenis Exp $ */
/*
* Copyright (c) 2022 Mark Kettenis <kettenis@openbsd.org>
*
struct ksensor sc_sensor[APLCPU_MAX_CLUSTERS];
};
-struct aplcpu_softc *aplcpu_sc;
-
int aplcpu_match(struct device *, void *, void *);
void aplcpu_attach(struct device *, struct device *, void *);
{
struct fdt_attach_args *faa = aux;
- return OF_is_compatible(faa->fa_node, "apple,soc-cpufreq");
+ return OF_is_compatible(faa->fa_node, "apple,soc-cpufreq") ||
+ OF_is_compatible(faa->fa_node, "apple,cluster-cpufreq");
}
void
sc->sc_node = faa->fa_node;
sc->sc_nclusters = faa->fa_nreg;
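+ /*
+ * The layout of the DVFS status register differs between SoC
+ * generations, so pick the current P-state mask and shift by
+ * compatible string.
+ */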
- if (OF_is_compatible(sc->sc_node, "apple,t8103-soc-cpufreq")) {
+ if (OF_is_compatible(sc->sc_node, "apple,t8103-soc-cpufreq") ||
+ OF_is_compatible(sc->sc_node, "apple,t8103-cluster-cpufreq")) {
sc->sc_cur_ps_mask = DVFS_T8103_STATUS_CUR_PS_MASK;
sc->sc_cur_ps_shift = DVFS_T8103_STATUS_CUR_PS_SHIFT;
- } else if (OF_is_compatible(sc->sc_node, "apple,t8112-soc-cpufreq")) {
+ } else if (OF_is_compatible(sc->sc_node, "apple,t8112-soc-cpufreq") ||
+ OF_is_compatible(sc->sc_node, "apple,t8112-cluster-cpufreq")) {
sc->sc_cur_ps_mask = DVFS_T8112_STATUS_CUR_PS_MASK;
sc->sc_cur_ps_shift = DVFS_T8112_STATUS_CUR_PS_SHIFT;
}
sensordev_install(&sc->sc_sensordev);
sensor_task_register(sc, aplcpu_refresh_sensors, 1);
- aplcpu_sc = sc;
cpu_cpuspeed = aplcpu_clockspeed;
cpu_setperf = aplcpu_setperf;
return;
uint32_t opp_hz, opp_level;
int i, j;
- if (OF_getpropintarray(node, "apple,freq-domain", freq_domain,
- sizeof(freq_domain)) != sizeof(freq_domain))
- return;
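+ /*
+ * Prefer the new "performance-domains" binding; fall back to the
+ * older "apple,freq-domain" property if it is absent.
+ */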
+ freq_domain[0] = OF_getpropint(node, "performance-domains", 0);
+ freq_domain[1] = 0;
+ if (freq_domain[0] == 0) {
+ if (OF_getpropintarray(node, "apple,freq-domain", freq_domain,
+ sizeof(freq_domain)) != sizeof(freq_domain))
+ return;
+ if (freq_domain[1] > APLCPU_MAX_CLUSTERS)
+ return;
+ }
if (freq_domain[0] != OF_getpropint(sc->sc_node, "phandle", 0))
return;
- if (freq_domain[1] > APLCPU_MAX_CLUSTERS)
- return;
-
+
phandle = OF_getpropint(node, "operating-points-v2", 0);
if (phandle == 0)
return;
int
aplcpu_clockspeed(int *freq)
{
- struct aplcpu_softc *sc = aplcpu_sc;
+ struct aplcpu_softc *sc;
struct opp_table *ot;
uint32_t opp_hz = 0, opp_level;
- int i, j;
+ int i, j, k;
/*
* Clusters can run at different frequencies. We report the
* highest frequency among all clusters.
*/
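+ /*
+ * With the per-cluster bindings there can be more than one
+ * aplcpu(4) instance, so check every cluster of every instance.
+ */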
- for (i = 0; i < sc->sc_nclusters; i++) {
- if (sc->sc_opp_table[i] == NULL)
+ for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
+ sc = aplcpu_cd.cd_devs[i];
+ if (sc == NULL)
continue;
- opp_level = aplcpu_opp_level(sc, i);
+ for (j = 0; j < sc->sc_nclusters; j++) {
+ if (sc->sc_opp_table[j] == NULL)
+ continue;
- /* Translate P-state to frequency. */
- ot = sc->sc_opp_table[i];
- for (j = 0; j < ot->ot_nopp; j++) {
- if (ot->ot_opp[j].opp_level == opp_level)
- opp_hz = MAX(opp_hz, ot->ot_opp[j].opp_hz);
+ opp_level = aplcpu_opp_level(sc, j);
+
+ /* Translate P-state to frequency. */
+ ot = sc->sc_opp_table[j];
+ for (k = 0; k < ot->ot_nopp; k++) {
+ if (ot->ot_opp[k].opp_level != opp_level)
+ continue;
+ opp_hz = MAX(opp_hz, ot->ot_opp[k].opp_hz);
+ }
}
}
+
if (opp_hz == 0)
return EINVAL;
void
aplcpu_setperf(int level)
{
- struct aplcpu_softc *sc = aplcpu_sc;
+ struct aplcpu_softc *sc;
struct opp_table *ot;
uint64_t min, max;
uint64_t level_hz;
uint32_t opp_level;
uint64_t reg;
- int i, j, timo;
-
- if (sc->sc_perflevel == level)
- return;
+ int i, j, k, timo;
/*
* We let the CPU performance level span the entire range of
* frequencies supported across all the clusters to come up with
* something more sensible given the constraints of the
* hw.setperf sysctl interface.
*/
- min = sc->sc_opp_hz_min;
- max = sc->sc_opp_hz_max;
- level_hz = min + (level * (max - min)) / 100;
+ min = ~0ULL;
+ max = 0;
+ for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
+ sc = aplcpu_cd.cd_devs[i];
+ if (sc == NULL)
+ continue;
+ min = MIN(min, sc->sc_opp_hz_min);
+ max = MAX(max, sc->sc_opp_hz_max);
+ }
+ level_hz = min + (level * (max - min)) / 100;
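+ /*
+ * The mapping is linear.  With hypothetical limits of 600 MHz
+ * and 3000 MHz, hw.setperf=50 yields
+ * 600000000 + (50 * 2400000000) / 100 = 1800000000 Hz.
+ */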
+
+ for (i = 0; i < aplcpu_cd.cd_ndevs; i++) {
+ sc = aplcpu_cd.cd_devs[i];
+ if (sc == NULL)
+ continue;
+ if (sc->sc_perflevel == level)
continue;
- /* Translate performance level to a P-state. */
- opp_level = 1;
- ot = sc->sc_opp_table[i];
- for (j = 0; j < ot->ot_nopp; j++) {
- if (ot->ot_opp[j].opp_hz <= level_hz &&
- ot->ot_opp[j].opp_level >= opp_level)
- opp_level = ot->ot_opp[j].opp_level;
- }
+ for (j = 0; j < sc->sc_nclusters; j++) {
+ if (sc->sc_opp_table[j] == NULL)
+ continue;
+
+ /* Translate performance level to a P-state. */
+ opp_level = 1;
+ ot = sc->sc_opp_table[j];
+ for (k = 0; k < ot->ot_nopp; k++) {
+ if (ot->ot_opp[k].opp_hz <= level_hz &&
+ ot->ot_opp[k].opp_level >= opp_level)
+ opp_level = ot->ot_opp[k].opp_level;
+ }
- /* Wait until P-state logic isn't busy. */
- for (timo = 100; timo > 0; timo--) {
- reg = bus_space_read_8(sc->sc_iot, sc->sc_ioh[i],
- DVFS_CMD);
- if ((reg & DVFS_CMD_BUSY) == 0)
- break;
- delay(1);
+ /* Wait until P-state logic isn't busy. */
+ for (timo = 100; timo > 0; timo--) {
+ reg = bus_space_read_8(sc->sc_iot,
+ sc->sc_ioh[j], DVFS_CMD);
+ if ((reg & DVFS_CMD_BUSY) == 0)
+ break;
+ delay(1);
+ }
+ if (reg & DVFS_CMD_BUSY)
+ continue;
+
+ /* Set desired P-state. */
+ reg &= ~DVFS_CMD_PS1_MASK;
+ reg &= ~DVFS_CMD_PS2_MASK;
+ reg |= (opp_level << DVFS_CMD_PS1_SHIFT);
+ reg |= (opp_level << DVFS_CMD_PS2_SHIFT);
+ reg |= DVFS_CMD_SET;
+ bus_space_write_8(sc->sc_iot, sc->sc_ioh[j],
+ DVFS_CMD, reg);
}
- if (reg & DVFS_CMD_BUSY)
- continue;
- /* Set desired P-state. */
- reg &= ~DVFS_CMD_PS1_MASK;
- reg &= ~DVFS_CMD_PS2_MASK;
- reg |= (opp_level << DVFS_CMD_PS1_SHIFT);
- reg |= (opp_level << DVFS_CMD_PS2_SHIFT);
- reg |= DVFS_CMD_SET;
- bus_space_write_8(sc->sc_iot, sc->sc_ioh[i], DVFS_CMD, reg);
+ sc->sc_perflevel = level;
}
-
- sc->sc_perflevel = level;
}
void