cpufreq-dt.c

/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * The OPP code in function set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
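
/*
 * Per-policy driver state: the CPU device, its (optional) supply regulator,
 * the cooling device registered from ->ready(), and the DT
 * "voltage-tolerance" value (in percent).
 */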
struct private_data {
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct thermal_cooling_device *cdev;
	unsigned int voltage_tolerance; /* in percentage */
};
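
/*
 * cpufreq ->target_index callback: look up the target OPP, raise the supply
 * voltage before increasing the clock rate (or lower it afterwards when
 * scaling down), and restore the previous voltage if setting the rate fails.
 */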
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct private_data *priv = policy->driver_data;
	struct device *cpu_dev = priv->cpu_dev;
	struct regulator *cpu_reg = priv->cpu_reg;
	unsigned long volt = 0, volt_old = 0, tol = 0;
	unsigned int old_freq, new_freq;
	long freq_Hz, freq_exact;
	int ret;

	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz <= 0)
		freq_Hz = freq_table[index].frequency * 1000;

	freq_exact = freq_Hz;
	new_freq = freq_Hz / 1000;
	old_freq = clk_get_rate(cpu_clk) / 1000;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq;

		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(cpu_dev, "failed to find OPP for %ld\n",
				freq_Hz);
			return PTR_ERR(opp);
		}
		volt = dev_pm_opp_get_voltage(opp);
		opp_freq = dev_pm_opp_get_freq(opp);
		rcu_read_unlock();
		tol = volt * priv->voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
		dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
			opp_freq / 1000, volt);
	}

	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
		old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
		new_freq / 1000, volt ? volt / 1000 : -1);

	/* scaling up? scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage up: %d\n",
				ret);
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		if (!IS_ERR(cpu_reg) && volt_old > 0)
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down? scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage down: %d\n",
				ret);
			clk_set_rate(cpu_clk, old_freq * 1000);
		}
	}

	return ret;
}
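
/*
 * Fetch the CPU device, its optional supply regulator ("cpu0" is tried first
 * on CPU0 for older DTs, then "cpu") and its clock. Returns -EPROBE_DEFER
 * when the regulator or clock node exists but is not registered yet.
 */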
static int allocate_resources(int cpu, struct device **cdev,
			      struct regulator **creg, struct clk **cclk)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	/* Try "cpu0" for older DTs */
	if (!cpu)
		reg = reg_cpu0;
	else
		reg = reg_cpu;

try_again:
	cpu_reg = regulator_get_optional(cpu_dev, reg);
	if (IS_ERR(cpu_reg)) {
		/*
		 * If cpu's regulator supply node is present, but regulator is
		 * not yet registered, we should try deferring probe.
		 */
		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
				cpu);
			return -EPROBE_DEFER;
		}

		/* Try with "cpu-supply" */
		if (reg == reg_cpu0) {
			reg = reg_cpu;
			goto try_again;
		}

		dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
			cpu, PTR_ERR(cpu_reg));
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		/* put regulator */
		if (!IS_ERR(cpu_reg))
			regulator_put(cpu_reg);

		ret = PTR_ERR(cpu_clk);

		/*
		 * If cpu's clk node is present, but clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
		else
			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
				ret);
	} else {
		*cdev = cpu_dev;
		*creg = cpu_reg;
		*cclk = cpu_clk;
	}

	return ret;
}
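
/*
 * cpufreq ->init callback: build the OPP-based frequency table for the
 * policy, read "voltage-tolerance" and "clock-latency" from the CPU's DT
 * node, disable OPPs the regulator cannot supply, and add the regulator's
 * voltage ramp time to the transition latency.
 */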
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_dt_platform_data *pd;
	struct cpufreq_frequency_table *freq_table;
	struct device_node *np;
	struct private_data *priv;
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	unsigned long min_uV = ~0, max_uV = 0;
	unsigned int transition_latency;
	int ret;

	ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret) {
		pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
		return ret;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
		ret = -ENOENT;
		goto out_put_reg_clk;
	}

	/* OPPs might be populated at runtime, don't check for error here */
	of_init_opp_table(cpu_dev);

	/*
	 * But we need the OPP table to function, so if it is not there let's
	 * give platform code a chance to provide it for us.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		pr_debug("OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq = 0;

		/*
		 * Disable any OPPs where the connected regulator isn't able to
		 * provide the specified voltage and record minimum and maximum
		 * voltage levels.
		 */
		while (1) {
			struct dev_pm_opp *opp;
			unsigned long opp_uV, tol_uV;

			rcu_read_lock();
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
			if (IS_ERR(opp)) {
				rcu_read_unlock();
				break;
			}
			opp_uV = dev_pm_opp_get_voltage(opp);
			rcu_read_unlock();

			tol_uV = opp_uV * priv->voltage_tolerance / 100;
			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
							   opp_uV + tol_uV)) {
				if (opp_uV < min_uV)
					min_uV = opp_uV;
				if (opp_uV > max_uV)
					max_uV = opp_uV;
			} else {
				dev_pm_opp_disable(cpu_dev, opp_freq);
			}

			opp_freq++;
		}

		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->cpu_reg = cpu_reg;
	policy->driver_data = priv;

	policy->clk = cpu_clk;
	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
			ret);
		goto out_free_cpufreq_table;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	pd = cpufreq_get_driver_data();
	if (!pd || !pd->independent_clocks)
		cpumask_setall(policy->cpus);

	of_node_put(np);

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_free_opp:
	of_free_opp_table(cpu_dev);
	of_node_put(np);
out_put_reg_clk:
	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	return ret;
}
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	of_free_opp_table(priv->cpu_dev);
	clk_put(policy->clk);
	if (!IS_ERR(priv->cpu_reg))
		regulator_put(priv->cpu_reg);
	kfree(priv);

	return 0;
}
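
/*
 * cpufreq ->ready callback: once the policy is fully initialized, register
 * the CPUs as a cooling device if the DT node has "#cooling-cells".
 */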
static void cpufreq_ready(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;
	struct device_node *np = of_node_get(priv->cpu_dev->of_node);

	if (WARN_ON(!np))
		return;

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		priv->cdev = of_cpufreq_cooling_register(np,
							 policy->related_cpus);
		if (IS_ERR(priv->cdev)) {
			dev_err(priv->cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(priv->cdev));

			priv->cdev = NULL;
		}
	}

	of_node_put(np);
}
static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.ready = cpufreq_ready,
	.name = "cpufreq-dt",
	.attr = cpufreq_generic_attr,
};
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient ?
	 */
	ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret)
		return ret;

	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(cpu_dev, "failed to register driver: %d\n", ret);

	return ret;
}
static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove		= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");