scmi-cpufreq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
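
/*
 * Per-policy driver data: the SCMI performance domain backing the policy,
 * the device of the policy's CPU, and the cooling device registered once
 * the policy is ready.
 */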
struct scmi_data {
	int domain_id;
	struct device *cpu_dev;
	struct thermal_cooling_device *cdev;
};

static const struct scmi_handle *handle;
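
/*
 * Read the current frequency for @cpu from firmware. SCMI reports the rate
 * in Hz while cpufreq expects kHz, hence the division; a firmware error is
 * reported as 0.
 */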
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
	if (ret)
		return 0;
	return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously, and completion can be notified if the relevant events
 * are subscribed to via the SCMI firmware.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	int ret;
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	u64 freq = policy->freq_table[index].frequency;

	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
	if (!ret)
		arch_set_freq_scale(policy->related_cpus, freq,
				    policy->cpuinfo.max_freq);
	return ret;
}
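
/*
 * Fast-switch path: the final freq_set() argument requests polled completion
 * so the call can be issued from the scheduler's fast-switch context without
 * sleeping. Returns the requested frequency on success, 0 on failure.
 */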
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;

	if (!perf_ops->freq_set(handle, priv->domain_id,
				target_freq * 1000, true)) {
		arch_set_freq_scale(policy->related_cpus, target_freq,
				    policy->cpuinfo.max_freq);
		return target_freq;
	}

	return 0;
}
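
/*
 * Collect into @cpumask all CPUs that share @cpu_dev's performance domain
 * and therefore must be managed by the same cpufreq policy.
 */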
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	int cpu, domain, tdomain;
	struct device *tcpu_dev;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}
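
/*
 * Per-policy initialisation: populate the OPP table from firmware, work out
 * which CPUs share the policy, build the cpufreq frequency table and fill in
 * the transition latency advertised by the platform.
 */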
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible = true;
	return 0;

out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_cpumask_remove_table(policy->cpus);

	return ret;
}
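
/*
 * Undo scmi_cpufreq_init() and drop the cooling device registered in
 * scmi_cpufreq_ready().
 */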
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	kfree(priv);
	dev_pm_opp_cpumask_remove_table(policy->related_cpus);

	return 0;
}
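
/* Register the policy as a cooling device once cpufreq setup is complete. */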
static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	priv->cdev = of_cpufreq_cooling_register(policy);
}

static struct cpufreq_driver scmi_cpufreq_driver = {
	.name	= "scmi",
	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get	= scmi_cpufreq_get_rate,
	.init	= scmi_cpufreq_init,
	.exit	= scmi_cpufreq_exit,
	.ready	= scmi_cpufreq_ready,
};
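
/*
 * Probe is called for the SCMI performance protocol device: cache the SCMI
 * handle and register the cpufreq driver against it.
 */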
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;

	handle = sdev->handle;

	if (!handle || !handle->perf_ops)
		return -ENODEV;

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret) {
		dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	}

	return ret;
}

static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");