/* gk20a.c */
  1. /*
  2. * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  19. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  20. * DEALINGS IN THE SOFTWARE.
  21. */
/* Recover the containing gk20a_pmu from a generic nvkm_pmu pointer. */
#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base)

#include "priv.h"

#include <subdev/clk.h>
#include <subdev/timer.h>
#include <subdev/volt.h>

/* Falcon performance-counter slots used for DVFS load sampling. */
#define BUSY_SLOT 0
#define CLK_SLOT 7
/* Tunables and running state for the gk20a DVFS governor. */
struct gk20a_pmu_dvfs_data {
	int p_load_target;	/* load (in percent) the governor aims for */
	int p_load_max;		/* load above which we scale up aggressively */
	int p_smooth;		/* weight of history in the load average */
	unsigned int avg_load;	/* smoothed load average, in percent */
};
/* gk20a PMU instance: generic PMU base plus DVFS worker state. */
struct gk20a_pmu {
	struct nvkm_pmu base;		/* embedded base; see gk20a_pmu() macro */
	struct nvkm_alarm alarm;	/* periodic timer for gk20a_pmu_dvfs_work() */
	struct gk20a_pmu_dvfs_data *data;	/* governor tunables/state */
};
/* Snapshot of the falcon performance counters used for load sampling. */
struct gk20a_pmu_dvfs_dev_status {
	u32 total;	/* counter read from the CLK_SLOT slot */
	u32 busy;	/* counter read from the BUSY_SLOT slot */
};
  44. static int
  45. gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
  46. {
  47. struct nvkm_clk *clk = pmu->base.subdev.device->clk;
  48. return nvkm_clk_astate(clk, *state, 0, false);
  49. }
  50. static void
  51. gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
  52. {
  53. struct nvkm_clk *clk = pmu->base.subdev.device->clk;
  54. *state = clk->pstate;
  55. }
  56. static int
  57. gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
  58. int *state, int load)
  59. {
  60. struct gk20a_pmu_dvfs_data *data = pmu->data;
  61. struct nvkm_clk *clk = pmu->base.subdev.device->clk;
  62. int cur_level, level;
  63. /* For GK20A, the performance level is directly mapped to pstate */
  64. level = cur_level = clk->pstate;
  65. if (load > data->p_load_max) {
  66. level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
  67. } else {
  68. level += ((load - data->p_load_target) * 10 /
  69. data->p_load_target) / 2;
  70. level = max(0, level);
  71. level = min(clk->state_nr - 1, level);
  72. }
  73. nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
  74. cur_level, level);
  75. *state = level;
  76. return (level != cur_level);
  77. }
  78. static void
  79. gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
  80. struct gk20a_pmu_dvfs_dev_status *status)
  81. {
  82. struct nvkm_falcon *falcon = pmu->base.falcon;
  83. status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
  84. status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
  85. }
  86. static void
  87. gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
  88. {
  89. struct nvkm_falcon *falcon = pmu->base.falcon;
  90. nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
  91. nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
  92. }
/*
 * Periodic DVFS worker, invoked from the nvkm timer alarm.
 *
 * Samples the PMU busy/total counters, folds the result into a smoothed
 * load average, and requests a new pstate from the clock subsystem when
 * the governor computes a different target.  Always resets the counters
 * and re-arms itself for the next period.
 */
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
	struct gk20a_pmu *pmu =
		container_of(alarm, struct gk20a_pmu, alarm);
	struct gk20a_pmu_dvfs_data *data = pmu->data;
	struct gk20a_pmu_dvfs_dev_status status;
	struct nvkm_subdev *subdev = &pmu->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_timer *tmr = device->timer;
	struct nvkm_volt *volt = device->volt;
	u32 utilization = 0;
	int state;

	/*
	 * The PMU is initialized before CLK and VOLT, so we have to make sure the
	 * CLK and VOLT are ready here.
	 */
	if (!clk || !volt)
		goto resched;

	gk20a_pmu_dvfs_get_dev_status(pmu, &status);

	/* Guard the division: an empty sample window leaves utilization at 0. */
	if (status.total)
		utilization = div_u64((u64)status.busy * 100, status.total);

	/* Moving average weighted by p_smooth (history) vs. the new sample. */
	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
	data->avg_load /= data->p_smooth + 1;
	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
		   utilization, data->avg_load);

	gk20a_pmu_dvfs_get_cur_state(pmu, &state);

	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
		nvkm_trace(subdev, "set new state to %d\n", state);
		gk20a_pmu_dvfs_target(pmu, &state);
	}

resched:
	gk20a_pmu_dvfs_reset_dev_status(pmu);
	/* Re-sample after 100000000 (presumably nanoseconds, i.e. 100 ms). */
	nvkm_timer_alarm(tmr, 100000000, alarm);
}
  129. static void
  130. gk20a_pmu_fini(struct nvkm_pmu *pmu)
  131. {
  132. struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
  133. nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
  134. nvkm_falcon_put(pmu->falcon, &pmu->subdev);
  135. }
/*
 * Bring up the PMU: acquire the falcon, program the performance counters
 * used for DVFS load sampling, and schedule the first run of the DVFS
 * worker.
 *
 * Returns 0 on success or the error from nvkm_falcon_get().
 */
static int
gk20a_pmu_init(struct nvkm_pmu *pmu)
{
	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_falcon *falcon = pmu->falcon;
	int ret;

	ret = nvkm_falcon_get(falcon, subdev);
	if (ret) {
		nvkm_error(subdev, "cannot acquire %s falcon!\n", falcon->name);
		return ret;
	}

	/* init pwr perf counter */
	/* NOTE(review): magic values configure the BUSY_SLOT/CLK_SLOT counter
	 * slots; exact field meanings not visible here — confirm against the
	 * PMU falcon register documentation. */
	nvkm_falcon_wr32(falcon, 0x504 + (BUSY_SLOT * 0x10), 0x00200001);
	nvkm_falcon_wr32(falcon, 0x50c + (BUSY_SLOT * 0x10), 0x00000002);
	nvkm_falcon_wr32(falcon, 0x50c + (CLK_SLOT * 0x10), 0x00000003);

	/* First DVFS sample after 2000000000 (presumably ns, i.e. 2 s). */
	nvkm_timer_alarm(device->timer, 2000000000, &gpmu->alarm);
	return 0;
}
/* Default governor tuning shared by all gk20a PMU instances. */
static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data = {
	.p_load_target = 70,	/* aim for 70% load */
	.p_load_max = 90,	/* above 90%, jump up several levels */
	.p_smooth = 1,		/* light smoothing of the load average */
};
/* PMU hooks for GK20A; enable-query and reset reuse the GF100 versions. */
static const struct nvkm_pmu_func
gk20a_pmu = {
	.enabled = gf100_pmu_enabled,
	.init = gk20a_pmu_init,
	.fini = gk20a_pmu_fini,
	.reset = gf100_pmu_reset,
};
  169. int
  170. gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
  171. {
  172. struct gk20a_pmu *pmu;
  173. if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
  174. return -ENOMEM;
  175. *ppmu = &pmu->base;
  176. nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
  177. pmu->data = &gk20a_dvfs_data;
  178. nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
  179. return 0;
  180. }