radeon_pm.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908
  1. /*
  2. * Permission is hereby granted, free of charge, to any person obtaining a
  3. * copy of this software and associated documentation files (the "Software"),
  4. * to deal in the Software without restriction, including without limitation
  5. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  6. * and/or sell copies of the Software, and to permit persons to whom the
  7. * Software is furnished to do so, subject to the following conditions:
  8. *
  9. * The above copyright notice and this permission notice shall be included in
  10. * all copies or substantial portions of the Software.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  13. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  15. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  16. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  17. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  18. * OTHER DEALINGS IN THE SOFTWARE.
  19. *
  20. * Authors: Rafał Miłecki <zajec5@gmail.com>
  21. * Alex Deucher <alexdeucher@gmail.com>
  22. */
  23. #include <drm/drmP.h>
  24. #include "radeon.h"
  25. #include "avivod.h"
  26. #include "atom.h"
  27. #include "r600_dpm.h"
  28. #include <linux/power_supply.h>
  29. #include <linux/hwmon.h>
  30. #include <linux/hwmon-sysfs.h>
  31. #define RADEON_IDLE_LOOP_MS 100
  32. #define RADEON_RECLOCK_DELAY_MS 200
  33. #define RADEON_WAIT_VBLANK_TIMEOUT 200
/* Printable names for enum radeon_pm_state_type values; indexed by
 * radeon_power_state::type in radeon_pm_print_states().  Index 0 prints
 * as the empty string. */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

/* forward declarations for the legacy (non-DPM) PM paths below */
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
  47. int radeon_pm_get_type_index(struct radeon_device *rdev,
  48. enum radeon_pm_state_type ps_type,
  49. int instance)
  50. {
  51. int i;
  52. int found_instance = -1;
  53. for (i = 0; i < rdev->pm.num_power_states; i++) {
  54. if (rdev->pm.power_state[i].type == ps_type) {
  55. found_instance++;
  56. if (found_instance == instance)
  57. return i;
  58. }
  59. }
  60. /* return default if no match */
  61. return rdev->pm.default_power_state_index;
  62. }
/* ACPI AC/DC power-source event callback.
 *
 * DPM: cache the new AC/DC state under pm.mutex and, on ARUBA parts
 * that provide an enable_bapm hook, re-apply BAPM to match it.
 * Profile method: an AUTO profile is recomputed so clocks track the
 * new power source.  Other PM methods ignore the event.
 */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}
/* Map the user-selected profile (plus AC/DC state and active display
 * count) to a concrete pm.profiles[] index, then load the requested
 * power state / clock mode indices from that entry.
 * Callers in this file hold pm.mutex around this function. */
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		/* AUTO: HIGH on AC power, MID on battery; the MH
		 * (multi-head) variant when more than one CRTC is lit */
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	/* with no active displays use the profile's dpms-off state,
	 * otherwise its dpms-on state */
	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}
  135. static void radeon_unmap_vram_bos(struct radeon_device *rdev)
  136. {
  137. struct radeon_bo *bo, *n;
  138. if (list_empty(&rdev->gem.objects))
  139. return;
  140. list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
  141. if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
  142. ttm_bo_unmap_virtual(&bo->tbo);
  143. }
  144. }
/* Wait (up to RADEON_WAIT_VBLANK_TIMEOUT ms) for pm.vblank_sync to be
 * raised — set by the vblank interrupt path, not shown in this file —
 * so the reclock lands near a vblank.  No-op when no CRTCs are active. */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}
  154. static void radeon_set_power_state(struct radeon_device *rdev)
  155. {
  156. u32 sclk, mclk;
  157. bool misc_after = false;
  158. if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
  159. (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
  160. return;
  161. if (radeon_gui_idle(rdev)) {
  162. sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
  163. clock_info[rdev->pm.requested_clock_mode_index].sclk;
  164. if (sclk > rdev->pm.default_sclk)
  165. sclk = rdev->pm.default_sclk;
  166. /* starting with BTC, there is one state that is used for both
  167. * MH and SH. Difference is that we always use the high clock index for
  168. * mclk and vddci.
  169. */
  170. if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
  171. (rdev->family >= CHIP_BARTS) &&
  172. rdev->pm.active_crtc_count &&
  173. ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
  174. (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
  175. mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
  176. clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
  177. else
  178. mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
  179. clock_info[rdev->pm.requested_clock_mode_index].mclk;
  180. if (mclk > rdev->pm.default_mclk)
  181. mclk = rdev->pm.default_mclk;
  182. /* upvolt before raising clocks, downvolt after lowering clocks */
  183. if (sclk < rdev->pm.current_sclk)
  184. misc_after = true;
  185. radeon_sync_with_vblank(rdev);
  186. if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
  187. if (!radeon_pm_in_vbl(rdev))
  188. return;
  189. }
  190. radeon_pm_prepare(rdev);
  191. if (!misc_after)
  192. /* voltage, pcie lanes, etc.*/
  193. radeon_pm_misc(rdev);
  194. /* set engine clock */
  195. if (sclk != rdev->pm.current_sclk) {
  196. radeon_pm_debug_check_in_vbl(rdev, false);
  197. radeon_set_engine_clock(rdev, sclk);
  198. radeon_pm_debug_check_in_vbl(rdev, true);
  199. rdev->pm.current_sclk = sclk;
  200. DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
  201. }
  202. /* set memory clock */
  203. if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
  204. radeon_pm_debug_check_in_vbl(rdev, false);
  205. radeon_set_memory_clock(rdev, mclk);
  206. radeon_pm_debug_check_in_vbl(rdev, true);
  207. rdev->pm.current_mclk = mclk;
  208. DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
  209. }
  210. if (misc_after)
  211. /* voltage, pcie lanes, etc.*/
  212. radeon_pm_misc(rdev);
  213. radeon_pm_finish(rdev);
  214. rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
  215. rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
  216. } else
  217. DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
  218. }
/* Transition the GPU to the requested power state.
 *
 * Lock order: pm.mclk_lock (write) then ring_lock.  Drains all ready
 * rings, unmaps VRAM BO CPU mappings, and holds a vblank reference on
 * every active CRTC across radeon_set_power_state(), then refreshes
 * bandwidth/watermark info for the new state.  Bails out early when
 * nothing would change, or when a fence wait fails (GPU hang — a reset
 * is needed, so leave the clocks untouched). */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	struct drm_crtc *crtc;
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* pin a vblank reference for each active CRTC so vblank counting
	 * stays alive across the reclock */
	if (rdev->irq.installed) {
		i = 0;
		drm_for_each_crtc(crtc, rdev->ddev) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				/* This can fail if a modeset is in progress */
				if (drm_crtc_vblank_get(crtc) == 0)
					rdev->pm.req_vblank |= (1 << i);
				else
					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
							 i);
			}
			i++;
		}
	}

	radeon_set_power_state(rdev);

	/* release only the references we actually obtained above */
	if (rdev->irq.installed) {
		i = 0;
		drm_for_each_crtc(crtc, rdev->ddev) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_crtc_vblank_put(crtc);
			}
			i++;
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
  277. static void radeon_pm_print_states(struct radeon_device *rdev)
  278. {
  279. int i, j;
  280. struct radeon_power_state *power_state;
  281. struct radeon_pm_clock_info *clock_info;
  282. DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
  283. for (i = 0; i < rdev->pm.num_power_states; i++) {
  284. power_state = &rdev->pm.power_state[i];
  285. DRM_DEBUG_DRIVER("State %d: %s\n", i,
  286. radeon_pm_state_type_name[power_state->type]);
  287. if (i == rdev->pm.default_power_state_index)
  288. DRM_DEBUG_DRIVER("\tDefault");
  289. if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
  290. DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
  291. if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
  292. DRM_DEBUG_DRIVER("\tSingle display only\n");
  293. DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
  294. for (j = 0; j < power_state->num_clock_modes; j++) {
  295. clock_info = &(power_state->clock_info[j]);
  296. if (rdev->flags & RADEON_IS_IGP)
  297. DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
  298. j,
  299. clock_info->sclk * 10);
  300. else
  301. DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
  302. j,
  303. clock_info->sclk * 10,
  304. clock_info->mclk * 10,
  305. clock_info->voltage.voltage);
  306. }
  307. }
  308. }
  309. static ssize_t radeon_get_pm_profile(struct device *dev,
  310. struct device_attribute *attr,
  311. char *buf)
  312. {
  313. struct drm_device *ddev = dev_get_drvdata(dev);
  314. struct radeon_device *rdev = ddev->dev_private;
  315. int cp = rdev->pm.profile;
  316. return snprintf(buf, PAGE_SIZE, "%s\n",
  317. (cp == PM_PROFILE_AUTO) ? "auto" :
  318. (cp == PM_PROFILE_LOW) ? "low" :
  319. (cp == PM_PROFILE_MID) ? "mid" :
  320. (cp == PM_PROFILE_HIGH) ? "high" : "default");
  321. }
/* sysfs power_profile store: select the legacy profile ("default",
 * "auto", "low", "mid", "high"; prefix-matched) and apply it.
 * Only valid with the "profile" power method; returns -EINVAL for an
 * unknown string, a different method, or a powered-down PX card. */
static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		/* re-derive the profile index and reprogram the clocks */
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;
fail:
	mutex_unlock(&rdev->pm.mutex);
	return count;
}
  357. static ssize_t radeon_get_pm_method(struct device *dev,
  358. struct device_attribute *attr,
  359. char *buf)
  360. {
  361. struct drm_device *ddev = dev_get_drvdata(dev);
  362. struct radeon_device *rdev = ddev->dev_private;
  363. int pm = rdev->pm.pm_method;
  364. return snprintf(buf, PAGE_SIZE, "%s\n",
  365. (pm == PM_METHOD_DYNPM) ? "dynpm" :
  366. (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
  367. }
/* sysfs power_method store: switch between "dynpm" and "profile".
 * Rejects writes while a PX card is powered down and any change away
 * from DPM (the legacy methods are not supported once DPM is active). */
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		/* flush any queued dynpm work; done after dropping
		 * pm.mutex — presumably because the idle worker takes
		 * that lock itself (worker not visible here; confirm) */
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
  408. static ssize_t radeon_get_dpm_state(struct device *dev,
  409. struct device_attribute *attr,
  410. char *buf)
  411. {
  412. struct drm_device *ddev = dev_get_drvdata(dev);
  413. struct radeon_device *rdev = ddev->dev_private;
  414. enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
  415. return snprintf(buf, PAGE_SIZE, "%s\n",
  416. (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
  417. (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
  418. }
/* sysfs power_dpm_state store: set the user DPM state ("battery",
 * "balanced" or "performance"; prefix-matched).  The new state is
 * recorded under pm.mutex; clocks are only recomputed when the card is
 * actually powered (a PX dGPU may be switched off). */
static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
  446. static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
  447. struct device_attribute *attr,
  448. char *buf)
  449. {
  450. struct drm_device *ddev = dev_get_drvdata(dev);
  451. struct radeon_device *rdev = ddev->dev_private;
  452. enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
  453. if ((rdev->flags & RADEON_IS_PX) &&
  454. (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
  455. return snprintf(buf, PAGE_SIZE, "off\n");
  456. return snprintf(buf, PAGE_SIZE, "%s\n",
  457. (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
  458. (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
  459. }
/* sysfs power_dpm_force_performance_level store: force the DPM level
 * to "low", "high" or "auto" via the asic callback.  Rejected while a
 * PX card is powered down, while thermal throttling is active, when
 * the asic lacks the callback's support, or when the callback fails. */
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		/* don't fight the thermal-protection path */
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);
	return count;
}
  497. static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
  498. struct device_attribute *attr,
  499. char *buf)
  500. {
  501. struct radeon_device *rdev = dev_get_drvdata(dev);
  502. u32 pwm_mode = 0;
  503. if (rdev->asic->dpm.fan_ctrl_get_mode)
  504. pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
  505. /* never 0 (full-speed), fuse or smc-controlled always */
  506. return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
  507. }
  508. static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
  509. struct device_attribute *attr,
  510. const char *buf,
  511. size_t count)
  512. {
  513. struct radeon_device *rdev = dev_get_drvdata(dev);
  514. int err;
  515. int value;
  516. if(!rdev->asic->dpm.fan_ctrl_set_mode)
  517. return -EINVAL;
  518. err = kstrtoint(buf, 10, &value);
  519. if (err)
  520. return err;
  521. switch (value) {
  522. case 1: /* manual, percent-based */
  523. rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
  524. break;
  525. default: /* disable */
  526. rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
  527. break;
  528. }
  529. return count;
  530. }
  531. static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
  532. struct device_attribute *attr,
  533. char *buf)
  534. {
  535. return sprintf(buf, "%i\n", 0);
  536. }
  537. static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
  538. struct device_attribute *attr,
  539. char *buf)
  540. {
  541. return sprintf(buf, "%i\n", 255);
  542. }
  543. static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
  544. struct device_attribute *attr,
  545. const char *buf, size_t count)
  546. {
  547. struct radeon_device *rdev = dev_get_drvdata(dev);
  548. int err;
  549. u32 value;
  550. err = kstrtou32(buf, 10, &value);
  551. if (err)
  552. return err;
  553. value = (value * 100) / 255;
  554. err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
  555. if (err)
  556. return err;
  557. return count;
  558. }
  559. static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
  560. struct device_attribute *attr,
  561. char *buf)
  562. {
  563. struct radeon_device *rdev = dev_get_drvdata(dev);
  564. int err;
  565. u32 speed;
  566. err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
  567. if (err)
  568. return err;
  569. speed = (speed * 255) / 100;
  570. return sprintf(buf, "%i\n", speed);
  571. }
/* legacy PM and DPM control files exposed on the drm device */
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);
/* hwmon temp1_input show: report the GPU temperature from the asic's
 * get_temperature hook (0 when the hook is absent).  Returns -EINVAL
 * while a PX card is powered down. */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
  595. static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
  596. struct device_attribute *attr,
  597. char *buf)
  598. {
  599. struct radeon_device *rdev = dev_get_drvdata(dev);
  600. int hyst = to_sensor_dev_attr(attr)->index;
  601. int temp;
  602. if (hyst)
  603. temp = rdev->pm.dpm.thermal.min_temp;
  604. else
  605. temp = rdev->pm.dpm.thermal.max_temp;
  606. return snprintf(buf, PAGE_SIZE, "%d\n", temp);
  607. }
/* hwmon sensor attributes: temperature (input/crit/crit_hyst) and fan
 * PWM control; the last SENSOR_DEVICE_ATTR argument is the index read
 * back via to_sensor_dev_attr() in the show handlers */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);

/* full attribute list; per-device visibility is decided at runtime by
 * hwmon_attributes_visible() */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};
/* is_visible callback for the hwmon attribute group: decides, per
 * attribute, whether it is exposed and with which permissions, based on
 * the PM method, fan presence, and which asic fan callbacks exist.
 * Returning 0 hides the attribute entirely. */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (rdev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!rdev->asic->dpm.get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     !rdev->asic->dpm.get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}
/* Attribute group wiring the table above to its visibility filter. */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated group list passed to hwmon_device_register_with_groups(). */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
  674. static int radeon_hwmon_init(struct radeon_device *rdev)
  675. {
  676. int err = 0;
  677. switch (rdev->pm.int_thermal_type) {
  678. case THERMAL_TYPE_RV6XX:
  679. case THERMAL_TYPE_RV770:
  680. case THERMAL_TYPE_EVERGREEN:
  681. case THERMAL_TYPE_NI:
  682. case THERMAL_TYPE_SUMO:
  683. case THERMAL_TYPE_SI:
  684. case THERMAL_TYPE_CI:
  685. case THERMAL_TYPE_KV:
  686. if (rdev->asic->pm.get_temperature == NULL)
  687. return err;
  688. rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
  689. "radeon", rdev,
  690. hwmon_groups);
  691. if (IS_ERR(rdev->pm.int_hwmon_dev)) {
  692. err = PTR_ERR(rdev->pm.int_hwmon_dev);
  693. dev_err(rdev->dev,
  694. "Unable to register hwmon device: %d\n", err);
  695. }
  696. break;
  697. default:
  698. break;
  699. }
  700. return err;
  701. }
  702. static void radeon_hwmon_fini(struct radeon_device *rdev)
  703. {
  704. if (rdev->pm.int_hwmon_dev)
  705. hwmon_device_unregister(rdev->pm.int_hwmon_dev);
  706. }
/*
 * radeon_dpm_thermal_work_handler - worker run on thermal events
 * @work: the pm.dpm.thermal.work item embedded in struct radeon_device
 *
 * Forces the internal THERMAL power state while the GPU is hot and
 * restores the user-selected state once the temperature drops below
 * pm.dpm.thermal.min_temp (or, on asics without a temperature read-back,
 * once the high_to_low flag reports the downward crossing).  Ends with
 * radeon_pm_compute_clocks() to actually apply the chosen state.
 */
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	/* bail out if dpm init failed or it was disabled */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}

	/* record whether the thermal override is active under pm.mutex */
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
  735. static bool radeon_dpm_single_display(struct radeon_device *rdev)
  736. {
  737. bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
  738. true : false;
  739. /* check if the vblank period is too short to adjust the mclk */
  740. if (single_display && rdev->asic->dpm.vblank_too_short) {
  741. if (radeon_dpm_vblank_too_short(rdev))
  742. single_display = false;
  743. }
  744. /* 120hz tends to be problematic even if they are under the
  745. * vblank limit.
  746. */
  747. if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
  748. single_display = false;
  749. return single_display;
  750. }
/*
 * radeon_dpm_pick_power_state - select the best matching power state
 * @rdev: radeon_device pointer
 * @dpm_state: requested power state type
 *
 * Scans the parsed power-state table for the first state matching
 * @dpm_state (honouring ATOM_PPLIB_SINGLE_DISPLAY_ONLY against the
 * current display count).  If no state matches, @dpm_state is demoted
 * along a fixed fallback chain (e.g. UVD_SD -> UVD_HD -> uvd_ps/
 * PERFORMANCE, THERMAL -> ACPI -> BATTERY -> PERFORMANCE) and the scan
 * restarts.  Returns NULL only when even the fallbacks find nothing.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* a dedicated UVD state (if any) always wins */
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
/*
 * radeon_dpm_change_power_state_locked - switch to the requested power state
 * @rdev: radeon_device pointer
 *
 * Picks a power state for the current pm.dpm.state and programs it into
 * the hardware: updates watermarks/display config, drains all rings,
 * sets the new state and re-applies the forced performance level.
 * If the requested state equals the current one, only the display
 * configuration is refreshed where required (pre-BTC/APU, or BTC+ with
 * more than one crtc); otherwise nothing is reprogrammed.
 *
 * NOTE(review): the _locked suffix suggests the caller holds pm.mutex —
 * confirm against callers.  The function itself takes mclk_lock (write)
 * and ring_lock, in that order, around the hardware transition.
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;

			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
/*
 * radeon_dpm_enable_uvd - notify dpm of UVD (video decode) activity
 * @rdev: radeon_device pointer
 * @enable: true when UVD becomes active
 *
 * On asics with UVD power-gating, gates/ungates the UVD block directly
 * (keeping it on while any sd/hd streams exist, even if paused).
 * Otherwise switches the dpm state to the internal UVD state while
 * active and recomputes clocks.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
  1026. void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
  1027. {
  1028. if (enable) {
  1029. mutex_lock(&rdev->pm.mutex);
  1030. rdev->pm.dpm.vce_active = true;
  1031. /* XXX select vce level based on ring/task */
  1032. rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
  1033. mutex_unlock(&rdev->pm.mutex);
  1034. } else {
  1035. mutex_lock(&rdev->pm.mutex);
  1036. rdev->pm.dpm.vce_active = false;
  1037. mutex_unlock(&rdev->pm.mutex);
  1038. }
  1039. radeon_pm_compute_clocks(rdev);
  1040. }
  1041. static void radeon_pm_suspend_old(struct radeon_device *rdev)
  1042. {
  1043. mutex_lock(&rdev->pm.mutex);
  1044. if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
  1045. if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
  1046. rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
  1047. }
  1048. mutex_unlock(&rdev->pm.mutex);
  1049. cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
  1050. }
/*
 * radeon_pm_suspend_dpm - suspend the dpm path
 * @rdev: radeon_device pointer
 *
 * Disables dpm and resets the tracked power state to the boot state so
 * resume starts from a known baseline.
 */
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}
  1061. void radeon_pm_suspend(struct radeon_device *rdev)
  1062. {
  1063. if (rdev->pm.pm_method == PM_METHOD_DPM)
  1064. radeon_pm_suspend_dpm(rdev);
  1065. else
  1066. radeon_pm_suspend_old(rdev);
  1067. }
/*
 * radeon_pm_resume_old - resume the non-dpm pm paths
 * @rdev: radeon_device pointer
 *
 * Restores default voltages/clocks (BARTS..CAYMAN with MC ucode only),
 * resets the tracked power state to the defaults (asic init reset the
 * hardware), restarts a suspended dynpm state machine and recomputes
 * clocks.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	/* re-arm the idle work if dynpm was suspended */
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
/*
 * radeon_pm_resume_dpm - resume the dpm path
 * @rdev: radeon_device pointer
 *
 * Resets the tracked state to the boot state (asic init reset the
 * hardware) and re-enables dpm.  On failure, dpm stays disabled and
 * default voltages/clocks are restored where the MC ucode allows
 * (BARTS..CAYMAN dGPUs).
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	/* fall back to safe default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
  1134. void radeon_pm_resume(struct radeon_device *rdev)
  1135. {
  1136. if (rdev->pm.pm_method == PM_METHOD_DPM)
  1137. radeon_pm_resume_dpm(rdev);
  1138. else
  1139. radeon_pm_resume_old(rdev);
  1140. }
/*
 * radeon_pm_init_old - initialize the legacy (profile/dynpm) pm paths
 * @rdev: radeon_device pointer
 *
 * Seeds pm defaults, parses the power tables from the BIOS (atom or
 * combios), restores default voltages/clocks where the MC ucode allows,
 * registers the hwmon sensor and sets up the dynpm idle work.
 * Returns 0 on success or the hwmon init error.
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}
		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
  1190. static void radeon_dpm_print_power_states(struct radeon_device *rdev)
  1191. {
  1192. int i;
  1193. for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
  1194. printk("== power state %d ==\n", i);
  1195. radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
  1196. }
  1197. }
/*
 * radeon_pm_init_dpm - initialize the dpm path
 * @rdev: radeon_device pointer
 *
 * Seeds pm/dpm defaults, parses the atom power tables (dpm requires an
 * atom BIOS), registers the hwmon sensor, sets up the thermal worker and
 * enables dpm.  On enable failure the default voltages/clocks are
 * restored where the MC ucode allows (BARTS..CAYMAN dGPUs).
 * Returns 0 on success or a negative error code.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* dpm only works with an atom BIOS */
	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* fall back to safe default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
/* PCI identity (device + subsystem) of one board that needs dpm quirked */
struct radeon_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
};
/* cards with dpm stability problems; list is zero-terminated */
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },
};
/*
 * radeon_pm_init - initialize power management
 * @rdev: radeon_device pointer
 *
 * Chooses between dpm and the legacy profile method based on asic
 * family, available firmware (RLC always; SMC for RV770+ dGPUs), the
 * quirk list and the radeon_dpm module parameter, then runs the
 * corresponding init path.  Returns 0 on success, negative error code
 * on failure.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			/* dpm is opt-in on these older families */
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			/* quirked board, user left the default */
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			/* dpm is the default on these families */
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
  1355. int radeon_pm_late_init(struct radeon_device *rdev)
  1356. {
  1357. int ret = 0;
  1358. if (rdev->pm.pm_method == PM_METHOD_DPM) {
  1359. if (rdev->pm.dpm_enabled) {
  1360. if (!rdev->pm.sysfs_initialized) {
  1361. ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
  1362. if (ret)
  1363. DRM_ERROR("failed to create device file for dpm state\n");
  1364. ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
  1365. if (ret)
  1366. DRM_ERROR("failed to create device file for dpm state\n");
  1367. /* XXX: these are noops for dpm but are here for backwards compat */
  1368. ret = device_create_file(rdev->dev, &dev_attr_power_profile);
  1369. if (ret)
  1370. DRM_ERROR("failed to create device file for power profile\n");
  1371. ret = device_create_file(rdev->dev, &dev_attr_power_method);
  1372. if (ret)
  1373. DRM_ERROR("failed to create device file for power method\n");
  1374. rdev->pm.sysfs_initialized = true;
  1375. }
  1376. mutex_lock(&rdev->pm.mutex);
  1377. ret = radeon_dpm_late_enable(rdev);
  1378. mutex_unlock(&rdev->pm.mutex);
  1379. if (ret) {
  1380. rdev->pm.dpm_enabled = false;
  1381. DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
  1382. } else {
  1383. /* set the dpm state for PX since there won't be
  1384. * a modeset to call this.
  1385. */
  1386. radeon_pm_compute_clocks(rdev);
  1387. }
  1388. }
  1389. } else {
  1390. if ((rdev->pm.num_power_states > 1) &&
  1391. (!rdev->pm.sysfs_initialized)) {
  1392. /* where's the best place to put these? */
  1393. ret = device_create_file(rdev->dev, &dev_attr_power_profile);
  1394. if (ret)
  1395. DRM_ERROR("failed to create device file for power profile\n");
  1396. ret = device_create_file(rdev->dev, &dev_attr_power_method);
  1397. if (ret)
  1398. DRM_ERROR("failed to create device file for power method\n");
  1399. if (!ret)
  1400. rdev->pm.sysfs_initialized = true;
  1401. }
  1402. }
  1403. return ret;
  1404. }
/*
 * radeon_pm_fini_old - tear down the legacy pm paths
 * @rdev: radeon_device pointer
 *
 * Restores default clocks (via profile or dynpm), cancels the idle
 * work, removes the sysfs files, unregisters hwmon and frees the parsed
 * power-state table.
 */
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
	}

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
/*
 * radeon_pm_fini_dpm - tear down the dpm path
 * @rdev: radeon_device pointer
 *
 * Disables dpm, removes the sysfs files, runs the asic dpm teardown,
 * unregisters hwmon and frees the parsed power-state table.
 */
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);

		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
		/* XXX backwards compat */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
  1443. void radeon_pm_fini(struct radeon_device *rdev)
  1444. {
  1445. if (rdev->pm.pm_method == PM_METHOD_DPM)
  1446. radeon_pm_fini_dpm(rdev);
  1447. else
  1448. radeon_pm_fini_old(rdev);
  1449. }
/*
 * radeon_pm_compute_clocks_old - recompute clocks for the legacy pm paths
 * @rdev: radeon_device pointer
 *
 * Recounts the enabled crtcs, then either re-applies the active profile
 * or drives the dynpm state machine: pause dynpm with >1 active crtc,
 * run it (with the idle worker armed) with exactly one, and drop to the
 * minimum state with none.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */
				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
  1512. static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
  1513. {
  1514. struct drm_device *ddev = rdev->ddev;
  1515. struct drm_crtc *crtc;
  1516. struct radeon_crtc *radeon_crtc;
  1517. if (!rdev->pm.dpm_enabled)
  1518. return;
  1519. mutex_lock(&rdev->pm.mutex);
  1520. /* update active crtc counts */
  1521. rdev->pm.dpm.new_active_crtcs = 0;
  1522. rdev->pm.dpm.new_active_crtc_count = 0;
  1523. if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
  1524. list_for_each_entry(crtc,
  1525. &ddev->mode_config.crtc_list, head) {
  1526. radeon_crtc = to_radeon_crtc(crtc);
  1527. if (crtc->enabled) {
  1528. rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
  1529. rdev->pm.dpm.new_active_crtc_count++;
  1530. }
  1531. }
  1532. }
  1533. /* update battery/ac status */
  1534. if (power_supply_is_system_supplied() > 0)
  1535. rdev->pm.dpm.ac_power = true;
  1536. else
  1537. rdev->pm.dpm.ac_power = false;
  1538. radeon_dpm_change_power_state_locked(rdev);
  1539. mutex_unlock(&rdev->pm.mutex);
  1540. }
  1541. void radeon_pm_compute_clocks(struct radeon_device *rdev)
  1542. {
  1543. if (rdev->pm.pm_method == PM_METHOD_DPM)
  1544. radeon_pm_compute_clocks_dpm(rdev);
  1545. else
  1546. radeon_pm_compute_clocks_old(rdev);
  1547. }
  1548. static bool radeon_pm_in_vbl(struct radeon_device *rdev)
  1549. {
  1550. int crtc, vpos, hpos, vbl_status;
  1551. bool in_vbl = true;
  1552. /* Iterate over all active crtc's. All crtc's must be in vblank,
  1553. * otherwise return in_vbl == false.
  1554. */
  1555. for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
  1556. if (rdev->pm.active_crtcs & (1 << crtc)) {
  1557. vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
  1558. crtc,
  1559. USE_REAL_VBLANKSTART,
  1560. &vpos, &hpos, NULL, NULL,
  1561. &rdev->mode_info.crtcs[crtc]->base.hwmode);
  1562. if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
  1563. !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
  1564. in_vbl = false;
  1565. }
  1566. }
  1567. return in_vbl;
  1568. }
  1569. static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
  1570. {
  1571. u32 stat_crtc = 0;
  1572. bool in_vbl = radeon_pm_in_vbl(rdev);
  1573. if (in_vbl == false)
  1574. DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
  1575. finish ? "exit" : "entry");
  1576. return in_vbl;
  1577. }
  1578. static void radeon_dynpm_idle_work_handler(struct work_struct *work)
  1579. {
  1580. struct radeon_device *rdev;
  1581. int resched;
  1582. rdev = container_of(work, struct radeon_device,
  1583. pm.dynpm_idle_work.work);
  1584. resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
  1585. mutex_lock(&rdev->pm.mutex);
  1586. if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
  1587. int not_processed = 0;
  1588. int i;
  1589. for (i = 0; i < RADEON_NUM_RINGS; ++i) {
  1590. struct radeon_ring *ring = &rdev->ring[i];
  1591. if (ring->ready) {
  1592. not_processed += radeon_fence_count_emitted(rdev, i);
  1593. if (not_processed >= 3)
  1594. break;
  1595. }
  1596. }
  1597. if (not_processed >= 3) { /* should upclock */
  1598. if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
  1599. rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
  1600. } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
  1601. rdev->pm.dynpm_can_upclock) {
  1602. rdev->pm.dynpm_planned_action =
  1603. DYNPM_ACTION_UPCLOCK;
  1604. rdev->pm.dynpm_action_timeout = jiffies +
  1605. msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
  1606. }
  1607. } else if (not_processed == 0) { /* should downclock */
  1608. if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
  1609. rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
  1610. } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
  1611. rdev->pm.dynpm_can_downclock) {
  1612. rdev->pm.dynpm_planned_action =
  1613. DYNPM_ACTION_DOWNCLOCK;
  1614. rdev->pm.dynpm_action_timeout = jiffies +
  1615. msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
  1616. }
  1617. }
  1618. /* Note, radeon_pm_set_clocks is called with static_switch set
  1619. * to false since we want to wait for vbl to avoid flicker.
  1620. */
  1621. if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
  1622. jiffies > rdev->pm.dynpm_action_timeout) {
  1623. radeon_pm_get_dynpm_state(rdev);
  1624. radeon_pm_set_clocks(rdev);
  1625. }
  1626. schedule_delayed_work(&rdev->pm.dynpm_idle_work,
  1627. msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
  1628. }
  1629. mutex_unlock(&rdev->pm.mutex);
  1630. ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
  1631. }
  1632. /*
  1633. * Debugfs info
  1634. */
#if defined(CONFIG_DEBUG_FS)
/*
 * radeon_debugfs_pm_info - dump current PM state to debugfs.
 *
 * Three cases: the ASIC is a powered-off PX (hybrid graphics) device,
 * DPM is enabled (defer to the per-ASIC printer if one exists), or
 * legacy PM (print engine/memory clocks, voltage, and PCIE lanes).
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		/* Touching a powered-off PX GPU would be pointless/unsafe. */
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		/* "%u0" appends a literal zero: the stored clock values are
		 * presumably in 10 kHz units, so this prints them as kHz —
		 * TODO confirm unit convention. */
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		/* Memory clock / voltage / lane queries are optional per ASIC. */
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}

/* Single debugfs entry exposing the PM info dump above. */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
/*
 * radeon_debugfs_pm_init - register the PM debugfs file.
 *
 * Registers radeon_pm_info_list with the DRM debugfs helper; compiles
 * to a no-op returning 0 when CONFIG_DEBUG_FS is disabled.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}