/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/**
 * @file mali_kbase_pm.c
 * Base kernel power management APIs
 */
  19. #include <mali_kbase.h>
  20. #include <mali_midg_regmap.h>
  21. #include <mali_kbase_config_defaults.h>
  22. #include <mali_kbase_instr.h>
  23. #include <mali_kbase_pm.h>
  24. int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
  25. {
  26. return kbase_hwaccess_pm_powerup(kbdev, flags);
  27. }
  28. void kbase_pm_halt(struct kbase_device *kbdev)
  29. {
  30. kbase_hwaccess_pm_halt(kbdev);
  31. }
  32. void kbase_pm_context_active(struct kbase_device *kbdev)
  33. {
  34. (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
  35. }
  36. int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
  37. {
  38. struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
  39. int c;
  40. int old_count;
  41. KBASE_DEBUG_ASSERT(kbdev != NULL);
  42. /* Trace timeline information about how long it took to handle the decision
  43. * to powerup. Sometimes the event might be missed due to reading the count
  44. * outside of mutex, but this is necessary to get the trace timing
  45. * correct. */
  46. old_count = kbdev->pm.active_count;
  47. if (old_count == 0)
  48. kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
  49. mutex_lock(&js_devdata->runpool_mutex);
  50. mutex_lock(&kbdev->pm.lock);
  51. if (kbase_pm_is_suspending(kbdev)) {
  52. switch (suspend_handler) {
  53. case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
  54. if (kbdev->pm.active_count != 0)
  55. break;
  56. /* FALLTHROUGH */
  57. case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
  58. mutex_unlock(&kbdev->pm.lock);
  59. mutex_unlock(&js_devdata->runpool_mutex);
  60. if (old_count == 0)
  61. kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
  62. return 1;
  63. case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
  64. /* FALLTHROUGH */
  65. default:
  66. KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
  67. break;
  68. }
  69. }
  70. c = ++kbdev->pm.active_count;
  71. KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
  72. KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
  73. /* Trace the event being handled */
  74. if (old_count == 0)
  75. kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
  76. if (c == 1)
  77. /* First context active: Power on the GPU and any cores requested by
  78. * the policy */
  79. kbase_hwaccess_pm_gpu_active(kbdev);
  80. mutex_unlock(&kbdev->pm.lock);
  81. mutex_unlock(&js_devdata->runpool_mutex);
  82. return 0;
  83. }
  84. KBASE_EXPORT_TEST_API(kbase_pm_context_active);
  85. void kbase_pm_context_idle(struct kbase_device *kbdev)
  86. {
  87. struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
  88. int c;
  89. int old_count;
  90. KBASE_DEBUG_ASSERT(kbdev != NULL);
  91. /* Trace timeline information about how long it took to handle the decision
  92. * to powerdown. Sometimes the event might be missed due to reading the
  93. * count outside of mutex, but this is necessary to get the trace timing
  94. * correct. */
  95. old_count = kbdev->pm.active_count;
  96. if (old_count == 0)
  97. kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
  98. mutex_lock(&js_devdata->runpool_mutex);
  99. mutex_lock(&kbdev->pm.lock);
  100. c = --kbdev->pm.active_count;
  101. KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
  102. KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
  103. KBASE_DEBUG_ASSERT(c >= 0);
  104. /* Trace the event being handled */
  105. if (old_count == 0)
  106. kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
  107. if (c == 0) {
  108. /* Last context has gone idle */
  109. kbase_hwaccess_pm_gpu_idle(kbdev);
  110. /* Wake up anyone waiting for this to become 0 (e.g. suspend). The
  111. * waiters must synchronize with us by locking the pm.lock after
  112. * waiting */
  113. wake_up(&kbdev->pm.zero_active_count_wait);
  114. }
  115. mutex_unlock(&kbdev->pm.lock);
  116. mutex_unlock(&js_devdata->runpool_mutex);
  117. }
  118. KBASE_EXPORT_TEST_API(kbase_pm_context_idle);
  119. void kbase_pm_suspend(struct kbase_device *kbdev)
  120. {
  121. KBASE_DEBUG_ASSERT(kbdev);
  122. mutex_lock(&kbdev->pm.lock);
  123. KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
  124. kbdev->pm.suspending = true;
  125. mutex_unlock(&kbdev->pm.lock);
  126. /* From now on, the active count will drop towards zero. Sometimes, it'll
  127. * go up briefly before going down again. However, once it reaches zero it
  128. * will stay there - guaranteeing that we've idled all pm references */
  129. /* Suspend job scheduler and associated components, so that it releases all
  130. * the PM active count references */
  131. kbasep_js_suspend(kbdev);
  132. /* Suspend any counter collection that might be happening */
  133. kbase_instr_hwcnt_suspend(kbdev);
  134. /* Wait for the active count to reach zero. This is not the same as
  135. * waiting for a power down, since not all policies power down when this
  136. * reaches zero. */
  137. wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
  138. /* NOTE: We synchronize with anything that was just finishing a
  139. * kbase_pm_context_idle() call by locking the pm.lock below */
  140. kbase_hwaccess_pm_suspend(kbdev);
  141. }
  142. void kbase_pm_resume(struct kbase_device *kbdev)
  143. {
  144. /* MUST happen before any pm_context_active calls occur */
  145. kbase_hwaccess_pm_resume(kbdev);
  146. /* Initial active call, to power on the GPU/cores if needed */
  147. kbase_pm_context_active(kbdev);
  148. /* Re-enable instrumentation, if it was previously disabled */
  149. kbase_instr_hwcnt_resume(kbdev);
  150. /* Resume any blocked atoms (which may cause contexts to be scheduled in
  151. * and dependent atoms to run) */
  152. kbase_resume_suspended_soft_jobs(kbdev);
  153. /* Resume the Job Scheduler and associated components, and start running
  154. * atoms */
  155. kbasep_js_resume(kbdev);
  156. /* Matching idle call, to power off the GPU/cores if we didn't actually
  157. * need it and the policy doesn't want it on */
  158. kbase_pm_context_idle(kbdev);
  159. }