mali_kbase_pm.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197
  1. /*
  2. *
  3. * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
  4. *
  5. * This program is free software and is provided to you under the terms of the
  6. * GNU General Public License version 2 as published by the Free Software
  7. * Foundation, and any use by you of this program is subject to the terms
  8. * of such GNU licence.
  9. *
  10. * A copy of the licence is included with the program, and can also be obtained
  11. * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  12. * Boston, MA 02110-1301, USA.
  13. *
  14. */
  15. /**
  16. * @file mali_kbase_pm.c
  17. * Base kernel power management APIs
  18. */
  19. #include <mali_kbase.h>
  20. #include <mali_midg_regmap.h>
  21. #include <mali_kbase_config_defaults.h>
  22. #include <mali_kbase_instr.h>
  23. #include <mali_kbase_pm.h>
  24. int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
  25. {
  26. return kbase_hwaccess_pm_powerup(kbdev, flags);
  27. }
  28. void kbase_pm_halt(struct kbase_device *kbdev)
  29. {
  30. kbase_hwaccess_pm_halt(kbdev);
  31. }
  32. void kbase_pm_context_active(struct kbase_device *kbdev)
  33. {
  34. (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
  35. }
/**
 * kbase_pm_context_active_handle_suspend - Take a PM active reference,
 * choosing how to react if a suspend is in progress.
 *
 * @kbdev:           The kbase device structure for the device (must not
 *                   be NULL)
 * @suspend_handler: Policy while suspending:
 *                   DONT_REACTIVATE - refuse only if this would be the
 *                   first reference (count currently 0, i.e. it would
 *                   power the GPU back up);
 *                   DONT_INCREASE - always refuse while suspending;
 *                   NOT_POSSIBLE - caller guarantees no suspend can be
 *                   in progress, so the reference is always taken.
 *
 * If this becomes the first active reference (count goes 0 -> 1) the
 * GPU is powered on via kbase_hwaccess_pm_gpu_active().
 *
 * Return: 0 if the reference was taken, 1 if it was refused because a
 * suspend is in progress.
 */
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to powerup. Sometimes the event might be missed due to reading the count
	 * outside of mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	/* Lock order: runpool_mutex before pm.lock — matches
	 * kbase_pm_context_idle(). */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			/* Non-zero count: GPU is already active, so one more
			 * reference cannot re-activate it — allow it. */
			if (kbdev->pm.active_count != 0)
				break;
			/* FALLTHROUGH */
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			/* Refuse the reference: drop locks (reverse order)
			 * and close out the traced event before returning. */
			mutex_unlock(&kbdev->pm.lock);
			mutex_unlock(&js_devdata->runpool_mutex);
			if (old_count == 0)
				kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
			return 1;
		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			/* FALLTHROUGH */
		default:
			/* NOT_POSSIBLE callers promise no suspend can be in
			 * progress; reaching here is a caller bug. In release
			 * builds the reference is still taken below. */
			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	if (c == 1)
		/* First context active: Power on the GPU and any cores requested by
		 * the policy */
		kbase_hwaccess_pm_gpu_active(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	return 0;
}
/**
 * kbase_pm_context_idle - Release a PM active reference.
 *
 * @kbdev: The kbase device structure for the device (must not be NULL)
 *
 * Decrements pm.active_count under runpool_mutex + pm.lock. When the
 * count reaches zero the GPU is marked idle via
 * kbase_hwaccess_pm_gpu_idle() and any waiters on
 * zero_active_count_wait (e.g. kbase_pm_suspend()) are woken.
 *
 * Must be balanced with a prior successful
 * kbase_pm_context_active[_handle_suspend]() call; the assert below
 * catches underflow in debug builds.
 */
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to powerdown. Sometimes the event might be missed due to reading the
	 * count outside of mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	/* Lock order: runpool_mutex before pm.lock — matches
	 * kbase_pm_context_active_handle_suspend(). */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	c = --kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

	/* A negative count means an unbalanced idle call. */
	KBASE_DEBUG_ASSERT(c >= 0);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_hwaccess_pm_gpu_idle(kbdev);

		/* Wake up anyone waiting for this to become 0 (e.g. suspend). The
		 * waiters must synchronize with us by locking the pm.lock after
		 * waiting */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}
/**
 * kbase_pm_suspend - Suspend device power management.
 *
 * @kbdev: The kbase device structure for the device (must not be NULL)
 *
 * Sets pm.suspending under pm.lock, quiesces the job scheduler and
 * hardware counter collection, then blocks until pm.active_count
 * drops to zero before handing off to the backend suspend. Must not
 * be called while a suspend is already in progress (debug assert).
 */
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	/* Published under pm.lock: after this, active-reference requests
	 * see kbase_pm_is_suspending() as true and consult their
	 * suspend_handler. */
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend job scheduler and associated components, so that it releases all
	 * the PM active count references */
	kbasep_js_suspend(kbdev);

	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */
	kbase_hwaccess_pm_suspend(kbdev);
}
/**
 * kbase_pm_resume - Resume device power management after a suspend.
 *
 * @kbdev: The kbase device structure for the device
 *
 * Reverses kbase_pm_suspend(): resumes the backend, takes a temporary
 * PM active reference while instrumentation, soft jobs and the job
 * scheduler are restarted, then releases that reference. The call
 * order below is load-bearing — do not reorder.
 */
void kbase_pm_resume(struct kbase_device *kbdev)
{
	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
  157. }