/*
 *
 * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Base kernel core availability APIs
 */
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/* Table of every core availability policy compiled into this kernel image.
 * Entry 0 is installed as the default policy by kbase_pm_ca_init(). */
static const struct kbase_pm_ca_policy *const policy_list[] = {
	&kbase_pm_ca_fixed_policy_ops,
};

/**
 * POLICY_COUNT - The number of policies available in the system.
 *
 * This is derived from the number of functions listed in policy_list.
 */
#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
  30. int kbase_pm_ca_init(struct kbase_device *kbdev)
  31. {
  32. KBASE_DEBUG_ASSERT(kbdev != NULL);
  33. kbdev->pm.backend.ca_current_policy = policy_list[0];
  34. kbdev->pm.backend.ca_current_policy->init(kbdev);
  35. return 0;
  36. }
  37. void kbase_pm_ca_term(struct kbase_device *kbdev)
  38. {
  39. kbdev->pm.backend.ca_current_policy->term(kbdev);
  40. }
  41. int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
  42. {
  43. if (!list)
  44. return POLICY_COUNT;
  45. *list = policy_list;
  46. return POLICY_COUNT;
  47. }
/**
 * kbase_pm_ca_get_policy - Get the currently selected core availability policy.
 * @kbdev: The kbase device structure for the device (must be valid)
 *
 * Return: The current policy, or NULL while a policy change is in flight
 *         (see kbase_pm_ca_set_policy()).
 */
const struct kbase_pm_ca_policy
*kbase_pm_ca_get_policy(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return kbdev->pm.backend.ca_current_policy;
}
/**
 * kbase_pm_ca_set_policy - Change the core availability policy.
 * @kbdev:      The kbase device structure for the device (must be valid)
 * @new_policy: The policy to install (must be valid)
 *
 * Tears down the old policy and installs @new_policy. While the switch is in
 * progress ca_current_policy is set to NULL under power_change_lock, which
 * readers (e.g. kbase_pm_ca_get_core_mask()) treat as "all cores available".
 * The GPU is held active across the switch so a suspend cannot race with it.
 */
void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_ca_policy *new_policy)
{
	const struct kbase_pm_ca_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
				new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	old_policy = kbdev->pm.backend.ca_current_policy;
	kbdev->pm.backend.ca_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* Policy hooks are optional; term the old outside the spinlock,
	 * then init the new before re-publishing it. */
	if (old_policy->term)
		old_policy->term(kbdev);

	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	kbdev->pm.backend.ca_current_policy = new_policy;

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here. */
	kbase_pm_update_cores_state_nolock(kbdev);

	/* Re-sync the new policy with the current shader core state. */
	kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
					kbdev->shader_ready_bitmap,
					kbdev->shader_transitioning_bitmap);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	mutex_unlock(&kbdev->pm.lock);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}
  92. u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
  93. {
  94. lockdep_assert_held(&kbdev->pm.power_change_lock);
  95. /* All cores must be enabled when instrumentation is in use */
  96. if (kbdev->pm.backend.instr_enabled)
  97. return kbdev->gpu_props.props.raw_props.shader_present &
  98. kbdev->pm.debug_core_mask_all;
  99. if (kbdev->pm.backend.ca_current_policy == NULL)
  100. return kbdev->gpu_props.props.raw_props.shader_present &
  101. kbdev->pm.debug_core_mask_all;
  102. return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
  103. kbdev->pm.debug_core_mask_all;
  104. }
  105. void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
  106. u64 cores_transitioning)
  107. {
  108. lockdep_assert_held(&kbdev->pm.power_change_lock);
  109. if (kbdev->pm.backend.ca_current_policy != NULL)
  110. kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
  111. cores_ready,
  112. cores_transitioning);
  113. }
  114. void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
  115. {
  116. unsigned long flags;
  117. spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
  118. kbdev->pm.backend.instr_enabled = true;
  119. kbase_pm_update_cores_state_nolock(kbdev);
  120. spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
  121. }
  122. void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
  123. {
  124. unsigned long flags;
  125. spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
  126. kbdev->pm.backend.instr_enabled = false;
  127. kbase_pm_update_cores_state_nolock(kbdev);
  128. spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
  129. }