mali_kbase_context.c

/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Base kernel context APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <mali_kbase_mem_linux.h>

/**
 * kbase_create_context() - Create a kernel base context.
 * @kbdev:     Kbase device
 * @is_compat: Force creation of a 32-bit context
 *
 * Allocate and initialize a kernel base context.
 *
 * Return: new kbase context on success, NULL on failure
 */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
	struct kbase_context *kctx;
	int err;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* zero-initialized, as a lot of code assumes it's zeroed out on create */
	kctx = vzalloc(sizeof(*kctx));
	if (!kctx)
		goto out;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kbdev);

	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	kctx->is_compat = is_compat;
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
	kctx->infinite_cache_active = 0;
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	kctx->slots_pullable = 0;
	kctx->tgid = current->tgid;
	kctx->pid = current->pid;
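
	/*
	 * Editorial note: as I read the mem-pool API, the per-context pool
	 * is chained to the per-device pool (the last argument), so pages
	 * can spill over to, and be drawn from, the device-wide pool when
	 * this one over- or under-flows.
	 */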
	err = kbase_mem_pool_init(&kctx->mem_pool,
				  kbdev->mem_pool_max_size_default,
				  kctx->kbdev, &kbdev->mem_pool);
	if (err)
		goto free_kctx;

	err = kbase_mem_evictable_init(kctx);
	if (err)
		goto free_pool;

	atomic_set(&kctx->used_pages, 0);

	err = kbase_jd_init(kctx);
	if (err)
		goto deinit_evictable;

	err = kbasep_js_kctx_init(kctx);
	if (err)
		goto free_jd;	/* safe to call kbasep_js_kctx_term in this case */

	err = kbase_event_init(kctx);
	if (err)
		goto free_jd;

	mutex_init(&kctx->reg_lock);

	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
	spin_lock_init(&kctx->waiting_soft_jobs_lock);
#ifdef CONFIG_KDS
	INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif

	err = kbase_dma_fence_init(kctx);
	if (err)
		goto free_event;

	err = kbase_mmu_init(kctx);
	if (err)
		goto term_dma_fence;

	kctx->pgd = kbase_mmu_alloc_pgd(kctx);
	if (!kctx->pgd)
		goto free_mmu;

	kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;

	init_waitqueue_head(&kctx->event_queue);

	kctx->cookies = KBASE_COOKIE_MASK;

	/* Make sure page 0 is not used... */
	err = kbase_region_tracker_init(kctx);
	if (err)
		goto no_region_tracker;

	err = kbase_sticky_resource_init(kctx);
	if (err)
		goto no_sticky;

	err = kbase_jit_init(kctx);
	if (err)
		goto no_jit;

#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
#endif

	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;

	mutex_init(&kctx->vinstr_cli_lock);

	hrtimer_init(&kctx->soft_event_timeout, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	kctx->soft_event_timeout.function = &kbasep_soft_event_timeout_worker;

	return kctx;
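
	/*
	 * Error unwinding: a failed step above jumps to the label below it,
	 * and the labels fall through, so every step that did complete is
	 * torn down in reverse order of initialization.
	 */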
no_jit:
	kbase_gpu_vm_lock(kctx);
	kbase_sticky_resource_term(kctx);
	kbase_gpu_vm_unlock(kctx);
no_sticky:
	kbase_region_tracker_term(kctx);
no_region_tracker:
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
	/* VM lock needed for the call to kbase_mmu_free_pgd */
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
free_mmu:
	kbase_mmu_term(kctx);
term_dma_fence:
	kbase_dma_fence_term(kctx);
free_event:
	kbase_event_cleanup(kctx);
free_jd:
	/* Safe to call even when kbasep_js_kctx_init() didn't run, provided
	 * kctx was sufficiently zeroed. */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);
deinit_evictable:
	kbase_mem_evictable_deinit(kctx);
free_pool:
	kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
	vfree(kctx);
out:
	return NULL;
}
KBASE_EXPORT_SYMBOL(kbase_create_context);
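
/*
 * A minimal usage sketch (illustrative, not part of this file): contexts
 * are typically created when userspace opens the device node and destroyed
 * on release. The handler name below is a hypothetical stand-in for the
 * driver's real file-op handlers.
 *
 *	static int example_device_open(struct kbase_device *kbdev)
 *	{
 *		struct kbase_context *kctx;
 *
 *		kctx = kbase_create_context(kbdev, false);
 *		if (!kctx)
 *			return -ENOMEM;
 *
 *		// ... use the context, then on release:
 *		kbase_destroy_context(kctx);
 *		return 0;
 *	}
 */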

static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
{
	dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
	kbase_mem_phy_alloc_put(reg->cpu_alloc);
	kbase_mem_phy_alloc_put(reg->gpu_alloc);
	kfree(reg);
}

/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Tears down everything set up by kbase_create_context() and releases
 * all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process. A suspend
	 * won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);
	/*
	 * JIT must be terminated before the code below, as it must be called
	 * without the region lock being held. The code above ensures no new
	 * JIT allocations can be made by the time we get to this point of
	 * context tear-down.
	 */
	kbase_jit_term(kctx);
	kbase_gpu_vm_lock(kctx);
	kbase_sticky_resource_term(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
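	/*
	 * Editorial note: a set bit in kctx->cookies marks a free cookie,
	 * so the inverted, masked value walks exactly the cookies still
	 * tied to a region whose setup never completed.
	 */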
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);
		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call even when kbasep_js_kctx_init() didn't run, provided
	 * kctx was sufficiently zeroed. */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_dma_fence_term(kctx);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_evictable_deinit(kctx);
	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);

/**
 * kbase_context_set_create_flags - Set creation flags on a context
 * @kctx:  Kbase context
 * @flags: Flags to set
 *
 * Return: 0 on success, -EINVAL if @flags contains anything outside
 *         BASE_CONTEXT_CREATE_KERNEL_FLAGS
 */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
{
	int err = 0;
	struct kbasep_js_kctx_info *js_kctx_info;
	unsigned long irq_flags;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_kctx_info = &kctx->jctx.sched_info;

	/* Validate flags */
	if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);

	/* Translate the flags */
	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
		js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);

	/* Latch the initial attributes into the Job Scheduler */
	kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);

	spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock,
			       irq_flags);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

out:
	return err;
}
KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
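
/*
 * Usage sketch (illustrative): a caller passing flags received from
 * userspace relies on the validation above, e.g.
 *
 *	if (kbase_context_set_create_flags(kctx, requested_flags))
 *		dev_warn(kctx->kbdev->dev,
 *			 "rejected invalid context create flags\n");
 *
 * where requested_flags is a hypothetical u32 taken from the ioctl
 * argument.
 */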