mali_kbase_softjobs.c

/*
 *
 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>

#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
#include <asm/cacheflush.h>
#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
#include <linux/dma-mapping.h>
#ifdef CONFIG_SYNC
#include "sync.h"
#include <linux/syscalls.h>
#include "mali_kbase_sync.h"
#endif
#include <mali_base_kernel.h>
#include <mali_kbase_hwaccess_time.h>
#include <mali_kbase_mem_linux.h>
#include <linux/version.h>
#include <linux/ktime.h>
#include <linux/pfn.h>
#include <linux/sched.h>

/* Mask to check cache alignment of data structures */
#define KBASE_CACHE_ALIGNMENT_MASK ((1<<L1_CACHE_SHIFT)-1)

/**
 * @file mali_kbase_softjobs.c
 *
 * This file implements the logic behind software only jobs that are
 * executed within the driver rather than being handed over to the GPU.
 */

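/*
 * kbasep_add_waiting_soft_job - add a soft-job atom to the context's list of
 * waiting soft jobs.
 *
 * The atom is linked through dep_item[0] under waiting_soft_jobs_lock so that
 * event triggers and the timeout worker can later find and complete it.
 */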
void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	unsigned long lflags;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs);
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
}

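/*
 * kbasep_translate_gpu_addr_to_kernel_page - look up the struct page backing
 * a GPU virtual address in the context's region tracker.
 *
 * Returns the page on success, or NULL if no valid, backed region encloses
 * the address.
 */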
static struct page *kbasep_translate_gpu_addr_to_kernel_page(
		struct kbase_context *kctx, u64 gpu_addr)
{
	u64 pfn;
	struct kbase_va_region *reg;
	phys_addr_t addr = 0;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	pfn = gpu_addr >> PAGE_SHIFT;

	kbase_gpu_vm_lock(kctx);
	reg = kbase_region_tracker_find_region_enclosing_address(
			kctx, gpu_addr);
	if (!reg || (reg->flags & KBASE_REG_FREE))
		goto err_vm_unlock;
	addr = reg->cpu_alloc->pages[pfn - reg->start_pfn];
	kbase_gpu_vm_unlock(kctx);

	if (!addr)
		goto err;

	return pfn_to_page(PFN_DOWN(addr));

err_vm_unlock:
	kbase_gpu_vm_unlock(kctx);
err:
	return NULL;
}

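/*
 * kbasep_read_soft_event_status - read the one-byte status of the soft event
 * object at GPU address @evt into @status.
 *
 * Returns 0 on success, or -1 if the backing page could not be resolved.
 */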
int kbasep_read_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char *status)
{
	struct page *pg = kbasep_translate_gpu_addr_to_kernel_page(
			kctx, evt);
	unsigned char *mapped_pg;
	u32 offset = evt & ~PAGE_MASK;

	KBASE_DEBUG_ASSERT(status != NULL);

	if (!pg)
		return -1;

	mapped_pg = (unsigned char *)kmap_atomic(pg);
	KBASE_DEBUG_ASSERT(mapped_pg != NULL); /* kmap_atomic() must not fail */
	*status = *(mapped_pg + offset);
	kunmap_atomic(mapped_pg);

	return 0;
}

int kbasep_write_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char new_status)
{
	struct page *pg = kbasep_translate_gpu_addr_to_kernel_page(
			kctx, evt);
	unsigned char *mapped_pg;
	u32 offset = evt & ~PAGE_MASK;

	KBASE_DEBUG_ASSERT((new_status == BASE_JD_SOFT_EVENT_SET) ||
			   (new_status == BASE_JD_SOFT_EVENT_RESET));

	if (!pg)
		return -1;

	mapped_pg = (unsigned char *)kmap_atomic(pg);
	KBASE_DEBUG_ASSERT(mapped_pg != NULL); /* kmap_atomic() must not fail */
	*(mapped_pg + offset) = new_status;
	kunmap_atomic(mapped_pg);

	return 0;
}

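/*
 * kbase_dump_cpu_gpu_time - handle a BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME atom.
 *
 * Samples the GPU cycle counter and system time and writes a
 * struct base_dump_cpu_gpu_counters record to the user memory pointed at by
 * katom->jc. If the device is suspending, the atom is queued for later
 * processing instead.
 */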
static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
{
	struct kbase_va_region *reg;
	phys_addr_t addr = 0;
	u64 pfn;
	u32 offset;
	char *page;
	struct timespec ts;
	struct base_dump_cpu_gpu_counters data;
	u64 system_time;
	u64 cycle_counter;
	u64 jc = katom->jc;
	struct kbase_context *kctx = katom->kctx;
	int pm_active_err;

	memset(&data, 0, sizeof(data));

	/* Take the PM active reference as late as possible - otherwise, it could
	 * delay suspend until we process the atom (which may be at the end of a
	 * long chain of dependencies). */
	pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
	if (pm_active_err) {
		struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;

		/* We're suspended - queue this on the list of suspended jobs.
		 * Use dep_item[1], because dep_item[0] is in use for 'waiting_soft_jobs'. */
		mutex_lock(&js_devdata->runpool_mutex);
		list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
		mutex_unlock(&js_devdata->runpool_mutex);

		/* Also add this atom to the list of waiting soft jobs */
		kbasep_add_waiting_soft_job(katom);

		return pm_active_err;
	}

	kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
			&ts);

	kbase_pm_context_idle(kctx->kbdev);

	data.sec = ts.tv_sec;
	data.usec = ts.tv_nsec / 1000;
	data.system_time = system_time;
	data.cycle_counter = cycle_counter;

	pfn = jc >> PAGE_SHIFT;
	offset = jc & ~PAGE_MASK;

	/* Assume this atom will be cancelled until we know otherwise */
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	if (offset > 0x1000 - sizeof(data)) {
		/* Wouldn't fit in the page */
		return 0;
	}

	kbase_gpu_vm_lock(kctx);
	reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
	if (reg &&
	    (reg->flags & KBASE_REG_GPU_WR) &&
	    reg->cpu_alloc && reg->cpu_alloc->pages)
		addr = reg->cpu_alloc->pages[pfn - reg->start_pfn];
	kbase_gpu_vm_unlock(kctx);

	if (!addr)
		return 0;

	page = kmap(pfn_to_page(PFN_DOWN(addr)));
	if (!page)
		return 0;

	kbase_sync_single_for_cpu(katom->kctx->kbdev,
			kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
			offset, sizeof(data),
			DMA_BIDIRECTIONAL);
	memcpy(page + offset, &data, sizeof(data));

	kbase_sync_single_for_device(katom->kctx->kbdev,
			kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
			offset, sizeof(data),
			DMA_BIDIRECTIONAL);
	kunmap(pfn_to_page(PFN_DOWN(addr)));

	/* Atom was fine - mark it as done */
	katom->event_code = BASE_JD_EVENT_DONE;

	return 0;
}

#ifdef CONFIG_SYNC

/* Complete an atom that has returned '1' from kbase_process_soft_job (i.e. has waited)
 *
 * @param katom The atom to complete
 */
static void complete_soft_job(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;

	mutex_lock(&kctx->jctx.lock);
	list_del(&katom->dep_item[0]);
	kbase_finish_soft_job(katom);
	if (jd_done_nolock(katom, NULL))
		kbase_js_sched_all(kctx->kbdev);
	mutex_unlock(&kctx->jctx.lock);
}

static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
{
	struct sync_pt *pt;
	struct sync_timeline *timeline;

	if (katom->fence->num_fences != 1) {
		/* Not exactly one item in the list - so it didn't (directly) come from us */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
	timeline = sync_pt_parent(pt);

	if (!kbase_sync_timeline_is_ours(timeline)) {
		/* Fence has a sync_pt which isn't ours! */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	kbase_sync_signal_pt(pt, result);

	sync_timeline_signal(timeline);

	return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
}

static void kbase_fence_wait_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom;
	struct kbase_context *kctx;

	katom = container_of(data, struct kbase_jd_atom, work);
	kctx = katom->kctx;

	complete_soft_job(katom);
}

static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
	struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
	struct kbase_context *kctx;

	KBASE_DEBUG_ASSERT(katom != NULL);

	kctx = katom->kctx;

	KBASE_DEBUG_ASSERT(kctx != NULL);

	/* Propagate the fence status to the atom.
	 * If negative then cancel this atom and its dependencies.
	 */
	if (atomic_read(&fence->status) < 0)
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

	/* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
	 *
	 * The issue is that we may signal the timeline while holding kctx->jctx.lock and
	 * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
	 */
	KBASE_DEBUG_ASSERT(object_is_on_stack(&katom->work) == 0);
	INIT_WORK(&katom->work, kbase_fence_wait_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}

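/*
 * kbase_fence_wait - handle a BASE_JD_REQ_SOFT_FENCE_WAIT atom.
 *
 * Registers an asynchronous waiter on the atom's sync fence. Returns 0 if the
 * fence is already signalled (the atom completes immediately); otherwise the
 * atom is added to the waiting list and 1 is returned, with completion later
 * driven from kbase_fence_wait_callback via the job_done_wq workqueue.
 */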
static int kbase_fence_wait(struct kbase_jd_atom *katom)
{
	int ret;

	KBASE_DEBUG_ASSERT(katom != NULL);
	KBASE_DEBUG_ASSERT(katom->kctx != NULL);

	sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

	ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

	if (ret == 1) {
		/* Already signalled */
		return 0;
	}

	if (ret < 0) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		/* We should cause the dependent jobs in the bag to be failed,
		 * to do this we schedule the work queue to complete this job */
		KBASE_DEBUG_ASSERT(object_is_on_stack(&katom->work) == 0);
		INIT_WORK(&katom->work, kbase_fence_wait_worker);
		queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
	}

	kbasep_add_waiting_soft_job(katom);

	return 1;
}

static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
{
	if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
		/* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
		return;
	}

	/* Wait was cancelled - zap the atoms */
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

	kbase_finish_soft_job(katom);

	if (jd_done_nolock(katom, NULL))
		kbase_js_sched_all(katom->kctx->kbdev);
}
#endif /* CONFIG_SYNC */

static void kbasep_soft_event_complete_job(struct work_struct *work)
{
	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;
	int resched;

	mutex_lock(&kctx->jctx.lock);
	resched = jd_done_nolock(katom, NULL);
	mutex_unlock(&kctx->jctx.lock);

	if (resched)
		kbase_js_sched_all(kctx->kbdev);
}

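/*
 * kbasep_complete_triggered_soft_events - complete all soft-event-wait atoms
 * waiting on the event object at GPU address @evt.
 *
 * Matching atoms are removed from the waiting list, marked done and queued on
 * job_done_wq for completion. The soft-event timeout timer is cancelled once
 * no event-wait atoms remain on the list.
 */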
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
{
	int cancel_timer = 1;
	struct list_head *entry, *tmp;
	unsigned long lflags;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
		struct kbase_jd_atom *katom = list_entry(
				entry, struct kbase_jd_atom, dep_item[0]);

		if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
		    BASE_JD_REQ_SOFT_EVENT_WAIT) {
			if (katom->jc == evt) {
				list_del(&katom->dep_item[0]);
				katom->event_code = BASE_JD_EVENT_DONE;
				INIT_WORK(&katom->work,
					  kbasep_soft_event_complete_job);
				queue_work(kctx->jctx.job_done_wq,
					   &katom->work);
			} else {
				/* There are still other waiting jobs, so we
				 * cannot cancel the timer yet */
				cancel_timer = 0;
			}
		}
	}

	if (cancel_timer)
		hrtimer_try_to_cancel(&kctx->soft_event_timeout);
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
}

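/*
 * kbasep_soft_event_timeout_worker - hrtimer callback that cancels
 * soft-event-wait atoms which have been waiting longer than the configured
 * soft_event_timeout_ms.
 *
 * Timed-out atoms are removed from the waiting list and queued for completion
 * with BASE_JD_EVENT_JOB_CANCELLED; the timer is restarted while younger
 * waiters remain.
 */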
enum hrtimer_restart kbasep_soft_event_timeout_worker(struct hrtimer *timer)
{
	struct kbase_context *kctx = container_of(timer, struct kbase_context,
			soft_event_timeout);
	u32 timeout_ms = (u32)atomic_read(
			&kctx->kbdev->js_data.soft_event_timeout_ms);
	ktime_t cur_time = ktime_get();
	enum hrtimer_restart restarting = HRTIMER_NORESTART;
	unsigned long lflags;
	struct list_head *entry, *tmp;

	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
		struct kbase_jd_atom *katom = list_entry(
				entry, struct kbase_jd_atom, dep_item[0]);

		if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
		    BASE_JD_REQ_SOFT_EVENT_WAIT) {
			s64 elapsed_time =
				ktime_to_ms(ktime_sub(cur_time,
						katom->start_timestamp));

			if (elapsed_time > (s64)timeout_ms) {
				/* Take it out of the list to ensure that it
				 * will be cancelled in all cases */
				list_del(&katom->dep_item[0]);
				katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
				INIT_WORK(&katom->work,
					  kbasep_soft_event_complete_job);
				queue_work(kctx->jctx.job_done_wq,
					   &katom->work);
			} else {
				restarting = HRTIMER_RESTART;
			}
		}
	}

	if (restarting)
		hrtimer_add_expires(timer, HR_TIMER_DELAY_MSEC(timeout_ms));
	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);

	return restarting;
}

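/*
 * kbasep_soft_event_wait - handle a BASE_JD_REQ_SOFT_EVENT_WAIT atom.
 *
 * Returns 0 if the event is already set (or its status cannot be read, in
 * which case the atom is cancelled). Otherwise the atom is added to the
 * waiting list, the timeout timer is (re)armed if needed, and 1 is returned
 * to indicate the atom is now blocked.
 */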
static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	ktime_t remaining;
	unsigned char status;

	/* The status of this soft-job is stored in jc */
	if (kbasep_read_soft_event_status(kctx, katom->jc, &status) != 0) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		return 0;
	}

	if (status == BASE_JD_SOFT_EVENT_SET)
		return 0; /* Event already set, nothing to do */

	/* Record the start time of this atom so we can cancel it at
	 * the right time */
	katom->start_timestamp = ktime_get();

	/* Add the atom to the waiting list before the timer is
	 * (re)started to make sure that it gets processed */
	kbasep_add_waiting_soft_job(katom);

	/* Schedule cancellation of this atom after a period if it is
	 * not active */
	remaining = hrtimer_get_remaining(&kctx->soft_event_timeout);
	if (ktime_compare(remaining, ktime_set(0, 0)) <= 0) {
		int timeout_ms = atomic_read(
				&kctx->kbdev->js_data.soft_event_timeout_ms);

		hrtimer_start(&kctx->soft_event_timeout,
			      HR_TIMER_DELAY_MSEC((u64)timeout_ms),
			      HRTIMER_MODE_REL);
	}

	return 1;
}

static void kbasep_soft_event_update(struct kbase_jd_atom *katom,
				     unsigned char new_status)
{
	/* Complete jobs waiting on the same event */
	struct kbase_context *kctx = katom->kctx;

	if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		return;
	}

	if (new_status == BASE_JD_SOFT_EVENT_SET)
		kbasep_complete_triggered_soft_events(kctx, katom->jc);
}

static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
{
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	if (jd_done_nolock(katom, NULL))
		kbase_js_sched_all(katom->kctx->kbdev);
}

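/*
 * struct kbase_debug_copy_buffer - per-buffer state for a debug_copy job.
 *
 * Holds the pinned destination user pages (@pages, @nr_pages, @offset, @size)
 * and, for user-buffer imports, the pinned source pages (@extres_pages,
 * @nr_extres_pages) of the external resource identified by @gpu_addr.
 */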
struct kbase_debug_copy_buffer {
	size_t size;
	struct page **pages;
	int nr_pages;
	size_t offset;
	/* To find memory region */
	u64 gpu_addr;

	struct page **extres_pages;
	int nr_extres_pages;
};

static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
{
	struct page **pages = buffer->extres_pages;
	int nr_pages = buffer->nr_extres_pages;

	if (pages) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *pg = pages[i];

			if (pg)
				put_page(pg);
		}
		kfree(pages);
	}
}

static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
{
	struct kbase_debug_copy_buffer *buffers =
			(struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
	unsigned int i;
	unsigned int nr = katom->nr_extres;

	if (!buffers)
		return;

	kbase_gpu_vm_lock(katom->kctx);
	for (i = 0; i < nr; i++) {
		int p;
		struct kbase_va_region *reg;

		reg = kbase_region_tracker_find_region_enclosing_address(
				katom->kctx, buffers[i].gpu_addr);

		if (!buffers[i].pages)
			break;
		for (p = 0; p < buffers[i].nr_pages; p++) {
			struct page *pg = buffers[i].pages[p];

			if (pg)
				put_page(pg);
		}
		kfree(buffers[i].pages);
		if (reg && reg->gpu_alloc) {
			switch (reg->gpu_alloc->type) {
			case BASE_MEM_IMPORT_TYPE_USER_BUFFER:
			{
				free_user_buffer(&buffers[i]);
				break;
			}
			default:
				/* Nothing to be done. */
				break;
			}
			kbase_mem_phy_alloc_put(reg->gpu_alloc);
		}
	}
	kbase_gpu_vm_unlock(katom->kctx);
	kfree(buffers);

	katom->jc = 0;
}

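/*
 * kbase_debug_copy_prepare - validate and set up a BASE_JD_REQ_SOFT_DEBUG_COPY
 * atom at submission time.
 *
 * Copies the array of base_jd_debug_copy_buffer descriptors from user space,
 * pins the destination pages, takes a reference on each source external
 * resource and, for user-buffer imports, pins its pages as well. katom->jc is
 * replaced with a kernel-allocated array of struct kbase_debug_copy_buffer.
 */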
static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
{
	struct kbase_debug_copy_buffer *buffers;
	struct base_jd_debug_copy_buffer *user_buffers = NULL;
	unsigned int i;
	unsigned int nr = katom->nr_extres;
	int ret = 0;
	void __user *user_structs = (void __user *)(uintptr_t)katom->jc;

	if (!user_structs)
		return -EINVAL;

	buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
	if (!buffers) {
		ret = -ENOMEM;
		katom->jc = 0;
		goto out_cleanup;
	}
	katom->jc = (u64)(uintptr_t)buffers;

	user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
	if (!user_buffers) {
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = copy_from_user(user_buffers, user_structs,
			sizeof(*user_buffers)*nr);
	if (ret)
		goto out_cleanup;

	for (i = 0; i < nr; i++) {
		u64 addr = user_buffers[i].address;
		u64 page_addr = addr & PAGE_MASK;
		u64 end_page_addr = addr + user_buffers[i].size - 1;
		u64 last_page_addr = end_page_addr & PAGE_MASK;
		int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
		int pinned_pages;
		struct kbase_va_region *reg;
		struct base_external_resource user_extres;

		if (!addr)
			continue;

		buffers[i].nr_pages = nr_pages;
		buffers[i].offset = addr & ~PAGE_MASK;
		if (buffers[i].offset >= PAGE_SIZE) {
			ret = -EINVAL;
			goto out_cleanup;
		}
		buffers[i].size = user_buffers[i].size;

		buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
				GFP_KERNEL);
		if (!buffers[i].pages) {
			ret = -ENOMEM;
			goto out_cleanup;
		}

		pinned_pages = get_user_pages_fast(page_addr,
					nr_pages,
					1, /* Write */
					buffers[i].pages);
		if (pinned_pages < 0) {
			ret = pinned_pages;
			goto out_cleanup;
		}
		if (pinned_pages != nr_pages) {
			ret = -EINVAL;
			goto out_cleanup;
		}

		user_extres = user_buffers[i].extres;
		if (user_extres.ext_resource == 0ULL) {
			ret = -EINVAL;
			goto out_cleanup;
		}

		buffers[i].gpu_addr = user_extres.ext_resource &
				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
		kbase_gpu_vm_lock(katom->kctx);
		reg = kbase_region_tracker_find_region_enclosing_address(
				katom->kctx, buffers[i].gpu_addr);

		if (NULL == reg || NULL == reg->cpu_alloc ||
				(reg->flags & KBASE_REG_FREE)) {
			ret = -EINVAL;
			goto out_unlock;
		}
		kbase_mem_phy_alloc_get(reg->gpu_alloc);

		buffers[i].nr_extres_pages = reg->nr_pages;
		if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
			dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");

		switch (reg->gpu_alloc->type) {
		case BASE_MEM_IMPORT_TYPE_USER_BUFFER:
		{
			struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
			unsigned long nr_pages =
				alloc->imported.user_buf.nr_pages;

			if (alloc->imported.user_buf.mm != current->mm) {
				ret = -EINVAL;
				goto out_unlock;
			}
			buffers[i].extres_pages = kcalloc(nr_pages,
					sizeof(struct page *), GFP_KERNEL);
			if (!buffers[i].extres_pages) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			ret = get_user_pages_fast(
					alloc->imported.user_buf.address,
					nr_pages, 0,
					buffers[i].extres_pages);
			if (ret != nr_pages)
				goto out_unlock;
			ret = 0;
			break;
		}
		case BASE_MEM_IMPORT_TYPE_UMP:
		{
			dev_warn(katom->kctx->kbdev->dev,
					"UMP is not supported for debug_copy jobs\n");
			ret = -EINVAL;
			goto out_unlock;
		}
		default:
			/* Nothing to be done. */
			break;
		}
		kbase_gpu_vm_unlock(katom->kctx);
	}
	kfree(user_buffers);

	return ret;

out_unlock:
	kbase_gpu_vm_unlock(katom->kctx);

out_cleanup:
	kfree(buffers);
	kfree(user_buffers);

	/* Frees allocated memory for kbase_debug_copy_job struct, including
	 * members, and sets jc to 0 */
	kbase_debug_copy_finish(katom);
	return ret;
}

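/*
 * kbase_mem_copy_from_extres_page - copy one source page of the external
 * resource into the (possibly unaligned) destination buffer.
 *
 * Because the destination may start at a non-zero @offset within its first
 * page, a single source page can span two destination pages; this helper
 * splits the copy accordingly and advances *target_page_nr and *to_copy.
 */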
static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
		void *extres_page, struct page **pages, unsigned int nr_pages,
		unsigned int *target_page_nr, size_t offset, size_t *to_copy)
{
	void *target_page = kmap(pages[*target_page_nr]);
	size_t chunk = PAGE_SIZE-offset;

	if (!target_page) {
		*target_page_nr += 1;
		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
		return;
	}

	chunk = min(chunk, *to_copy);

	memcpy(target_page + offset, extres_page, chunk);
	*to_copy -= chunk;

	kunmap(pages[*target_page_nr]);

	*target_page_nr += 1;
	if (*target_page_nr >= nr_pages)
		return;

	target_page = kmap(pages[*target_page_nr]);
	if (!target_page) {
		*target_page_nr += 1;
		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
		return;
	}

	KBASE_DEBUG_ASSERT(target_page);

	chunk = min(offset, *to_copy);
	memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
	*to_copy -= chunk;

	kunmap(pages[*target_page_nr]);
}

static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
		struct kbase_debug_copy_buffer *buf_data)
{
	unsigned int i;
	unsigned int target_page_nr = 0;
	struct kbase_va_region *reg;
	struct page **pages = buf_data->pages;
	u64 offset = buf_data->offset;
	size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
	size_t to_copy = min(extres_size, buf_data->size);
	int ret = 0;

	KBASE_DEBUG_ASSERT(pages != NULL);

	kbase_gpu_vm_lock(kctx);
	reg = kbase_region_tracker_find_region_enclosing_address(
			kctx, buf_data->gpu_addr);
	if (!reg) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (reg->gpu_alloc->type) {
	case BASE_MEM_IMPORT_TYPE_USER_BUFFER:
	{
		for (i = 0; i < buf_data->nr_extres_pages; i++) {
			struct page *pg = buf_data->extres_pages[i];
			void *extres_page = kmap(pg);

			if (extres_page)
				kbase_mem_copy_from_extres_page(kctx,
						extres_page, pages,
						buf_data->nr_pages,
						&target_page_nr,
						offset, &to_copy);

			kunmap(pg);
			if (target_page_nr >= buf_data->nr_pages)
				break;
		}
		break;
	}
	break;
#ifdef CONFIG_DMA_SHARED_BUFFER
	case BASE_MEM_IMPORT_TYPE_UMM: {
		struct dma_buf *dma_buf = reg->gpu_alloc->imported.umm.dma_buf;

		KBASE_DEBUG_ASSERT(dma_buf != NULL);

		ret = dma_buf_begin_cpu_access(dma_buf,
				DMA_FROM_DEVICE);
		if (ret)
			goto out_unlock;

		for (i = 0; i < buf_data->nr_extres_pages; i++) {
			void *extres_page = dma_buf_kmap(dma_buf, i);

			if (extres_page)
				kbase_mem_copy_from_extres_page(kctx,
						extres_page, pages,
						buf_data->nr_pages,
						&target_page_nr,
						offset, &to_copy);

			dma_buf_kunmap(dma_buf, i, extres_page);
			if (target_page_nr >= buf_data->nr_pages)
				break;
		}
		dma_buf_end_cpu_access(dma_buf,
				DMA_FROM_DEVICE);
		break;
	}
#endif
	default:
		ret = -EINVAL;
	}
out_unlock:
	kbase_gpu_vm_unlock(kctx);
	return ret;
}

static int kbase_debug_copy(struct kbase_jd_atom *katom)
{
	struct kbase_debug_copy_buffer *buffers =
			(struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
	unsigned int i;

	for (i = 0; i < katom->nr_extres; i++) {
		int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);

		if (res)
			return res;
	}

	return 0;
}

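/*
 * kbase_jit_allocate_prepare - validate a BASE_JD_REQ_SOFT_JIT_ALLOC atom at
 * submission time.
 *
 * Copies the base_jit_alloc_info structure from user space, rejects a zero
 * ID, commit_pages larger than va_pages, or a misaligned gpu_alloc_addr, and
 * replaces katom->jc with the kernel copy of the structure.
 */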
static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
{
	__user void *data = (__user void *)(uintptr_t) katom->jc;
	struct base_jit_alloc_info *info;
	int ret;

	/* Fail the job if there is no info structure */
	if (!data) {
		ret = -EINVAL;
		goto fail;
	}

	/* Copy the information for safe access and future storage */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto fail;
	}

	if (copy_from_user(info, data, sizeof(*info)) != 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/* If the ID is zero then fail the job */
	if (info->id == 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/* Sanity check that the PA fits within the VA */
	if (info->va_pages < info->commit_pages) {
		ret = -EINVAL;
		goto free_info;
	}

	/* Ensure the GPU address is correctly aligned */
	if ((info->gpu_alloc_addr & 0x7) != 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/* Replace the user pointer with our kernel allocated info structure */
	katom->jc = (u64)(uintptr_t) info;

	/*
	 * Note:
	 * The provided info->gpu_alloc_addr isn't validated here as
	 * userland can cache allocations which means that even
	 * though the region is valid it doesn't represent the
	 * same thing it used to.
	 *
	 * Complete validation of va_pages, commit_pages and extent
	 * isn't done here as it will be done during the call to
	 * kbase_mem_alloc.
	 */
	return 0;

free_info:
	kfree(info);
fail:
	katom->jc = 0;
	return ret;
}

static void kbase_jit_allocate_process(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct base_jit_alloc_info *info;
	struct kbase_va_region *reg;
	struct kbase_vmap_struct mapping;
	u64 *ptr;

	info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;

	/* The JIT ID is still in use so fail the allocation */
	if (kctx->jit_alloc[info->id]) {
		katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
		return;
	}

	/*
	 * Mark the allocation so we know it's in use even if the
	 * allocation itself fails.
	 */
	kctx->jit_alloc[info->id] = (struct kbase_va_region *) -1;

	/* Create a JIT allocation */
	reg = kbase_jit_allocate(kctx, info);
	if (!reg) {
		katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
		return;
	}

	/*
	 * Write the address of the JIT allocation to the user provided
	 * GPU allocation.
	 */
	ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
			&mapping);
	if (!ptr) {
		/*
		 * Leave the allocation "live" as the JIT free job will be
		 * submitted anyway.
		 */
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		return;
	}

	*ptr = reg->start_pfn << PAGE_SHIFT;
	kbase_vunmap(kctx, &mapping);

	katom->event_code = BASE_JD_EVENT_DONE;

	/*
	 * Bind it to the user provided ID. Do this last so we can check for
	 * the JIT free racing this JIT alloc job.
	 */
	kctx->jit_alloc[info->id] = reg;
}

static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
{
	struct base_jit_alloc_info *info;

	info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
	/* Free the info structure */
	kfree(info);
}

static void kbase_jit_free_process(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	u8 id = (u8) katom->jc;

	/*
	 * If the ID is zero or it is not in use yet then fail the job.
	 */
	if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		return;
	}

	/*
	 * If the ID is valid but the allocation request failed, still succeed
	 * this soft job but don't try to free the allocation.
	 */
	if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
		kbase_jit_free(kctx, kctx->jit_alloc[id]);

	kctx->jit_alloc[id] = NULL;
}

static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
{
	__user struct base_external_resource_list *user_ext_res;
	struct base_external_resource_list *ext_res;
	u64 count = 0;
	size_t copy_size;
	int ret;

	user_ext_res = (__user struct base_external_resource_list *)
			(uintptr_t) katom->jc;

	/* Fail the job if there is no info structure */
	if (!user_ext_res) {
		ret = -EINVAL;
		goto fail;
	}

	if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
		ret = -EINVAL;
		goto fail;
	}

	/* Is the number of external resources in range? */
	if (!count || count > BASE_EXT_RES_COUNT_MAX) {
		ret = -EINVAL;
		goto fail;
	}

	/* Copy the information for safe access and future storage */
	copy_size = sizeof(*ext_res);
	copy_size += sizeof(struct base_external_resource) * (count - 1);
	ext_res = kzalloc(copy_size, GFP_KERNEL);
	if (!ext_res) {
		ret = -ENOMEM;
		goto fail;
	}

	if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
		ret = -EINVAL;
		goto free_info;
	}

	/*
	 * Overwrite the count with the first value in case it was changed
	 * after the fact.
	 */
	ext_res->count = count;

	/*
	 * Replace the user pointer with our kernel allocated
	 * ext_res structure.
	 */
	katom->jc = (u64)(uintptr_t) ext_res;

	return 0;

free_info:
	kfree(ext_res);
fail:
	return ret;
}

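/*
 * kbase_ext_res_process - handle BASE_JD_REQ_SOFT_EXT_RES_MAP/UNMAP atoms.
 *
 * With @map true, every listed external resource is acquired as a sticky
 * resource; if any acquisition fails, the ones already taken are released and
 * the atom is marked invalid. With @map false, all resources are released and
 * the atom is marked invalid if any single release fails.
 */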
static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
{
	struct base_external_resource_list *ext_res;
	int i;
	bool failed = false;

	ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
	if (!ext_res)
		goto failed_jc;

	kbase_gpu_vm_lock(katom->kctx);

	for (i = 0; i < ext_res->count; i++) {
		u64 gpu_addr;

		gpu_addr = ext_res->ext_res[i].ext_resource &
				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
		if (map) {
			if (!kbase_sticky_resource_acquire(katom->kctx,
					gpu_addr))
				goto failed_loop;
		} else
			if (!kbase_sticky_resource_release(katom->kctx, NULL,
					gpu_addr))
				failed = true;
	}

	/*
	 * In the case of unmap we continue unmapping other resources in the
	 * case of failure but will always report failure if _any_ unmap
	 * request fails.
	 */
	if (failed)
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
	else
		katom->event_code = BASE_JD_EVENT_DONE;

	kbase_gpu_vm_unlock(katom->kctx);

	return;

failed_loop:
	while (--i > 0) {
		u64 gpu_addr;

		gpu_addr = ext_res->ext_res[i].ext_resource &
				~BASE_EXT_RES_ACCESS_EXCLUSIVE;

		kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
	}

	katom->event_code = BASE_JD_EVENT_JOB_INVALID;
	kbase_gpu_vm_unlock(katom->kctx);

failed_jc:
	return;
}

static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
{
	struct base_external_resource_list *ext_res;

	ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
	/* Free the info structure */
	kfree(ext_res);
}

int kbase_process_soft_job(struct kbase_jd_atom *katom)
{
	switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
		return kbase_dump_cpu_gpu_time(katom);
#ifdef CONFIG_SYNC
	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
		KBASE_DEBUG_ASSERT(katom->fence != NULL);
		katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
		/* Release the reference as we don't need it any more */
		sync_fence_put(katom->fence);
		katom->fence = NULL;
		break;
	case BASE_JD_REQ_SOFT_FENCE_WAIT:
		return kbase_fence_wait(katom);
#endif /* CONFIG_SYNC */
	case BASE_JD_REQ_SOFT_REPLAY:
		return kbase_replay_process(katom);
	case BASE_JD_REQ_SOFT_EVENT_WAIT:
		return kbasep_soft_event_wait(katom);
	case BASE_JD_REQ_SOFT_EVENT_SET:
		kbasep_soft_event_update(katom, BASE_JD_SOFT_EVENT_SET);
		break;
	case BASE_JD_REQ_SOFT_EVENT_RESET:
		kbasep_soft_event_update(katom, BASE_JD_SOFT_EVENT_RESET);
		break;
	case BASE_JD_REQ_SOFT_DEBUG_COPY:
	{
		int res = kbase_debug_copy(katom);

		if (res)
			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		break;
	}
	case BASE_JD_REQ_SOFT_JIT_ALLOC:
		kbase_jit_allocate_process(katom);
		break;
	case BASE_JD_REQ_SOFT_JIT_FREE:
		kbase_jit_free_process(katom);
		break;
	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
		kbase_ext_res_process(katom, true);
		break;
	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
		kbase_ext_res_process(katom, false);
		break;
	}

	/* Atom is complete */
	return 0;
}

void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
{
	switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
#ifdef CONFIG_SYNC
	case BASE_JD_REQ_SOFT_FENCE_WAIT:
		kbase_fence_cancel_wait(katom);
		break;
#endif
	case BASE_JD_REQ_SOFT_EVENT_WAIT:
		kbasep_soft_event_cancel_job(katom);
		break;
	default:
		/* This soft-job doesn't support cancellation! */
		KBASE_DEBUG_ASSERT(0);
	}
}

int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
{
	switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
		{
			if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
				return -EINVAL;
		}
		break;
#ifdef CONFIG_SYNC
	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
		{
			struct base_fence fence;
			int fd;

			if (copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)) != 0)
				return -EINVAL;

			fd = kbase_stream_create_fence(fence.basep.stream_fd);
			if (fd < 0)
				return -EINVAL;

			katom->fence = sync_fence_fdget(fd);

			if (katom->fence == NULL) {
				/* The only way the fence can be NULL is if userspace closed it for us.
				 * So we don't need to clear it up */
				return -EINVAL;
			}
			fence.basep.fd = fd;
			if (copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence)) != 0) {
				katom->fence = NULL;
				sys_close(fd);
				return -EINVAL;
			}
		}
		break;
	case BASE_JD_REQ_SOFT_FENCE_WAIT:
		{
			struct base_fence fence;

			if (copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)) != 0)
				return -EINVAL;

			/* Get a reference to the fence object */
			katom->fence = sync_fence_fdget(fence.basep.fd);
			if (katom->fence == NULL)
				return -EINVAL;
		}
		break;
#endif /* CONFIG_SYNC */
	case BASE_JD_REQ_SOFT_JIT_ALLOC:
		return kbase_jit_allocate_prepare(katom);
	case BASE_JD_REQ_SOFT_REPLAY:
	case BASE_JD_REQ_SOFT_JIT_FREE:
		break;
	case BASE_JD_REQ_SOFT_EVENT_WAIT:
	case BASE_JD_REQ_SOFT_EVENT_SET:
	case BASE_JD_REQ_SOFT_EVENT_RESET:
		if (katom->jc == 0)
			return -EINVAL;
		break;
	case BASE_JD_REQ_SOFT_DEBUG_COPY:
		return kbase_debug_copy_prepare(katom);
	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
		return kbase_ext_res_prepare(katom);
	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
		return kbase_ext_res_prepare(katom);
	default:
		/* Unsupported soft-job */
		return -EINVAL;
	}
	return 0;
}

void kbase_finish_soft_job(struct kbase_jd_atom *katom)
{
	switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
		/* Nothing to do */
		break;
#ifdef CONFIG_SYNC
	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
		/* If fence has not yet been signalled, do it now */
		if (katom->fence) {
			kbase_fence_trigger(katom, katom->event_code ==
					BASE_JD_EVENT_DONE ? 0 : -EFAULT);
			sync_fence_put(katom->fence);
			katom->fence = NULL;
		}
		break;
	case BASE_JD_REQ_SOFT_FENCE_WAIT:
		/* Release the reference to the fence object */
		sync_fence_put(katom->fence);
		katom->fence = NULL;
		break;
#endif /* CONFIG_SYNC */
	case BASE_JD_REQ_SOFT_DEBUG_COPY:
		kbase_debug_copy_finish(katom);
		break;
	case BASE_JD_REQ_SOFT_JIT_ALLOC:
		kbase_jit_allocate_finish(katom);
		break;
	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
		kbase_ext_res_finish(katom);
		break;
	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
		kbase_ext_res_finish(katom);
		break;
	}
}

void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
{
	LIST_HEAD(local_suspended_soft_jobs);
	struct kbase_jd_atom *tmp_iter;
	struct kbase_jd_atom *katom_iter;
	struct kbasep_js_device_data *js_devdata;
	bool resched = false;

	KBASE_DEBUG_ASSERT(kbdev);

	js_devdata = &kbdev->js_data;

	/* Move out the entire list */
	mutex_lock(&js_devdata->runpool_mutex);
	list_splice_init(&js_devdata->suspended_soft_jobs_list,
			&local_suspended_soft_jobs);
	mutex_unlock(&js_devdata->runpool_mutex);

	/*
	 * Each atom must be detached from the list and run separately -
	 * it could be re-added to the old list, but this is unlikely
	 */
	list_for_each_entry_safe(katom_iter, tmp_iter,
			&local_suspended_soft_jobs, dep_item[1]) {
		struct kbase_context *kctx = katom_iter->kctx;

		mutex_lock(&kctx->jctx.lock);

		/* Remove from the global list */
		list_del(&katom_iter->dep_item[1]);
		/* Remove from the context's list of waiting soft jobs */
		list_del(&katom_iter->dep_item[0]);

		if (kbase_process_soft_job(katom_iter) == 0) {
			kbase_finish_soft_job(katom_iter);
			resched |= jd_done_nolock(katom_iter, NULL);
		} else {
			KBASE_DEBUG_ASSERT((katom_iter->core_req &
					BASEP_JD_REQ_ATOM_TYPE)
					!= BASE_JD_REQ_SOFT_REPLAY);
		}

		mutex_unlock(&kctx->jctx.lock);
	}

	if (resched)
		kbase_js_sched_all(kbdev);
}