mali_kbase_core_linux.c
/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_hwaccess_gpuprops.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#include "mali_kbase_mem.h"
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/compat.h>	/* is_compat_task */
#include <linux/mman.h>
#include <linux/version.h>
#include <linux/security.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <mali_kbase_config.h>
#include <mali_kbase_tlstream.h>
#include <linux/pm_opp.h>

/* GPU IRQ Tags */
#define JOB_IRQ_TAG	0
#define MMU_IRQ_TAG	1
#define GPU_IRQ_TAG	2

static int kbase_dev_nr;
static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"

static inline void __compile_time_asserts(void)
{
	CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}

#ifdef CONFIG_KDS
struct kbasep_kds_resource_set_file_data {
	struct kds_resource_set *lock;
};

static int kds_resource_release(struct inode *inode, struct file *file);

static const struct file_operations kds_resource_fops = {
	.release = kds_resource_release
};

struct kbase_kds_resource_list_data {
	struct kds_resource **kds_resources;
	unsigned long *kds_access_bitmap;
	int num_elems;
};

static int kds_resource_release(struct inode *inode, struct file *file)
{
	struct kbasep_kds_resource_set_file_data *data;

	data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
	if (data != NULL) {
		if (data->lock != NULL)
			kds_resource_set_release(&data->lock);

		kfree(data);
	}
	return 0;
}

static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
{
	struct base_external_resource *res = ext_res;
	int res_id;

	/* assume we have to wait for all */
	KBASE_DEBUG_ASSERT(num_elems != 0);
	resources_list->kds_resources = kmalloc_array(num_elems,
			sizeof(struct kds_resource *), GFP_KERNEL);

	if (resources_list->kds_resources == NULL)
		return -ENOMEM;

	KBASE_DEBUG_ASSERT(num_elems != 0);
	resources_list->kds_access_bitmap = kzalloc(
			sizeof(unsigned long) *
			((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
			GFP_KERNEL);
	if (resources_list->kds_access_bitmap == NULL) {
		/* Free the resource array allocated above; freeing the (NULL)
		 * bitmap here, as the original code did, leaks kds_resources. */
		kfree(resources_list->kds_resources);
		return -ENOMEM;
	}
	kbase_gpu_vm_lock(kctx);
	for (res_id = 0; res_id < num_elems; res_id++, res++) {
		int exclusive;
		struct kbase_va_region *reg;
		struct kds_resource *kds_res = NULL;

		exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
		reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);

		/* did we find a matching region object? */
		if (NULL == reg || (reg->flags & KBASE_REG_FREE))
			break;

		/* no need to check reg->alloc as only regions with an alloc have
		 * a size, and kbase_region_tracker_find_region_enclosing_address
		 * only returns regions with size > 0 */
		switch (reg->gpu_alloc->type) {
#if defined(CONFIG_UMP) && defined(CONFIG_KDS)
		case KBASE_MEM_TYPE_IMPORTED_UMP:
			kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
			break;
#endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
		default:
			break;
		}

		/* no kds resource for the region? */
		if (!kds_res)
			break;

		resources_list->kds_resources[res_id] = kds_res;

		if (exclusive)
			set_bit(res_id, resources_list->kds_access_bitmap);
	}
	kbase_gpu_vm_unlock(kctx);

	/* did the loop run to completion? */
	if (res_id == num_elems)
		return 0;

	/* Clean up as the resource list is not valid. */
	kfree(resources_list->kds_resources);
	kfree(resources_list->kds_access_bitmap);

	return -EINVAL;
}

static bool kbasep_validate_kbase_pointer(
		struct kbase_context *kctx, union kbase_pointer *p)
{
	if (kctx->is_compat) {
		if (p->compat_value == 0)
			return false;
	} else {
		if (p->value == NULL)
			return false;
	}
	return true;
}

static int kbase_external_buffer_lock(struct kbase_context *kctx,
		struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
{
	struct base_external_resource *ext_res_copy;
	size_t ext_resource_size;
	int ret = -EINVAL;
	int fd = -EBADF;
	struct base_external_resource __user *ext_res_user;
	int __user *file_desc_usr;
	struct kbasep_kds_resource_set_file_data *fdata;
	struct kbase_kds_resource_list_data resource_list_data;

	if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
		return -EINVAL;

	/* Check user space has provided valid data */
	if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
			!kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
			(args->num_res == 0) ||
			(args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
		return -EINVAL;

	ext_resource_size = sizeof(struct base_external_resource) * args->num_res;

	KBASE_DEBUG_ASSERT(ext_resource_size != 0);
	ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);

	if (!ext_res_copy)
		return -EINVAL;
#ifdef CONFIG_COMPAT
	if (kctx->is_compat) {
		ext_res_user = compat_ptr(args->external_resource.compat_value);
		file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
	} else {
#endif /* CONFIG_COMPAT */
		ext_res_user = args->external_resource.value;
		file_desc_usr = args->file_descriptor.value;
#ifdef CONFIG_COMPAT
	}
#endif /* CONFIG_COMPAT */

	/* Copy the external resources to lock from user space */
	if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
		goto out;

	/* Allocate data to be stored in the file */
	fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);

	if (!fdata) {
		ret = -ENOMEM;
		goto out;
	}

	/* Parse given elements and create resource and access lists */
	ret = kbasep_kds_allocate_resource_list_data(kctx,
			ext_res_copy, args->num_res, &resource_list_data);
	if (!ret) {
		long err;

		fdata->lock = NULL;

		fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);

		err = copy_to_user(file_desc_usr, &fd, sizeof(fd));

		/* If the file descriptor was valid and we successfully copied
		 * it to user space, then we can try and lock the requested
		 * kds resources.
		 */
		if ((fd >= 0) && (err == 0)) {
			struct kds_resource_set *lock;

			lock = kds_waitall(args->num_res,
					resource_list_data.kds_access_bitmap,
					resource_list_data.kds_resources,
					KDS_WAIT_BLOCKING);

			if (!lock) {
				ret = -EINVAL;
			} else if (IS_ERR(lock)) {
				ret = PTR_ERR(lock);
			} else {
				ret = 0;
				fdata->lock = lock;
			}
		} else {
			ret = -EINVAL;
		}

		kfree(resource_list_data.kds_resources);
		kfree(resource_list_data.kds_access_bitmap);
	}
	if (ret) {
		/* If the file was opened successfully then close it, which
		 * will clean up the file data; otherwise we clean up the
		 * file data ourselves.
		 */
		if (fd >= 0)
			sys_close(fd);
		else
			kfree(fdata);
	}
out:
	kfree(ext_res_copy);
	return ret;
}
#endif /* CONFIG_KDS */
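
/*
 * Illustrative userspace sketch (not part of the driver; the ioctl command
 * macro is elided as it is defined elsewhere in the UK headers): a client
 * drives kbase_external_buffer_lock() through KBASE_FUNC_EXT_BUFFER_LOCK.
 * On success it receives an anonymous fd whose release (close) drops the
 * whole KDS lock set via kds_resource_release():
 *
 *	struct kbase_uk_ext_buff_kds_data data = { 0 };
 *	int kds_fd = -1;
 *
 *	data.external_resource.value = resources;   // array of ext handles
 *	data.file_descriptor.value = &kds_fd;       // receives the anon fd
 *	data.num_res = nr_resources;                // 1..KBASE_MAXIMUM_EXT_RESOURCES
 *	ioctl(mali_fd, ..., &data);                 // blocks in kds_waitall()
 *	// ... buffers are exclusively/shared locked while kds_fd is open ...
 *	close(kds_fd);                              // releases the lock set
 */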
static void kbase_create_timeline_objects(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	unsigned int lpu_id;
	unsigned int as_nr;
	struct kbasep_kctx_list_element *element;

	/* Create LPU objects. */
	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
		u32 *lpu =
			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
		kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
	}

	/* Create Address Space objects. */
	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
		kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);

	/* Create GPU object and make it retain all LPUs and address spaces. */
	kbase_tlstream_tl_summary_new_gpu(
			kbdev,
			kbdev->gpu_props.props.raw_props.gpu_id,
			kbdev->gpu_props.num_cores);

	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
		void *lpu =
			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
		kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
	}

	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
		kbase_tlstream_tl_summary_lifelink_as_gpu(
				&kbdev->as[as_nr],
				kbdev);

	/* Create object for each known context. */
	mutex_lock(&kbdev->kctx_list_lock);
	list_for_each_entry(element, &kbdev->kctx_list, link) {
		kbase_tlstream_tl_summary_new_ctx(
				element->kctx,
				(u32)(element->kctx->id),
				(u32)(element->kctx->tgid));
	}
	/* Before releasing the lock, reset the body stream buffers.
	 * This prevents context creation messages from being directed to
	 * both the summary and body streams. */
	kbase_tlstream_reset_body_streams();
	mutex_unlock(&kbdev->kctx_list_lock);
	/* Static objects are placed into the summary packet, which needs to
	 * be transmitted first. Flush all streams to make it available to
	 * user space. */
	kbase_tlstream_flush_streams();
}
static void kbase_api_handshake(struct uku_version_check_args *version)
{
	switch (version->major) {
#ifdef BASE_LEGACY_UK8_SUPPORT
	case 8:
		/* We are backwards compatible with version 8,
		 * so pretend to be the old version */
		version->major = 8;
		version->minor = 4;
		break;
#endif /* BASE_LEGACY_UK8_SUPPORT */
#ifdef BASE_LEGACY_UK9_SUPPORT
	case 9:
		/* We are backwards compatible with version 9,
		 * so pretend to be the old version */
		version->major = 9;
		version->minor = 0;
		break;
#endif /* BASE_LEGACY_UK9_SUPPORT */
	case BASE_UK_VERSION_MAJOR:
		/* set minor to be the lowest common */
		version->minor = min_t(int, BASE_UK_VERSION_MINOR,
				(int)version->minor);
		break;
	default:
		/* We return our actual version regardless of whether it
		 * matches the version requested by userspace -
		 * userspace can bail if it can't handle this
		 * version */
		version->major = BASE_UK_VERSION_MAJOR;
		version->minor = BASE_UK_VERSION_MINOR;
		break;
	}
}
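
/*
 * Illustrative sketch (an assumption about the userspace side, not part of
 * this file): the version handshake is the first UK call a client makes;
 * kbase_dispatch() rejects everything else until it has happened. Roughly:
 *
 *	struct uku_version_check_args vc = { 0 };
 *
 *	vc.header.id = UKP_FUNC_ID_CHECK_VERSION;
 *	vc.major = BASE_UK_VERSION_MAJOR;  // version the client was built for
 *	vc.minor = BASE_UK_VERSION_MINOR;
 *	ioctl(mali_fd, ..., &vc);
 *	// On return vc.major/vc.minor hold the negotiated version; the
 *	// client bails out if it cannot operate at that version.
 */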
/**
 * enum mali_error - Mali error codes shared with userspace
 *
 * This is a subset of the common Mali errors that can be returned to
 * userspace. Values of matching user and kernel space enumerators MUST be
 * the same. MALI_ERROR_NONE is guaranteed to be 0.
 */
enum mali_error {
	MALI_ERROR_NONE = 0,
	MALI_ERROR_OUT_OF_GPU_MEMORY,
	MALI_ERROR_OUT_OF_MEMORY,
	MALI_ERROR_FUNCTION_FAILED,
};

enum {
	inited_mem = (1u << 0),
	inited_js = (1u << 1),
	inited_pm_runtime_init = (1u << 2),
#ifdef CONFIG_MALI_DEVFREQ
	inited_devfreq = (1u << 3),
#endif /* CONFIG_MALI_DEVFREQ */
	inited_tlstream = (1u << 4),
	inited_backend_early = (1u << 5),
	inited_backend_late = (1u << 6),
	inited_device = (1u << 7),
	inited_vinstr = (1u << 8),
	inited_ipa = (1u << 9),
	inited_misc_register = (1u << 11),
	inited_get_device = (1u << 12),
	inited_sysfs_group = (1u << 13),
	inited_dev_list = (1u << 14),
	inited_debugfs = (1u << 15),
	inited_gpu_device = (1u << 16),
	inited_registers_map = (1u << 17),
	inited_power_control = (1u << 19),
	inited_buslogger = (1u << 20)
};

#ifdef CONFIG_MALI_DEBUG
#define INACTIVE_WAIT_MS (5000)

void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
{
	kbdev->driver_inactive = inactive;
	wake_up(&kbdev->driver_inactive_wait);

	/* Wait for any running IOCTLs to complete */
	if (inactive)
		msleep(INACTIVE_WAIT_MS);
}
#endif /* CONFIG_MALI_DEBUG */
static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
{
	struct kbase_device *kbdev;
	union uk_header *ukh = args;
	u32 id;
	int ret = 0;

	KBASE_DEBUG_ASSERT(ukh != NULL);

	kbdev = kctx->kbdev;
	id = ukh->id;
	ukh->ret = MALI_ERROR_NONE; /* Be optimistic */

#ifdef CONFIG_MALI_DEBUG
	wait_event(kbdev->driver_inactive_wait,
			kbdev->driver_inactive == false);
#endif /* CONFIG_MALI_DEBUG */

	if (id == UKP_FUNC_ID_CHECK_VERSION) {
		struct uku_version_check_args *version_check;

		if (args_size != sizeof(struct uku_version_check_args)) {
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			return 0;
		}
		version_check = (struct uku_version_check_args *)args;
		kbase_api_handshake(version_check);
		/* save the proposed version number for later use */
		kctx->api_version = KBASE_API_VERSION(version_check->major,
				version_check->minor);
		ukh->ret = MALI_ERROR_NONE;
		return 0;
	}

	/* block calls until version handshake */
	if (kctx->api_version == 0)
		return -EINVAL;

	if (!atomic_read(&kctx->setup_complete)) {
		struct kbase_uk_set_flags *kbase_set_flags;

		/* setup pending, try to signal that we'll do the setup,
		 * if setup was already in progress, err this call
		 */
		if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
			return -EINVAL;

		/* if unexpected call, will stay stuck in setup mode
		 * (is it the only call we accept?)
		 */
		if (id != KBASE_FUNC_SET_FLAGS)
			return -EINVAL;

		kbase_set_flags = (struct kbase_uk_set_flags *)args;

		/* if not matching the expected call, stay in setup mode */
		if (sizeof(*kbase_set_flags) != args_size)
			goto bad_size;

		/* if bad flags, will stay stuck in setup mode */
		if (kbase_context_set_create_flags(kctx,
				kbase_set_flags->create_flags) != 0)
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;

		atomic_set(&kctx->setup_complete, 1);
		return 0;
	}

	/* setup complete, perform normal operation */
	switch (id) {
	case KBASE_FUNC_MEM_JIT_INIT:
		{
			struct kbase_uk_mem_jit_init *jit_init = args;

			if (sizeof(*jit_init) != args_size)
				goto bad_size;

			if (kbase_region_tracker_init_jit(kctx,
					jit_init->va_pages))
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_ALLOC:
		{
			struct kbase_uk_mem_alloc *mem = args;
			struct kbase_va_region *reg;

			if (sizeof(*mem) != args_size)
				goto bad_size;

#if defined(CONFIG_64BIT)
			if (!kctx->is_compat) {
				/* force SAME_VA if a 64-bit client */
				mem->flags |= BASE_MEM_SAME_VA;
			}
#endif

			reg = kbase_mem_alloc(kctx, mem->va_pages,
					mem->commit_pages, mem->extent,
					&mem->flags, &mem->gpu_va,
					&mem->va_alignment);

			if (!reg)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_IMPORT: {
			struct kbase_uk_mem_import *mem_import = args;
			void __user *phandle;

			if (sizeof(*mem_import) != args_size)
				goto bad_size;
#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				phandle = compat_ptr(mem_import->phandle.compat_value);
			else
#endif
				phandle = mem_import->phandle.value;

			if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_import(kctx, mem_import->type, phandle,
						&mem_import->gpu_va,
						&mem_import->va_pages,
						&mem_import->flags)) {
				mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
			break;
	}
	case KBASE_FUNC_MEM_ALIAS: {
			struct kbase_uk_mem_alias *alias = args;
			struct base_mem_aliasing_info __user *user_ai;
			struct base_mem_aliasing_info *ai;

			if (sizeof(*alias) != args_size)
				goto bad_size;

			if (alias->nents > 2048) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}
			if (!alias->nents) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				user_ai = compat_ptr(alias->ai.compat_value);
			else
#endif
				user_ai = alias->ai.value;

			ai = vmalloc(sizeof(*ai) * alias->nents);

			if (!ai) {
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
				break;
			}

			if (copy_from_user(ai, user_ai,
					sizeof(*ai) * alias->nents)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				goto copy_failed;
			}

			alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
					alias->stride,
					alias->nents, ai,
					&alias->va_pages);

			if (!alias->gpu_va) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				goto no_alias;
			}
no_alias:
copy_failed:
			vfree(ai);
			break;
	}
	case KBASE_FUNC_MEM_COMMIT:
		{
			struct kbase_uk_mem_commit *commit = args;

			if (sizeof(*commit) != args_size)
				goto bad_size;

			if (commit->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_commit(kctx, commit->gpu_addr,
					commit->pages,
					(base_backing_threshold_status *)
					&commit->result_subcode) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_QUERY:
		{
			struct kbase_uk_mem_query *query = args;

			if (sizeof(*query) != args_size)
				goto bad_size;

			if (query->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}
			if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
					query->query != KBASE_MEM_QUERY_VA_SIZE &&
					query->query != KBASE_MEM_QUERY_FLAGS) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_query(kctx, query->gpu_addr,
					query->query, &query->value) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
			break;
		}
	case KBASE_FUNC_MEM_FLAGS_CHANGE:
		{
			struct kbase_uk_mem_flags_change *fc = args;

			if (sizeof(*fc) != args_size)
				goto bad_size;

			if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_flags_change(kctx, fc->gpu_va,
					fc->flags, fc->mask) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_FREE:
		{
			struct kbase_uk_mem_free *mem = args;

			if (sizeof(*mem) != args_size)
				goto bad_size;

			if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_JOB_SUBMIT:
		{
			struct kbase_uk_job_submit *job = args;

			if (sizeof(*job) != args_size)
				goto bad_size;

			if (kbase_jd_submit(kctx, job) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_SYNC:
		{
			struct kbase_uk_sync_now *sn = args;

			if (sizeof(*sn) != args_size)
				goto bad_size;

			if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

#ifndef CONFIG_MALI_COH_USER
			if (kbase_sync_now(kctx, &sn->sset) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif
			break;
		}
	case KBASE_FUNC_DISJOINT_QUERY:
		{
			struct kbase_uk_disjoint_query *dquery = args;

			if (sizeof(*dquery) != args_size)
				goto bad_size;

			/* Get the disjointness counter value. */
			dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
			break;
		}
	case KBASE_FUNC_POST_TERM:
		{
			kbase_event_close(kctx);
			break;
		}
	case KBASE_FUNC_HWCNT_SETUP:
		{
			struct kbase_uk_hwcnt_setup *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
					&kctx->vinstr_cli, setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_HWCNT_DUMP:
		{
			/* args ignored */
			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
					BASE_HWCNT_READER_EVENT_MANUAL) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_HWCNT_CLEAR:
		{
			/* args ignored */
			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_HWCNT_READER_SETUP:
		{
			struct kbase_uk_hwcnt_reader_setup *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
					setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_GPU_PROPS_REG_DUMP:
		{
			struct kbase_uk_gpuprops *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_FIND_CPU_OFFSET:
		{
			struct kbase_uk_find_cpu_offset *find = args;

			if (sizeof(*find) != args_size)
				goto bad_size;

			if (find->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
				goto out_bad;
			}

			if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			} else {
				int err;

				err = kbasep_find_enclosing_cpu_mapping_offset(
						kctx,
						find->gpu_addr,
						(uintptr_t) find->cpu_addr,
						(size_t) find->size,
						&find->offset);

				if (err)
					ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
			break;
		}
	case KBASE_FUNC_GET_VERSION:
		{
			struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;

			if (sizeof(*get_version) != args_size)
				goto bad_size;

			/* version buffer size check is made in compile time assert */
			memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
			get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
			break;
		}
	case KBASE_FUNC_STREAM_CREATE:
		{
#ifdef CONFIG_SYNC
			struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;

			if (sizeof(*screate) != args_size)
				goto bad_size;

			if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
				/* not NULL terminated */
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_stream_create(screate->name, &screate->fd) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
#else /* CONFIG_SYNC */
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif /* CONFIG_SYNC */
			break;
		}
	case KBASE_FUNC_FENCE_VALIDATE:
		{
#ifdef CONFIG_SYNC
			struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;

			if (sizeof(*fence_validate) != args_size)
				goto bad_size;

			if (kbase_fence_validate(fence_validate->fd) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
#endif /* CONFIG_SYNC */
			break;
		}
	case KBASE_FUNC_EXT_BUFFER_LOCK:
		{
#ifdef CONFIG_KDS
			ret = kbase_external_buffer_lock(kctx,
					(struct kbase_uk_ext_buff_kds_data *)args,
					args_size);
			switch (ret) {
			case 0:
				ukh->ret = MALI_ERROR_NONE;
				break;
			case -ENOMEM:
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
				break;
			default:
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
#endif /* CONFIG_KDS */
			break;
		}
#ifdef BASE_LEGACY_UK8_SUPPORT
	case KBASE_FUNC_KEEP_GPU_POWERED:
		{
			dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
#endif /* BASE_LEGACY_UK8_SUPPORT */
	case KBASE_FUNC_TLSTREAM_ACQUIRE:
		{
			struct kbase_uk_tlstream_acquire *tlstream_acquire =
				args;

			if (sizeof(*tlstream_acquire) != args_size)
				goto bad_size;

			if (0 != kbase_tlstream_acquire(
					kctx,
					&tlstream_acquire->fd)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			} else if (tlstream_acquire->fd >= 0) {
				/* Summary stream was cleared during acquire.
				 * Create static timeline objects that will be
				 * read by client. */
				kbase_create_timeline_objects(kctx);
			}
			break;
		}
	case KBASE_FUNC_TLSTREAM_FLUSH:
		{
			struct kbase_uk_tlstream_flush *tlstream_flush =
				args;

			if (sizeof(*tlstream_flush) != args_size)
				goto bad_size;

			kbase_tlstream_flush_streams();
			break;
		}
	case KBASE_FUNC_GET_CONTEXT_ID:
		{
			struct kbase_uk_context_id *info = args;

			info->id = kctx->id;
			break;
		}
	case KBASE_FUNC_SOFT_EVENT_UPDATE:
		{
			struct kbase_uk_soft_event_update *update = args;

			if (sizeof(*update) != args_size)
				goto bad_size;

			if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
					(update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
					(update->flags != 0))
				goto out_bad;

			if (kbasep_write_soft_event_status(
					kctx, update->evt,
					update->new_status) != 0) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (update->new_status == BASE_JD_SOFT_EVENT_SET)
				kbasep_complete_triggered_soft_events(
						kctx, update->evt);
			break;
		}
	case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
		{
			/* Trap */
			break;
		}
	default:
		dev_err(kbdev->dev, "unknown ioctl %u\n", id);
		goto out_bad;
	}

	return ret;

bad_size:
	dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
out_bad:
	return -EINVAL;
}
static struct kbase_device *to_kbase_device(struct device *dev)
{
	return dev_get_drvdata(dev);
}

static int assign_irqs(struct platform_device *pdev)
{
	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
	int i;

	if (!kbdev)
		return -ENODEV;

	/* 3 IRQ resources */
	for (i = 0; i < 3; i++) {
		struct resource *irq_res;
		int irqtag;

		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq_res) {
			dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
			return -ENOENT;
		}

#ifdef CONFIG_OF
		if (!strcmp(irq_res->name, "job")) {
			irqtag = JOB_IRQ_TAG;
		} else if (!strcmp(irq_res->name, "mmu")) {
			irqtag = MMU_IRQ_TAG;
		} else if (!strcmp(irq_res->name, "gpu")) {
			irqtag = GPU_IRQ_TAG;
		} else {
			dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
					irq_res->name);
			return -EINVAL;
		}
#else
		irqtag = i;
#endif /* CONFIG_OF */
		kbdev->irqs[irqtag].irq = irq_res->start;
		kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
	}

	return 0;
}
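
/*
 * Illustrative device tree fragment (shown for reference only; addresses
 * and interrupt specifiers are made up): with CONFIG_OF, the three IRQ
 * resources above are matched by name, so a Midgard GPU node is expected
 * to name its interrupts "job", "mmu" and "gpu":
 *
 *	gpu@fc010000 {
 *		compatible = "arm,mali-midgard";
 *		reg = <0xfc010000 0x4000>;
 *		interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
 *		interrupt-names = "job", "mmu", "gpu";
 *	};
 */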
/*
 * API to acquire device list mutex and
 * return pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void)
{
	mutex_lock(&kbase_dev_list_lock);
	return &kbase_dev_list;
}

/* API to release the device list mutex */
void kbase_dev_list_put(const struct list_head *dev_list)
{
	mutex_unlock(&kbase_dev_list_lock);
}

/* Find a particular kbase device (as specified by minor number), or find
 * the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
	struct kbase_device *kbdev = NULL;
	struct list_head *entry;
	const struct list_head *dev_list = kbase_dev_list_get();

	list_for_each(entry, dev_list) {
		struct kbase_device *tmp;

		tmp = list_entry(entry, struct kbase_device, entry);
		if (tmp->mdev.minor == minor || minor == -1) {
			kbdev = tmp;
			get_device(kbdev->dev);
			break;
		}
	}
	kbase_dev_list_put(dev_list);

	return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
	put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);
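
/*
 * Usage sketch (illustrative): kbase_find_device() takes a reference on the
 * underlying struct device via get_device(), so every successful lookup
 * must be paired with kbase_release_device():
 *
 *	struct kbase_device *kbdev = kbase_find_device(-1);	// first device
 *
 *	if (kbdev) {
 *		// ... use kbdev ...
 *		kbase_release_device(kbdev);
 *	}
 */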
static int kbase_open(struct inode *inode, struct file *filp)
{
	struct kbase_device *kbdev = NULL;
	struct kbase_context *kctx;
	int ret = 0;

	kbdev = kbase_find_device(iminor(inode));

	if (!kbdev)
		return -ENODEV;

	kctx = kbase_create_context(kbdev, is_compat_task());
	if (!kctx) {
		ret = -ENOMEM;
		goto out;
	}

	init_waitqueue_head(&kctx->event_queue);
	filp->private_data = kctx;
	kctx->filp = filp;

	kctx->infinite_cache_active = kbdev->infinite_cache_active_default;

	dev_dbg(kbdev->dev, "created base context\n");

	{
		struct kbasep_kctx_list_element *element;

		element = kzalloc(sizeof(*element), GFP_KERNEL);
		if (element) {
			mutex_lock(&kbdev->kctx_list_lock);
			element->kctx = kctx;
			list_add(&element->link, &kbdev->kctx_list);
			kbase_tlstream_tl_new_ctx(
					element->kctx,
					(u32)(element->kctx->id),
					(u32)(element->kctx->tgid));
			mutex_unlock(&kbdev->kctx_list_lock);
		} else {
			/* we don't treat this as a fail - just warn about it */
			dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
		}
	}
	return 0;

out:
	kbase_release_device(kbdev);
	return ret;
}

static int kbase_release(struct inode *inode, struct file *filp)
{
	struct kbase_context *kctx = filp->private_data;
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbasep_kctx_list_element *element, *tmp;
	bool found_element = false;

	kbase_tlstream_tl_del_ctx(kctx);

	mutex_lock(&kbdev->kctx_list_lock);
	list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
		if (element->kctx == kctx) {
			list_del(&element->link);
			kfree(element);
			found_element = true;
		}
	}
	mutex_unlock(&kbdev->kctx_list_lock);
	if (!found_element)
		dev_warn(kbdev->dev, "kctx not in kctx_list\n");

	filp->private_data = NULL;

	mutex_lock(&kctx->vinstr_cli_lock);
	/* If this client was performing hwcnt dumping and did not explicitly
	 * detach itself, remove it from the vinstr core now */
	if (kctx->vinstr_cli) {
		struct kbase_uk_hwcnt_setup setup;

		setup.dump_buffer = 0llu;
		kbase_vinstr_legacy_hwc_setup(
				kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
	}
	mutex_unlock(&kctx->vinstr_cli_lock);

	kbase_destroy_context(kctx);

	dev_dbg(kbdev->dev, "deleted base context\n");
	kbase_release_device(kbdev);
	return 0;
}

#define CALL_MAX_SIZE 536

static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };	/* alignment fixup */
	u32 size = _IOC_SIZE(cmd);
	struct kbase_context *kctx = filp->private_data;

	if (size > CALL_MAX_SIZE)
		return -ENOTTY;

	if (copy_from_user(&msg, (void __user *)arg, size) != 0) {
		dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
		return -EFAULT;
	}

	if (kbase_dispatch(kctx, &msg, size) != 0)
		return -EFAULT;

	if (copy_to_user((void __user *)arg, &msg, size) != 0) {
		dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
		return -EFAULT;
	}
	return 0;
}
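
/*
 * Illustrative sketch (an assumption about the userspace side; the ioctl
 * command macro is elided): every UK call is a single in/out buffer whose
 * size is encoded in the ioctl command. The argument struct begins with a
 * union uk_header carrying the function id on the way in and a mali_error
 * code on the way out, which is why kbase_ioctl() copies the same buffer
 * in both directions:
 *
 *	struct kbase_uk_mem_free mem_free = { 0 };
 *
 *	mem_free.header.id = KBASE_FUNC_MEM_FREE;
 *	mem_free.gpu_addr = gpu_va;
 *	if (ioctl(mali_fd, cmd, &mem_free) == 0 &&
 *	    mem_free.header.ret == MALI_ERROR_NONE)
 *		;	// the region was freed
 */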
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct kbase_context *kctx = filp->private_data;
	struct base_jd_event_v2 uevent;
	int out_count = 0;

	if (count < sizeof(uevent))
		return -ENOBUFS;

	do {
		while (kbase_event_dequeue(kctx, &uevent)) {
			if (out_count > 0)
				goto out;

			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			if (wait_event_interruptible(kctx->event_queue,
					kbase_event_pending(kctx)) != 0)
				return -ERESTARTSYS;
		}
		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
			if (out_count == 0)
				return -EPIPE;
			goto out;
		}

		if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
			return -EFAULT;

		buf += sizeof(uevent);
		out_count++;
		count -= sizeof(uevent);
	} while (count >= sizeof(uevent));
out:
	return out_count * sizeof(uevent);
}
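
/*
 * Usage sketch (illustrative): job events are consumed by read()ing whole
 * base_jd_event_v2 records from the device fd. A buffer shorter than one
 * record yields -ENOBUFS, and a blocking read sleeps until an event is
 * queued (the event_code field is from the struct above; other fields are
 * assumed):
 *
 *	struct base_jd_event_v2 events[8];
 *	ssize_t n = read(mali_fd, events, sizeof(events));
 *
 *	for (ssize_t i = 0; i < n / (ssize_t)sizeof(events[0]); i++)
 *		;	// handle events[i].event_code
 */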
static unsigned int kbase_poll(struct file *filp, poll_table *wait)
{
	struct kbase_context *kctx = filp->private_data;

	poll_wait(filp, &kctx->event_queue, wait);
	if (kbase_event_pending(kctx))
		return POLLIN | POLLRDNORM;

	return 0;
}

void kbase_event_wakeup(struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

	wake_up_interruptible(&kctx->event_queue);
}

static int kbase_check_flags(int flags)
{
	/* Enforce that the driver keeps the O_CLOEXEC flag so that execve()
	 * always closes the file descriptor in a child process.
	 */
	if (0 == (flags & O_CLOEXEC))
		return -EINVAL;

	return 0;
}
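
/*
 * Usage sketch (illustrative; the device node name is an assumption):
 * because of kbase_check_flags(), clients must open the device node with
 * O_CLOEXEC:
 *
 *	int mali_fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
 */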
#ifdef CONFIG_64BIT
/* The following function is taken from the kernel and just
 * renamed. As it's not exported to modules we must copy-paste it here.
 */
static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
		*info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vma->vm_start;
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;

			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
					struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vma->vm_prev->vm_end : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}
static unsigned long kbase_get_unmapped_area(struct file *filp,
		const unsigned long addr, const unsigned long len,
		const unsigned long pgoff, const unsigned long flags)
{
	/* based on get_unmapped_area, but simplified slightly because some
	 * values are known in advance */
	struct kbase_context *kctx = filp->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* err on fixed address */
	if ((flags & MAP_FIXED) || addr)
		return -EINVAL;

	/* too big? */
	if (len > TASK_SIZE - SZ_2M)
		return -ENOMEM;

	if (kctx->is_compat)
		return current->mm->get_unmapped_area(filp, addr, len, pgoff,
				flags);

	if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
		info.high_limit = kctx->same_va_end << PAGE_SHIFT;
		info.align_mask = 0;
		info.align_offset = 0;
	} else {
		info.high_limit = min_t(unsigned long, mm->mmap_base,
				(kctx->same_va_end << PAGE_SHIFT));
		if (len >= SZ_2M) {
			info.align_offset = SZ_2M;
			info.align_mask = SZ_2M - 1;
		} else {
			info.align_mask = 0;
			info.align_offset = 0;
		}
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = SZ_2M;
	return kbase_unmapped_area_topdown(&info);
}
#endif

static const struct file_operations kbase_fops = {
	.owner = THIS_MODULE,
	.open = kbase_open,
	.release = kbase_release,
	.read = kbase_read,
	.poll = kbase_poll,
	.unlocked_ioctl = kbase_ioctl,
	.compat_ioctl = kbase_ioctl,
	.mmap = kbase_mmap,
	.check_flags = kbase_check_flags,
#ifdef CONFIG_64BIT
	.get_unmapped_area = kbase_get_unmapped_area,
#endif
};

void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
{
	writel(value, kbdev->reg + offset);
}

u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
{
	return readl(kbdev->reg + offset);
}

/** Show callback for the @c power_policy sysfs file.
 *
 * This function is called to get the contents of the @c power_policy sysfs
 * file. This is a list of the available policies with the currently active one
 * surrounded by square brackets.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_policy *current_policy;
	const struct kbase_pm_policy *const *policy_list;
	int policy_count;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	current_policy = kbase_pm_get_policy(kbdev);

	policy_count = kbase_pm_list_policies(&policy_list);

	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
		if (policy_list[i] == current_policy)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
	}

	if (ret < PAGE_SIZE - 1) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/** Store callback for the @c power_policy sysfs file.
 *
 * This function is called when the @c power_policy sysfs file is written to.
 * It matches the requested policy against the available policies and if a
 * matching policy is found calls @ref kbase_pm_set_policy to change the
 * policy.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_policy *new_policy = NULL;
	const struct kbase_pm_policy *const *policy_list;
	int policy_count;
	int i;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	policy_count = kbase_pm_list_policies(&policy_list);

	for (i = 0; i < policy_count; i++) {
		if (sysfs_streq(policy_list[i]->name, buf)) {
			new_policy = policy_list[i];
			break;
		}
	}

	if (!new_policy) {
		dev_err(dev, "power_policy: policy not found\n");
		return -EINVAL;
	}

	kbase_pm_set_policy(kbdev, new_policy);

	return count;
}

/** The sysfs file @c power_policy.
 *
 * This is used for obtaining information about the available policies,
 * determining which policy is currently active, and changing the active
 * policy.
 */
static DEVICE_ATTR(power_policy, 0644, show_policy, set_policy);
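
/*
 * Example interaction with the power_policy file (a sketch only: the sysfs
 * path and the set of policy names listed depend on the platform and build
 * configuration):
 *
 *   $ cat /sys/class/misc/mali0/device/power_policy
 *   [demand] coarse_demand always_on
 *   $ echo always_on > /sys/class/misc/mali0/device/power_policy
 */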

/** Show callback for the @c core_availability_policy sysfs file.
 *
 * This function is called to get the contents of the @c core_availability_policy
 * sysfs file. This is a list of the available policies with the currently
 * active one surrounded by square brackets.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_ca_policy *current_policy;
	const struct kbase_pm_ca_policy *const *policy_list;
	int policy_count;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	current_policy = kbase_pm_ca_get_policy(kbdev);

	policy_count = kbase_pm_ca_list_policies(&policy_list);

	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
		if (policy_list[i] == current_policy)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
	}

	if (ret < PAGE_SIZE - 1) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/** Store callback for the @c core_availability_policy sysfs file.
 *
 * This function is called when the @c core_availability_policy sysfs file is
 * written to. It matches the requested policy against the available policies
 * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to
 * change the policy.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_ca_policy *new_policy = NULL;
	const struct kbase_pm_ca_policy *const *policy_list;
	int policy_count;
	int i;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	policy_count = kbase_pm_ca_list_policies(&policy_list);

	for (i = 0; i < policy_count; i++) {
		if (sysfs_streq(policy_list[i]->name, buf)) {
			new_policy = policy_list[i];
			break;
		}
	}

	if (!new_policy) {
		dev_err(dev, "core_availability_policy: policy not found\n");
		return -EINVAL;
	}

	kbase_pm_ca_set_policy(kbdev, new_policy);

	return count;
}

/** The sysfs file @c core_availability_policy
 *
 * This is used for obtaining information about the available policies,
 * determining which policy is currently active, and changing the active
 * policy.
 */
static DEVICE_ATTR(core_availability_policy, 0644, show_ca_policy, set_ca_policy);
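
/*
 * Example interaction with core_availability_policy (a sketch; the policy
 * names shown depend on which CA policies the build compiles in):
 *
 *   $ cat core_availability_policy
 *   [fixed] demand
 *   $ echo fixed > core_availability_policy
 */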

/** Show callback for the @c core_mask sysfs file.
 *
 * This function is called to get the contents of the @c core_mask sysfs
 * file.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Current core mask (JS0) : 0x%llX\n",
			kbdev->pm.debug_core_mask[0]);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Current core mask (JS1) : 0x%llX\n",
			kbdev->pm.debug_core_mask[1]);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Current core mask (JS2) : 0x%llX\n",
			kbdev->pm.debug_core_mask[2]);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Available core mask : 0x%llX\n",
			kbdev->gpu_props.props.raw_props.shader_present);

	return ret;
}

/** Store callback for the @c core_mask sysfs file.
 *
 * This function is called when the @c core_mask sysfs file is written to.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	u64 new_core_mask[3];
	int items;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	items = sscanf(buf, "%llx %llx %llx",
			&new_core_mask[0], &new_core_mask[1],
			&new_core_mask[2]);

	if (items == 1)
		new_core_mask[1] = new_core_mask[2] = new_core_mask[0];

	if (items == 1 || items == 3) {
		u64 shader_present =
				kbdev->gpu_props.props.raw_props.shader_present;
		u64 group0_core_mask =
				kbdev->gpu_props.props.coherency_info.group[0].
				core_mask;

		if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
				!(new_core_mask[0] & group0_core_mask) ||
			(new_core_mask[1] & shader_present) !=
						new_core_mask[1] ||
				!(new_core_mask[1] & group0_core_mask) ||
			(new_core_mask[2] & shader_present) !=
						new_core_mask[2] ||
				!(new_core_mask[2] & group0_core_mask)) {
			dev_err(dev, "core_mask: invalid core specification\n");
			return -EINVAL;
		}

		if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
				kbdev->pm.debug_core_mask[1] !=
						new_core_mask[1] ||
				kbdev->pm.debug_core_mask[2] !=
						new_core_mask[2]) {
			unsigned long flags;

			spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

			kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
					new_core_mask[1], new_core_mask[2]);

			spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
					flags);
		}

		return count;
	}

	dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
		"Use format <core_mask>\n"
		"or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
	return -EINVAL;
}

/** The sysfs file @c core_mask.
 *
 * This is used to restrict shader core availability for debugging purposes.
 * Reading it will show the current core mask and the mask of cores available.
 * Writing to it will set the current core mask.
 */
static DEVICE_ATTR(core_mask, 0644, show_core_mask, set_core_mask);
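
/*
 * Example writes to core_mask (hypothetical masks; valid bits are limited by
 * the "Available core mask" reported when reading the file, and each mask
 * must overlap core group 0):
 *
 *   $ echo 0xf > core_mask           # one mask applied to JS0, JS1 and JS2
 *   $ echo 0xf 0x3 0x3 > core_mask   # separate masks for JS0/JS1/JS2
 */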

/**
 * set_soft_event_timeout() - Store callback for the soft_event_timeout sysfs
 * file.
 *
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The value written to the sysfs file.
 * @count: The number of bytes written to the sysfs file.
 *
 * This allows setting the timeout for software event jobs. Waiting jobs will
 * be cancelled after this period expires. This is expressed in milliseconds.
 *
 * Return: count if the function succeeded. An error code on failure.
 */
static ssize_t set_soft_event_timeout(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int soft_event_timeout_ms;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	if ((kstrtoint(buf, 0, &soft_event_timeout_ms) != 0) ||
	    (soft_event_timeout_ms <= 0))
		return -EINVAL;

	atomic_set(&kbdev->js_data.soft_event_timeout_ms,
		   soft_event_timeout_ms);

	return count;
}

/**
 * show_soft_event_timeout() - Show callback for the soft_event_timeout sysfs
 * file.
 *
 * This will return the timeout for the software event jobs.
 *
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the sysfs file contents.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_soft_event_timeout(struct device *dev,
				       struct device_attribute *attr,
				       char * const buf)
{
	struct kbase_device *kbdev;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	return scnprintf(buf, PAGE_SIZE, "%i\n",
			 atomic_read(&kbdev->js_data.soft_event_timeout_ms));
}

static DEVICE_ATTR(soft_event_timeout, 0644,
		   show_soft_event_timeout, set_soft_event_timeout);
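
/*
 * Example use of soft_event_timeout (illustrative value, in milliseconds):
 *
 *   $ echo 3000 > soft_event_timeout
 *   $ cat soft_event_timeout
 *   3000
 */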

/** Store callback for the @c js_timeouts sysfs file.
 *
 * This function is called when the @c js_timeouts sysfs file is written to.
 * The file contains eight values separated by whitespace. The values are
 * basically the same as the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL,
 * JS_HARD_STOP_TICKS_SS, JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING,
 * JS_RESET_TICKS_SS, JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING
 * configuration values (in that order), with the difference that the
 * js_timeouts values are expressed in MILLISECONDS.
 *
 * The js_timeouts sysfs file allows the current values in use by the job
 * scheduler to be overridden. Note that a value needs to be other than 0 for
 * it to override the current job scheduler value.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int items;
	long js_soft_stop_ms;
	long js_soft_stop_ms_cl;
	long js_hard_stop_ms_ss;
	long js_hard_stop_ms_cl;
	long js_hard_stop_ms_dumping;
	long js_reset_ms_ss;
	long js_reset_ms_cl;
	long js_reset_ms_dumping;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
			&js_soft_stop_ms, &js_soft_stop_ms_cl,
			&js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
			&js_hard_stop_ms_dumping, &js_reset_ms_ss,
			&js_reset_ms_cl, &js_reset_ms_dumping);

	if (items == 8) {
		u64 ticks;

		if (js_soft_stop_ms >= 0) {
			ticks = js_soft_stop_ms * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_soft_stop_ticks = ticks;
		} else {
			kbdev->js_soft_stop_ticks = -1;
		}

		if (js_soft_stop_ms_cl >= 0) {
			ticks = js_soft_stop_ms_cl * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_soft_stop_ticks_cl = ticks;
		} else {
			kbdev->js_soft_stop_ticks_cl = -1;
		}

		if (js_hard_stop_ms_ss >= 0) {
			ticks = js_hard_stop_ms_ss * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_hard_stop_ticks_ss = ticks;
		} else {
			kbdev->js_hard_stop_ticks_ss = -1;
		}

		if (js_hard_stop_ms_cl >= 0) {
			ticks = js_hard_stop_ms_cl * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_hard_stop_ticks_cl = ticks;
		} else {
			kbdev->js_hard_stop_ticks_cl = -1;
		}

		if (js_hard_stop_ms_dumping >= 0) {
			ticks = js_hard_stop_ms_dumping * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_hard_stop_ticks_dumping = ticks;
		} else {
			kbdev->js_hard_stop_ticks_dumping = -1;
		}

		if (js_reset_ms_ss >= 0) {
			ticks = js_reset_ms_ss * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_reset_ticks_ss = ticks;
		} else {
			kbdev->js_reset_ticks_ss = -1;
		}

		if (js_reset_ms_cl >= 0) {
			ticks = js_reset_ms_cl * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_reset_ticks_cl = ticks;
		} else {
			kbdev->js_reset_ticks_cl = -1;
		}

		if (js_reset_ms_dumping >= 0) {
			ticks = js_reset_ms_dumping * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_reset_ticks_dumping = ticks;
		} else {
			kbdev->js_reset_ticks_dumping = -1;
		}

		kbdev->js_timeouts_updated = true;

		dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_soft_stop_ticks,
				js_soft_stop_ms);
		dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_soft_stop_ticks_cl,
				js_soft_stop_ms_cl);
		dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_hard_stop_ticks_ss,
				js_hard_stop_ms_ss);
		dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_hard_stop_ticks_cl,
				js_hard_stop_ms_cl);
		dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%ld ms)\n",
				(unsigned long)
				kbdev->js_hard_stop_ticks_dumping,
				js_hard_stop_ms_dumping);
		dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_reset_ticks_ss,
				js_reset_ms_ss);
		dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_reset_ticks_cl,
				js_reset_ms_cl);
		dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%ld ms)\n",
				(unsigned long)kbdev->js_reset_ticks_dumping,
				js_reset_ms_dumping);

		return count;
	}

	dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
			"Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
			"Write 0 for no change, -1 to restore default timeout\n");
	return -EINVAL;
}

/** Show callback for the @c js_timeouts sysfs file.
 *
 * This function is called to get the contents of the @c js_timeouts sysfs
 * file. It returns the last values written to the js_timeouts sysfs file. If
 * the file has not been written to yet, the values shown are the settings
 * currently in use.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;
	u64 ms;
	unsigned long js_soft_stop_ms;
	unsigned long js_soft_stop_ms_cl;
	unsigned long js_hard_stop_ms_ss;
	unsigned long js_hard_stop_ms_cl;
	unsigned long js_hard_stop_ms_dumping;
	unsigned long js_reset_ms_ss;
	unsigned long js_reset_ms_cl;
	unsigned long js_reset_ms_dumping;
	unsigned long ticks;
	u32 scheduling_period_ns;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	/* If no contexts have been scheduled since js_timeouts was last written
	 * to, the new timeouts might not have been latched yet. So check if an
	 * update is pending and use the new values if necessary. */
	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
		scheduling_period_ns = kbdev->js_scheduling_period_ns;
	else
		scheduling_period_ns = kbdev->js_data.scheduling_period_ns;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
		ticks = kbdev->js_soft_stop_ticks;
	else
		ticks = kbdev->js_data.soft_stop_ticks;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_soft_stop_ms = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
		ticks = kbdev->js_soft_stop_ticks_cl;
	else
		ticks = kbdev->js_data.soft_stop_ticks_cl;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_soft_stop_ms_cl = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
		ticks = kbdev->js_hard_stop_ticks_ss;
	else
		ticks = kbdev->js_data.hard_stop_ticks_ss;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_hard_stop_ms_ss = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
		ticks = kbdev->js_hard_stop_ticks_cl;
	else
		ticks = kbdev->js_data.hard_stop_ticks_cl;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_hard_stop_ms_cl = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
		ticks = kbdev->js_hard_stop_ticks_dumping;
	else
		ticks = kbdev->js_data.hard_stop_ticks_dumping;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_hard_stop_ms_dumping = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
		ticks = kbdev->js_reset_ticks_ss;
	else
		ticks = kbdev->js_data.gpu_reset_ticks_ss;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_reset_ms_ss = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
		ticks = kbdev->js_reset_ticks_cl;
	else
		ticks = kbdev->js_data.gpu_reset_ticks_cl;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_reset_ms_cl = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
		ticks = kbdev->js_reset_ticks_dumping;
	else
		ticks = kbdev->js_data.gpu_reset_ticks_dumping;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_reset_ms_dumping = (unsigned long)ms;

	ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
			js_soft_stop_ms, js_soft_stop_ms_cl,
			js_hard_stop_ms_ss, js_hard_stop_ms_cl,
			js_hard_stop_ms_dumping, js_reset_ms_ss,
			js_reset_ms_cl, js_reset_ms_dumping);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/** The sysfs file @c js_timeouts.
 *
 * This is used to override the current job scheduler values for
 * JS_SOFT_STOP_TICKS
 * JS_SOFT_STOP_TICKS_CL
 * JS_HARD_STOP_TICKS_SS
 * JS_HARD_STOP_TICKS_CL
 * JS_HARD_STOP_TICKS_DUMPING
 * JS_RESET_TICKS_SS
 * JS_RESET_TICKS_CL
 * JS_RESET_TICKS_DUMPING.
 */
static DEVICE_ATTR(js_timeouts, 0644, show_js_timeouts, set_js_timeouts);
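
/*
 * Example write to js_timeouts (illustrative values, in milliseconds; as the
 * format message above states, 0 leaves a field unchanged and -1 restores
 * its default):
 *
 *   $ echo 0 0 0 0 0 -1 -1 -1 > js_timeouts
 */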

/**
 * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
 *                            file
 * @dev:   The device the sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the js_scheduling_period sysfs file is written
 * to. It checks the data written, and if valid updates the js_scheduling_period
 * value.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_js_scheduling_period(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	unsigned int js_scheduling_period;
	u32 new_scheduling_period_ns;
	u32 old_period;
	u64 ticks;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtouint(buf, 0, &js_scheduling_period);
	if (ret || !js_scheduling_period) {
		dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
				"Use format <js_scheduling_period_ms>\n");
		return -EINVAL;
	}

	new_scheduling_period_ns = js_scheduling_period * 1000000;

	/* Update scheduling timeouts */
	mutex_lock(&kbdev->js_data.runpool_mutex);

	/* If no contexts have been scheduled since js_timeouts was last written
	 * to, the new timeouts might not have been latched yet. So check if an
	 * update is pending and use the new values if necessary. */

	/* Use previous 'new' scheduling period as a base if present. */
	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
		old_period = kbdev->js_scheduling_period_ns;
	else
		old_period = kbdev->js_data.scheduling_period_ns;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
		ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
	else
		ticks = (u64)kbdev->js_data.soft_stop_ticks *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_soft_stop_ticks = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
		ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
		ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_ss = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
		ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
		ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;

	kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
	kbdev->js_timeouts_updated = true;

	mutex_unlock(&kbdev->js_data.runpool_mutex);

	dev_dbg(kbdev->dev, "JS scheduling period: %ums\n",
			js_scheduling_period);

	return count;
}

/**
 * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
 *                             entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the JS scheduling period.
 *
 * This function is called to get the current period used for the JS scheduling
 * period.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_js_scheduling_period(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	u32 period;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
		period = kbdev->js_scheduling_period_ns;
	else
		period = kbdev->js_data.scheduling_period_ns;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n",
			period / 1000000);

	return ret;
}

static DEVICE_ATTR(js_scheduling_period, 0644,
		show_js_scheduling_period, set_js_scheduling_period);
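
/*
 * Example use of js_scheduling_period (illustrative value, in milliseconds):
 *
 *   $ echo 100 > js_scheduling_period
 *   $ cat js_scheduling_period
 *   100
 */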

#ifdef CONFIG_MALI_DEBUG
static ssize_t set_js_softstop_always(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	int softstop_always;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &softstop_always);
	if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
		dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
				"Use format <soft_stop_always>\n");
		return -EINVAL;
	}

	kbdev->js_data.softstop_always = (bool) softstop_always;
	dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
			(kbdev->js_data.softstop_always) ?
			"Enabled" : "Disabled");
	return count;
}

static ssize_t show_js_softstop_always(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/*
 * By default, soft-stops are disabled when only a single context is present.
 * The ability to enable soft-stop when only a single context is present can
 * be used for debug and unit-testing purposes.
 * (See CL t6xx_stress_1 unit-test as an example where this feature is used.)
 */
static DEVICE_ATTR(js_softstop_always, 0644, show_js_softstop_always, set_js_softstop_always);
#endif /* CONFIG_MALI_DEBUG */

#ifdef CONFIG_MALI_DEBUG
typedef void (kbasep_debug_command_func) (struct kbase_device *);

enum kbasep_debug_command_code {
	KBASEP_DEBUG_COMMAND_DUMPTRACE,

	/* This must be the last enum */
	KBASEP_DEBUG_COMMAND_COUNT
};

struct kbasep_debug_command {
	char *str;
	kbasep_debug_command_func *func;
};

/** Debug commands supported by the driver */
static const struct kbasep_debug_command debug_commands[] = {
	{
		.str = "dumptrace",
		.func = &kbasep_trace_dump,
	}
};

/** Show callback for the @c debug_command sysfs file.
 *
 * This function is called to get the contents of the @c debug_command sysfs
 * file. This is a list of the available debug commands, separated by newlines.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/** Store callback for the @c debug_command sysfs file.
 *
 * This function is called when the @c debug_command sysfs file is written to.
 * It matches the requested command against the available commands, and if
 * a matching command is found calls the associated function from
 * @ref debug_commands to issue the command.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int i;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
		if (sysfs_streq(debug_commands[i].str, buf)) {
			debug_commands[i].func(kbdev);
			return count;
		}
	}

	/* Debug Command not found */
	dev_err(dev, "debug_command: command not known\n");
	return -EINVAL;
}

/** The sysfs file @c debug_command.
 *
 * This is used to issue general debug commands to the device driver.
 * Reading it will produce a list of debug commands, separated by newlines.
 * Writing to it with one of those commands will issue said command.
 */
static DEVICE_ATTR(debug_command, 0644, show_debug, issue_debug);
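
/*
 * Example use of debug_command (the only command currently registered in
 * debug_commands[] is "dumptrace"):
 *
 *   $ cat debug_command
 *   dumptrace
 *   $ echo dumptrace > debug_command
 */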
#endif /* CONFIG_MALI_DEBUG */

/**
 * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer to receive the GPU information.
 *
 * This function is called to get a description of the present Mali
 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
 * number of cores, the hardware version and the raw product id. For
 * example:
 *
 *    Mali-T60x MP4 r0p0 0x6956
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t kbase_show_gpuinfo(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	static const struct gpu_product_id_name {
		unsigned int id;
		char *name;
	} gpu_product_id_names[] = {
		{ .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
		{ .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
		{ .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
		{ .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
		{ .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
		{ .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
		{ .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
		{ .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
		{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
		  .name = "Mali-G71" },
	};
	const char *product_name = "(Unknown Mali GPU)";
	struct kbase_device *kbdev;
	u32 gpu_id;
	unsigned int product_id, product_id_mask;
	unsigned int i;
	bool is_new_format;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
	product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
	is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
	product_id_mask =
		(is_new_format ?
			GPU_ID2_PRODUCT_MODEL :
			GPU_ID_VERSION_PRODUCT_ID) >>
		GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
		const struct gpu_product_id_name *p = &gpu_product_id_names[i];

		if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
		    (p->id & product_id_mask) ==
		    (product_id & product_id_mask)) {
			product_name = p->name;
			break;
		}
	}

	return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
		product_name, kbdev->gpu_props.num_cores,
		(gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
		(gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
		product_id);
}
static DEVICE_ATTR(gpuinfo, 0444, kbase_show_gpuinfo, NULL);
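
/*
 * Example gpuinfo read (output format only; the values shown are the sample
 * from the comment above, not a guaranteed result on any given device):
 *
 *   $ cat gpuinfo
 *   Mali-T60x MP4 r0p0 0x6956
 */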

/**
 * set_dvfs_period - Store callback for the dvfs_period sysfs file.
 * @dev:   The device this sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the dvfs_period sysfs file is written to. It
 * checks the data written, and if valid updates the DVFS period variable.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_dvfs_period(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	int dvfs_period;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &dvfs_period);
	if (ret || dvfs_period <= 0) {
		dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
				"Use format <dvfs_period_ms>\n");
		return -EINVAL;
	}

	kbdev->pm.dvfs_period = dvfs_period;
	dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);

	return count;
}

/**
 * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the DVFS sample period.
 *
 * This function is called to get the current period used for the DVFS sample
 * timer.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_dvfs_period(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);

	return ret;
}

static DEVICE_ATTR(dvfs_period, 0644, show_dvfs_period,
		set_dvfs_period);
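
/*
 * Example use of dvfs_period (illustrative value, in milliseconds):
 *
 *   $ echo 100 > dvfs_period
 *   $ cat dvfs_period
 *   100
 */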

/**
 * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
 * @dev:   The device this sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the pm_poweroff sysfs file is written to.
 *
 * This file contains three values separated by whitespace. The values
 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
 * ticks before the GPU is powered off), in that order.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_pm_poweroff(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int items;
	s64 gpu_poweroff_time;
	int poweroff_shader_ticks, poweroff_gpu_ticks;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	/* Format specifiers match the declared types above (s64 and int). */
	items = sscanf(buf, "%lld %d %d", &gpu_poweroff_time,
			&poweroff_shader_ticks,
			&poweroff_gpu_ticks);
	if (items != 3) {
		dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
				"Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
		return -EINVAL;
	}

	kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
	kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
	kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;

	return count;
}

/**
 * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the power-off settings.
 *
 * This function is called to get the current power-off timer settings: the
 * timer period and the shader and GPU tick counts.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_pm_poweroff(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%lld %u %u\n",
			ktime_to_ns(kbdev->pm.gpu_poweroff_time),
			kbdev->pm.poweroff_shader_ticks,
			kbdev->pm.poweroff_gpu_ticks);

	return ret;
}

static DEVICE_ATTR(pm_poweroff, 0644, show_pm_poweroff,
		set_pm_poweroff);
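
/*
 * Example write to pm_poweroff (illustrative values: timer period in ns,
 * then the shader and GPU tick counts, per the format message above):
 *
 *   $ echo 100000 2 2 > pm_poweroff
 */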

/**
 * set_reset_timeout - Store callback for the reset_timeout sysfs file.
 * @dev:   The device this sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the reset_timeout sysfs file is written to. It
 * checks the data written, and if valid updates the reset timeout.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_reset_timeout(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	int reset_timeout;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &reset_timeout);
	if (ret || reset_timeout <= 0) {
		dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
				"Use format <reset_timeout_ms>\n");
		return -EINVAL;
	}

	kbdev->reset_timeout_ms = reset_timeout;
	dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);

	return count;
}

/**
 * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer to receive the reset timeout.
 *
 * This function is called to get the current reset timeout.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_reset_timeout(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);

	return ret;
}

static DEVICE_ATTR(reset_timeout, 0644, show_reset_timeout,
		set_reset_timeout);
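
/*
 * Example use of reset_timeout (illustrative value, in milliseconds):
 *
 *   $ echo 500 > reset_timeout
 *   $ cat reset_timeout
 *   500
 */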

static ssize_t show_mem_pool_size(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
			kbase_mem_pool_size(&kbdev->mem_pool));

	return ret;
}

static ssize_t set_mem_pool_size(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	unsigned long new_size;
	int err;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	err = kstrtoul(buf, 0, &new_size);
	if (err)
		return err;

	kbase_mem_pool_trim(&kbdev->mem_pool, new_size);

	return count;
}

static DEVICE_ATTR(mem_pool_size, 0644, show_mem_pool_size,
		set_mem_pool_size);

static ssize_t show_mem_pool_max_size(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
			kbase_mem_pool_max_size(&kbdev->mem_pool));

	return ret;
}

static ssize_t set_mem_pool_max_size(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	unsigned long new_max_size;
	int err;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	err = kstrtoul(buf, 0, &new_max_size);
	if (err)
		return -EINVAL;

	kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);

	return count;
}

static DEVICE_ATTR(mem_pool_max_size, 0644, show_mem_pool_max_size,
		set_mem_pool_max_size);
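
/*
 * Example use of the memory pool files (illustrative values; the units follow
 * kbase_mem_pool_size()/kbase_mem_pool_max_size(), i.e. pages in the pool):
 *
 *   $ echo 512 > mem_pool_max_size   # cap the pool at 512 pages
 *   $ echo 0 > mem_pool_size         # trim the pool empty
 */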

static int kbasep_secure_mode_enable(struct kbase_device *kbdev)
{
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
		GPU_COMMAND_SET_PROTECTED_MODE, NULL);
	return 0;
}

static int kbasep_secure_mode_disable(struct kbase_device *kbdev)
{
	if (!kbase_prepare_to_reset_gpu_locked(kbdev))
		return -EBUSY;

	kbase_reset_gpu_locked(kbdev);

	return 0;
}

static struct kbase_secure_ops kbasep_secure_ops = {
	.secure_mode_enable = kbasep_secure_mode_enable,
	.secure_mode_disable = kbasep_secure_mode_disable,
};

static void kbasep_secure_mode_init(struct kbase_device *kbdev)
{
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
		/* Use native secure ops */
		kbdev->secure_ops = &kbasep_secure_ops;
		kbdev->secure_mode_support = true;
	}
#ifdef SECURE_CALLBACKS
	else {
		kbdev->secure_ops = SECURE_CALLBACKS;
		kbdev->secure_mode_support = false;

		if (kbdev->secure_ops) {
			int err;

			/* Make sure secure mode is disabled on startup */
			err = kbdev->secure_ops->secure_mode_disable(kbdev);

			/* secure_mode_disable() returns -EINVAL if not
			 * supported
			 */
			kbdev->secure_mode_support = (err != -EINVAL);
		}
	}
#endif /* SECURE_CALLBACKS */
}

static int kbase_common_reg_map(struct kbase_device *kbdev)
{
	int err = -ENOMEM;

	if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
		dev_err(kbdev->dev, "Register window unavailable\n");
		err = -EIO;
		goto out_region;
	}

	kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
	if (!kbdev->reg) {
		dev_err(kbdev->dev, "Can't remap register window\n");
		err = -EINVAL;
		goto out_ioremap;
	}

	return 0;

out_ioremap:
	release_mem_region(kbdev->reg_start, kbdev->reg_size);
out_region:
	return err;
}

static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
{
	if (kbdev->reg) {
		iounmap(kbdev->reg);
		release_mem_region(kbdev->reg_start, kbdev->reg_size);
		kbdev->reg = NULL;
		kbdev->reg_start = 0;
		kbdev->reg_size = 0;
	}
}

static int registers_map(struct kbase_device * const kbdev)
{
	/* the first memory resource is the physical address of the GPU
	 * registers */
	struct platform_device *pdev = to_platform_device(kbdev->dev);
	struct resource *reg_res;
	int err;

	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!reg_res) {
		dev_err(kbdev->dev, "Invalid register resource\n");
		return -ENOENT;
	}

	kbdev->reg_start = reg_res->start;
	kbdev->reg_size = resource_size(reg_res);

	err = kbase_common_reg_map(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Failed to map registers\n");
		return err;
	}

	return 0;
}

static void registers_unmap(struct kbase_device *kbdev)
{
	kbase_common_reg_unmap(kbdev);
}

static int power_control_init(struct platform_device *pdev)
{
	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
	int err = 0;

	if (!kbdev)
		return -ENODEV;

#if defined(CONFIG_OF) && defined(CONFIG_REGULATOR)
	kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
	if (IS_ERR_OR_NULL(kbdev->regulator)) {
		err = PTR_ERR(kbdev->regulator);
		kbdev->regulator = NULL;
		if (err == -EPROBE_DEFER) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			return err;
		}
		dev_info(kbdev->dev,
			"Continuing without Mali regulator control\n");
		/* Allow probe to continue without regulator */
	}
#endif /* CONFIG_OF && CONFIG_REGULATOR */

	kbdev->clock = clk_get(kbdev->dev, NULL);
	if (IS_ERR_OR_NULL(kbdev->clock)) {
		err = PTR_ERR(kbdev->clock);
		kbdev->clock = NULL;
		if (err == -EPROBE_DEFER) {
			dev_err(&pdev->dev, "Failed to get clock\n");
			goto fail;
		}
		dev_info(kbdev->dev, "Continuing without Mali clock control\n");
		/* Allow probe to continue without clock. */
	} else {
		err = clk_prepare_enable(kbdev->clock);
		if (err) {
			dev_err(kbdev->dev,
				"Failed to prepare and enable clock (%d)\n",
				err);
			goto fail;
		}
	}

#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
	/* Register the OPPs if they are available in device tree */
	err = dev_pm_opp_of_add_table(kbdev->dev);
	if (err)
		dev_dbg(kbdev->dev, "OPP table not found\n");
#endif

	return 0;

fail:
	if (kbdev->clock != NULL) {
		clk_put(kbdev->clock);
		kbdev->clock = NULL;
	}

#ifdef CONFIG_REGULATOR
	if (kbdev->regulator != NULL) {
		regulator_put(kbdev->regulator);
		kbdev->regulator = NULL;
	}
#endif

	return err;
}

static void power_control_term(struct kbase_device *kbdev)
{
	dev_pm_opp_of_remove_table(kbdev->dev);

	if (kbdev->clock) {
		clk_disable_unprepare(kbdev->clock);
		clk_put(kbdev->clock);
		kbdev->clock = NULL;
	}

#if defined(CONFIG_OF) && defined(CONFIG_REGULATOR)
	if (kbdev->regulator) {
		regulator_put(kbdev->regulator);
		kbdev->regulator = NULL;
	}
#endif /* CONFIG_OF && CONFIG_REGULATOR */
}

static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
	return 0;
}

static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }

static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
{
#ifdef CONFIG_OF
	u32 supported_coherency_bitmap =
		kbdev->gpu_props.props.raw_props.coherency_mode;
	const void *coherency_override_dts;
	u32 override_coherency;
#endif /* CONFIG_OF */

	kbdev->system_coherency = COHERENCY_NONE;

	/* device tree may override the coherency */
#ifdef CONFIG_OF
	coherency_override_dts = of_get_property(kbdev->dev->of_node,
						 "system-coherency",
						 NULL);
	if (coherency_override_dts) {
		override_coherency = be32_to_cpup(coherency_override_dts);

		if ((override_coherency <= COHERENCY_NONE) &&
			(supported_coherency_bitmap &
			 COHERENCY_FEATURE_BIT(override_coherency))) {
			kbdev->system_coherency = override_coherency;

			dev_info(kbdev->dev,
				"Using coherency mode %u set from dtb",
				override_coherency);
		} else
			dev_warn(kbdev->dev,
				"Ignoring unsupported coherency mode %u set from dtb",
				override_coherency);
	}
#endif /* CONFIG_OF */

	kbdev->gpu_props.props.raw_props.coherency_mode =
		kbdev->system_coherency;
}

static struct attribute *kbase_attrs[] = {
#ifdef CONFIG_MALI_DEBUG
	&dev_attr_debug_command.attr,
	&dev_attr_js_softstop_always.attr,
#endif
	&dev_attr_js_timeouts.attr,
	&dev_attr_soft_event_timeout.attr,
	&dev_attr_gpuinfo.attr,
	&dev_attr_dvfs_period.attr,
	&dev_attr_pm_poweroff.attr,
	&dev_attr_reset_timeout.attr,
	&dev_attr_js_scheduling_period.attr,
	&dev_attr_power_policy.attr,
	&dev_attr_core_availability_policy.attr,
	&dev_attr_core_mask.attr,
	&dev_attr_mem_pool_size.attr,
	&dev_attr_mem_pool_max_size.attr,
	NULL
};

static const struct attribute_group kbase_attr_group = {
	.attrs = kbase_attrs,
};
static int kbase_platform_device_remove(struct platform_device *pdev)
{
        struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
        const struct list_head *dev_list;

        if (!kbdev)
                return -ENODEV;

        if (kbdev->inited_subsys & inited_sysfs_group) {
                sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
                kbdev->inited_subsys &= ~inited_sysfs_group;
        }

        if (kbdev->inited_subsys & inited_dev_list) {
                dev_list = kbase_dev_list_get();
                list_del(&kbdev->entry);
                kbase_dev_list_put(dev_list);
                kbdev->inited_subsys &= ~inited_dev_list;
        }

        if (kbdev->inited_subsys & inited_misc_register) {
                misc_deregister(&kbdev->mdev);
                kbdev->inited_subsys &= ~inited_misc_register;
        }

        if (kbdev->inited_subsys & inited_get_device) {
                put_device(kbdev->dev);
                kbdev->inited_subsys &= ~inited_get_device;
        }

        if (kbdev->inited_subsys & inited_debugfs) {
                kbase_device_debugfs_term(kbdev);
                kbdev->inited_subsys &= ~inited_debugfs;
        }

        if (kbdev->inited_subsys & inited_ipa) {
                kbase_ipa_term(kbdev->ipa_ctx);
                kbdev->inited_subsys &= ~inited_ipa;
        }

        if (kbdev->inited_subsys & inited_vinstr) {
                kbase_vinstr_term(kbdev->vinstr_ctx);
                kbdev->inited_subsys &= ~inited_vinstr;
        }

#ifdef CONFIG_MALI_DEVFREQ
        if (kbdev->inited_subsys & inited_devfreq) {
                kbase_devfreq_term(kbdev);
                kbdev->inited_subsys &= ~inited_devfreq;
        }
#endif

        if (kbdev->inited_subsys & inited_backend_late) {
                kbase_backend_late_term(kbdev);
                kbdev->inited_subsys &= ~inited_backend_late;
        }

        if (kbdev->inited_subsys & inited_tlstream) {
                kbase_tlstream_term();
                kbdev->inited_subsys &= ~inited_tlstream;
        }

        /* Bring job and mem sys to a halt before we continue termination */
        if (kbdev->inited_subsys & inited_js)
                kbasep_js_devdata_halt(kbdev);

        if (kbdev->inited_subsys & inited_mem)
                kbase_mem_halt(kbdev);

        if (kbdev->inited_subsys & inited_js) {
                kbasep_js_devdata_term(kbdev);
                kbdev->inited_subsys &= ~inited_js;
        }

        if (kbdev->inited_subsys & inited_mem) {
                kbase_mem_term(kbdev);
                kbdev->inited_subsys &= ~inited_mem;
        }

        if (kbdev->inited_subsys & inited_pm_runtime_init) {
                kbdev->pm.callback_power_runtime_term(kbdev);
                kbdev->inited_subsys &= ~inited_pm_runtime_init;
        }

        if (kbdev->inited_subsys & inited_device) {
                kbase_device_term(kbdev);
                kbdev->inited_subsys &= ~inited_device;
        }

        if (kbdev->inited_subsys & inited_backend_early) {
                kbase_backend_early_term(kbdev);
                kbdev->inited_subsys &= ~inited_backend_early;
        }

        if (kbdev->inited_subsys & inited_power_control) {
                power_control_term(kbdev);
                kbdev->inited_subsys &= ~inited_power_control;
        }

        if (kbdev->inited_subsys & inited_registers_map) {
                registers_unmap(kbdev);
                kbdev->inited_subsys &= ~inited_registers_map;
        }

        if (kbdev->inited_subsys != 0)
                dev_err(kbdev->dev, "Missing sub system termination\n");

        kbase_device_free(kbdev);

        return 0;
}
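
/*
 * kbase_platform_device_probe - Bring the device up, one subsystem at a time.
 *
 * Each step records its success in kbdev->inited_subsys before the next one
 * runs. On any failure the whole of kbase_platform_device_remove() is
 * invoked, which unwinds exactly the steps completed so far, so probe()
 * itself needs no dedicated error-unwind labels.
 */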
static int kbase_platform_device_probe(struct platform_device *pdev)
{
        struct kbase_device *kbdev;
        struct mali_base_gpu_core_props *core_props;
        u32 gpu_id;
        const struct list_head *dev_list;
        int err = 0;

#ifdef CONFIG_OF
        err = kbase_platform_early_init();
        if (err) {
                dev_err(&pdev->dev, "Early platform initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
#endif

        kbdev = kbase_device_alloc();
        if (!kbdev) {
                dev_err(&pdev->dev, "Device allocation failed\n");
                kbase_platform_device_remove(pdev);
                return -ENOMEM;
        }

        kbdev->dev = &pdev->dev;
        dev_set_drvdata(kbdev->dev, kbdev);

        err = assign_irqs(pdev);
        if (err) {
                dev_err(&pdev->dev, "IRQ search failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }

        err = registers_map(kbdev);
        if (err) {
                dev_err(&pdev->dev, "Register map failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_registers_map;

        err = power_control_init(pdev);
        if (err) {
                dev_err(&pdev->dev, "Power control initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_power_control;

        err = kbase_backend_early_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "Early backend initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_backend_early;

        scnprintf(kbdev->devname, DEVNAME_SIZE, "mali%d", kbase_dev_nr);

        kbase_disjoint_init(kbdev);

        /* obtain min/max configured gpu frequencies */
        core_props = &(kbdev->gpu_props.props.core_props);
        core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
        core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
        kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;

        err = kbase_device_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_device;

        if (kbdev->pm.callback_power_runtime_init) {
                err = kbdev->pm.callback_power_runtime_init(kbdev);
                if (err) {
                        dev_err(kbdev->dev,
                                "Runtime PM initialization failed\n");
                        kbase_platform_device_remove(pdev);
                        return err;
                }
                kbdev->inited_subsys |= inited_pm_runtime_init;
        }

        err = kbase_mem_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_mem;
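
        /* Isolate the product ID field of GPU_ID before coherency setup */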
        gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
        gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
        gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;

        kbase_device_coherency_init(kbdev, gpu_id);

        kbasep_secure_mode_init(kbdev);

        err = kbasep_js_devdata_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "Job scheduler devdata initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_js;

        err = kbase_tlstream_init();
        if (err) {
                dev_err(kbdev->dev, "Timeline stream initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_tlstream;

        err = kbase_backend_late_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "Late backend initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_backend_late;

#ifdef CONFIG_MALI_DEVFREQ
        err = kbase_devfreq_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "Devfreq initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_devfreq;
#endif /* CONFIG_MALI_DEVFREQ */

        kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
        if (!kbdev->vinstr_ctx) {
                dev_err(kbdev->dev,
                        "Virtual instrumentation initialization failed\n");
                kbase_platform_device_remove(pdev);
                return -EINVAL;
        }
        kbdev->inited_subsys |= inited_vinstr;

        kbdev->ipa_ctx = kbase_ipa_init(kbdev);
        if (!kbdev->ipa_ctx) {
                dev_err(kbdev->dev, "IPA initialization failed\n");
                kbase_platform_device_remove(pdev);
                return -EINVAL;
        }
        kbdev->inited_subsys |= inited_ipa;

        err = kbase_device_debugfs_init(kbdev);
        if (err) {
                dev_err(kbdev->dev, "DebugFS initialization failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_debugfs;

        /* initialize the kctx list */
        mutex_init(&kbdev->kctx_list_lock);
        INIT_LIST_HEAD(&kbdev->kctx_list);

        kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
        kbdev->mdev.name = kbdev->devname;
        kbdev->mdev.fops = &kbase_fops;
        kbdev->mdev.parent = get_device(kbdev->dev);
        kbdev->inited_subsys |= inited_get_device;

        err = misc_register(&kbdev->mdev);
        if (err) {
                dev_err(kbdev->dev, "Misc device registration failed for %s\n",
                        kbdev->devname);
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_misc_register;

        dev_list = kbase_dev_list_get();
        list_add(&kbdev->entry, &kbase_dev_list);
        kbase_dev_list_put(dev_list);
        kbdev->inited_subsys |= inited_dev_list;

        err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
        if (err) {
                dev_err(&pdev->dev, "SysFS group creation failed\n");
                kbase_platform_device_remove(pdev);
                return err;
        }
        kbdev->inited_subsys |= inited_sysfs_group;

        dev_info(kbdev->dev,
                 "Probed as %s\n", dev_name(kbdev->mdev.this_device));

        kbase_dev_nr++;

        return err;
}
/** Suspend callback from the OS.
 *
 * This is called by Linux when the device should suspend.
 *
 * @param dev  The device to suspend
 *
 * @return A standard Linux error code
 */
static int kbase_device_suspend(struct device *dev)
{
        struct kbase_device *kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

#ifdef CONFIG_PM_DEVFREQ
        devfreq_suspend_device(kbdev->devfreq);
#endif

        kbase_pm_suspend(kbdev);
        return 0;
}
/** Resume callback from the OS.
 *
 * This is called by Linux when the device should resume from suspension.
 *
 * @param dev  The device to resume
 *
 * @return A standard Linux error code
 */
static int kbase_device_resume(struct device *dev)
{
        struct kbase_device *kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

        kbase_pm_resume(kbdev);

#ifdef CONFIG_PM_DEVFREQ
        devfreq_resume_device(kbdev->devfreq);
#endif

        return 0;
}
/** Runtime suspend callback from the OS.
 *
 * This is called by Linux when the device should prepare for a condition
 * in which it will not be able to communicate with the CPU(s) and RAM due
 * to power management.
 *
 * @param dev  The device to suspend
 *
 * @return A standard Linux error code
 */
#ifdef KBASE_PM_RUNTIME
static int kbase_device_runtime_suspend(struct device *dev)
{
        struct kbase_device *kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

#ifdef CONFIG_PM_DEVFREQ
        devfreq_suspend_device(kbdev->devfreq);
#endif

        if (kbdev->pm.backend.callback_power_runtime_off) {
                kbdev->pm.backend.callback_power_runtime_off(kbdev);
                dev_dbg(dev, "runtime suspend\n");
        }
        return 0;
}
#endif /* KBASE_PM_RUNTIME */
/** Runtime resume callback from the OS.
 *
 * This is called by Linux when the device should go into a fully active
 * state.
 *
 * @param dev  The device to resume
 *
 * @return A standard Linux error code
 */
#ifdef KBASE_PM_RUNTIME
static int kbase_device_runtime_resume(struct device *dev)
{
        int ret = 0;
        struct kbase_device *kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

        if (kbdev->pm.backend.callback_power_runtime_on) {
                ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
                dev_dbg(dev, "runtime resume\n");
        }

#ifdef CONFIG_PM_DEVFREQ
        devfreq_resume_device(kbdev->devfreq);
#endif

        return ret;
}
#endif /* KBASE_PM_RUNTIME */
#ifdef KBASE_PM_RUNTIME
/**
 * kbase_device_runtime_idle - Runtime idle callback from the OS.
 * @dev: The device to check
 *
 * This is called by Linux when the device appears to be inactive and it
 * might be placed into a low power state.
 *
 * Return: 0 if the device can be suspended, non-zero to prevent runtime
 * autosuspend, or a standard Linux error code on failure.
 */
static int kbase_device_runtime_idle(struct device *dev)
{
        struct kbase_device *kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

        /* Use platform specific implementation if it exists. */
        if (kbdev->pm.backend.callback_power_runtime_idle)
                return kbdev->pm.backend.callback_power_runtime_idle(kbdev);

        return 0;
}
#endif /* KBASE_PM_RUNTIME */
/* The power management operations for the platform driver */
static const struct dev_pm_ops kbase_pm_ops = {
        .suspend = kbase_device_suspend,
        .resume = kbase_device_resume,
#ifdef KBASE_PM_RUNTIME
        .runtime_suspend = kbase_device_runtime_suspend,
        .runtime_resume = kbase_device_runtime_resume,
        .runtime_idle = kbase_device_runtime_idle,
#endif /* KBASE_PM_RUNTIME */
};
#ifdef CONFIG_OF
static const struct of_device_id kbase_dt_ids[] = {
        { .compatible = "arm,malit6xx" },
        { .compatible = "arm,mali-t760" },
        { .compatible = "arm,mali-midgard" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, kbase_dt_ids);
#endif
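
/*
 * of_match_ptr() evaluates to kbase_dt_ids when CONFIG_OF is enabled and to
 * NULL otherwise, so the driver definition below compiles unchanged on
 * kernels without device tree support.
 */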
static struct platform_driver kbase_platform_driver = {
        .probe = kbase_platform_device_probe,
        .remove = kbase_platform_device_remove,
        .driver = {
                .name = "mali",
                .owner = THIS_MODULE,
                .pm = &kbase_pm_ops,
                .of_match_table = of_match_ptr(kbase_dt_ids),
        },
};
/*
 * When using Device Tree, the driver no longer provides a shortcut to
 * create the Mali platform device; the device is instantiated from its
 * device tree node instead.
 */
#ifdef CONFIG_OF
module_platform_driver(kbase_platform_driver);
#else

static int __init kbase_driver_init(void)
{
        int ret;

        ret = kbase_platform_early_init();
        if (ret)
                return ret;

        ret = platform_driver_register(&kbase_platform_driver);

        return ret;
}

static void __exit kbase_driver_exit(void)
{
        platform_driver_unregister(&kbase_platform_driver);
}

module_init(kbase_driver_init);
module_exit(kbase_driver_exit);

#endif /* CONFIG_OF */
MODULE_LICENSE("GPL");
MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
                __stringify(BASE_UK_VERSION_MAJOR) "." \
                __stringify(BASE_UK_VERSION_MINOR) ")");
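
/*
 * Defining CREATE_TRACE_POINTS before including the trace header makes this
 * the one translation unit that emits the tracepoint definitions; all other
 * users include the header without the macro and get only the declarations.
 */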
#ifdef CONFIG_MALI_SYSTEM_TRACE
#define CREATE_TRACE_POINTS
#include "mali_linux_kbase_trace.h"
#endif /* CONFIG_MALI_SYSTEM_TRACE */