mali_kbase_core_linux.c

/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_hwaccess_gpuprops.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_uku.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <mali_kbase_gator.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <backend/gpu/mali_kbase_devfreq.h>
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#if !MALI_CUSTOMER_RELEASE
#include "mali_kbase_regs_dump_debugfs.h"
#endif /* !MALI_CUSTOMER_RELEASE */
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>

#ifdef CONFIG_KDS
#include <linux/kds.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#endif /* CONFIG_KDS */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/compat.h>	/* is_compat_task */
#include <linux/mman.h>
#include <linux/version.h>
#include <linux/security.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
#include <mali_kbase_hw.h>
#include <platform/mali_kbase_platform_common.h>
#ifdef CONFIG_MALI_PLATFORM_FAKE
#include <platform/mali_kbase_platform_fake.h>
#endif /* CONFIG_MALI_PLATFORM_FAKE */
#ifdef CONFIG_SYNC
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC */
#ifdef CONFIG_PM_DEVFREQ
#include <linux/devfreq.h>
#endif /* CONFIG_PM_DEVFREQ */
#include <linux/clk.h>
#include <linux/delay.h>

#include <mali_kbase_config.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
#endif

#include <mali_kbase_tlstream.h>

/* GPU IRQ Tags */
#define JOB_IRQ_TAG	0
#define MMU_IRQ_TAG	1
#define GPU_IRQ_TAG	2

#if MALI_UNIT_TEST
static struct kbase_exported_test_data shared_kernel_test_data;
EXPORT_SYMBOL(shared_kernel_test_data);
#endif /* MALI_UNIT_TEST */

static int kbase_dev_nr;
static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"

static inline void __compile_time_asserts(void)
{
	CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
}
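
/*
 * KDS buffer locking support.
 *
 * A lock over a set of KDS resources is handed to user space as an anonymous
 * file descriptor: closing that fd runs kds_resource_release(), which drops
 * the resource set, so the lock lifetime follows normal fd semantics
 * (including cleanup when the process exits).
 */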
#ifdef CONFIG_KDS

struct kbasep_kds_resource_set_file_data {
	struct kds_resource_set *lock;
};

static int kds_resource_release(struct inode *inode, struct file *file);

static const struct file_operations kds_resource_fops = {
	.release = kds_resource_release
};

struct kbase_kds_resource_list_data {
	struct kds_resource **kds_resources;
	unsigned long *kds_access_bitmap;
	int num_elems;
};

static int kds_resource_release(struct inode *inode, struct file *file)
{
	struct kbasep_kds_resource_set_file_data *data;

	data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
	if (NULL != data) {
		if (NULL != data->lock)
			kds_resource_set_release(&data->lock);

		kfree(data);
	}
	return 0;
}
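
/*
 * kbasep_kds_allocate_resource_list_data - build the KDS resource and access
 * lists for a set of external resources.
 *
 * Walks the user-supplied external resources, resolves each GPU address to
 * its region and collects the region's KDS resource; bit res_id of
 * kds_access_bitmap is set when exclusive access was requested. Returns 0 on
 * success or a negative errno, freeing both arrays on failure.
 */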
static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
{
	struct base_external_resource *res = ext_res;
	int res_id;

	/* assume we have to wait for all */

	KBASE_DEBUG_ASSERT(0 != num_elems);
	resources_list->kds_resources = kmalloc_array(num_elems,
			sizeof(struct kds_resource *), GFP_KERNEL);

	if (NULL == resources_list->kds_resources)
		return -ENOMEM;

	KBASE_DEBUG_ASSERT(0 != num_elems);
	resources_list->kds_access_bitmap = kzalloc(
			sizeof(unsigned long) *
			((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
			GFP_KERNEL);

	if (NULL == resources_list->kds_access_bitmap) {
		/* Free the already-allocated resource array, not the bitmap
		 * whose allocation just failed. */
		kfree(resources_list->kds_resources);
		return -ENOMEM;
	}

	kbase_gpu_vm_lock(kctx);
	for (res_id = 0; res_id < num_elems; res_id++, res++) {
		int exclusive;
		struct kbase_va_region *reg;
		struct kds_resource *kds_res = NULL;

		exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
		reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);

		/* did we find a matching region object? */
		if (NULL == reg || (reg->flags & KBASE_REG_FREE))
			break;

		/* no need to check reg->alloc as only regions with an alloc
		 * have a size, and
		 * kbase_region_tracker_find_region_enclosing_address
		 * only returns regions with size > 0 */

		switch (reg->gpu_alloc->type) {
#if defined(CONFIG_UMP) && defined(CONFIG_KDS)
		case KBASE_MEM_TYPE_IMPORTED_UMP:
			kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
			break;
#endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
		default:
			break;
		}

		/* no kds resource for the region? */
		if (!kds_res)
			break;

		resources_list->kds_resources[res_id] = kds_res;

		if (exclusive)
			set_bit(res_id, resources_list->kds_access_bitmap);
	}
	kbase_gpu_vm_unlock(kctx);

	/* did the loop run to completion? */
	if (res_id == num_elems)
		return 0;

	/* Clean up as the resource list is not valid. */
	kfree(resources_list->kds_resources);
	kfree(resources_list->kds_access_bitmap);

	return -EINVAL;
}
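
/* A union kbase_pointer is only valid when the field matching the client's
 * ABI (compat or native) is non-NULL. */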
static bool kbasep_validate_kbase_pointer(
		struct kbase_context *kctx, union kbase_pointer *p)
{
	if (kctx->is_compat) {
		if (p->compat_value == 0)
			return false;
	} else {
		if (NULL == p->value)
			return false;
	}
	return true;
}
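
/*
 * kbase_external_buffer_lock - lock external resources on behalf of user
 * space.
 *
 * Copies the resource list from user space, wraps the pending lock in an
 * anonymous fd (returned through args->file_descriptor) and then blocks in
 * kds_waitall() until every requested resource is held. On any failure the
 * fd is closed (or the file data freed if no fd was created) so nothing
 * leaks.
 */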
static int kbase_external_buffer_lock(struct kbase_context *kctx,
		struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
{
	struct base_external_resource *ext_res_copy;
	size_t ext_resource_size;
	int ret = -EINVAL;
	int fd = -EBADF;
	struct base_external_resource __user *ext_res_user;
	int __user *file_desc_usr;
	struct kbasep_kds_resource_set_file_data *fdata;
	struct kbase_kds_resource_list_data resource_list_data;

	if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
		return -EINVAL;

	/* Check user space has provided valid data */
	if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
			!kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
			(0 == args->num_res) ||
			(args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
		return -EINVAL;

	ext_resource_size = sizeof(struct base_external_resource) * args->num_res;

	KBASE_DEBUG_ASSERT(0 != ext_resource_size);
	ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);

	if (!ext_res_copy)
		return -EINVAL;
#ifdef CONFIG_COMPAT
	if (kctx->is_compat) {
		ext_res_user = compat_ptr(args->external_resource.compat_value);
		file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
	} else {
#endif /* CONFIG_COMPAT */
		ext_res_user = args->external_resource.value;
		file_desc_usr = args->file_descriptor.value;
#ifdef CONFIG_COMPAT
	}
#endif /* CONFIG_COMPAT */

	/* Copy the external resources to lock from user space */
	if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
		goto out;

	/* Allocate data to be stored in the file */
	fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);

	if (!fdata) {
		ret = -ENOMEM;
		goto out;
	}

	/* Parse given elements and create resource and access lists */
	ret = kbasep_kds_allocate_resource_list_data(kctx,
			ext_res_copy, args->num_res, &resource_list_data);
	if (!ret) {
		long err;

		fdata->lock = NULL;

		fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);

		err = copy_to_user(file_desc_usr, &fd, sizeof(fd));

		/* If the file descriptor was valid and we successfully copied
		 * it to user space, then we can try and lock the requested
		 * kds resources.
		 */
		if ((fd >= 0) && (0 == err)) {
			struct kds_resource_set *lock;

			lock = kds_waitall(args->num_res,
					resource_list_data.kds_access_bitmap,
					resource_list_data.kds_resources,
					KDS_WAIT_BLOCKING);

			if (!lock) {
				ret = -EINVAL;
			} else if (IS_ERR(lock)) {
				ret = PTR_ERR(lock);
			} else {
				ret = 0;
				fdata->lock = lock;
			}
		} else {
			ret = -EINVAL;
		}

		kfree(resource_list_data.kds_resources);
		kfree(resource_list_data.kds_access_bitmap);
	}

	if (ret) {
		/* If the file was opened successfully then close it which will
		 * clean up the file data, otherwise we clean up the file data
		 * ourself.
		 */
		if (fd >= 0)
			sys_close(fd);
		else
			kfree(fdata);
	}
out:
	kfree(ext_res_copy);
	return ret;
}
#endif /* CONFIG_KDS */
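
/*
 * kbase_create_timeline_objects - seed the timeline summary stream.
 *
 * Emits "new" events for every LPU (job slot), every address space, the GPU
 * itself and each live context, then resets the body streams so context
 * creation is not reported twice, and finally flushes so the summary packet
 * reaches user space first.
 */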
static void kbase_create_timeline_objects(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	unsigned int lpu_id;
	unsigned int as_nr;
	struct kbasep_kctx_list_element *element;

	/* Create LPU objects. */
	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
		u32 *lpu =
			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
		kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
	}

	/* Create Address Space objects. */
	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
		kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);

	/* Create GPU object and make it retain all LPUs and address spaces. */
	kbase_tlstream_tl_summary_new_gpu(
			kbdev,
			kbdev->gpu_props.props.raw_props.gpu_id,
			kbdev->gpu_props.num_cores);

	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
		void *lpu =
			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
		kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
	}

	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
		kbase_tlstream_tl_summary_lifelink_as_gpu(
				&kbdev->as[as_nr],
				kbdev);

	/* Create object for each known context. */
	mutex_lock(&kbdev->kctx_list_lock);
	list_for_each_entry(element, &kbdev->kctx_list, link) {
		kbase_tlstream_tl_summary_new_ctx(
				element->kctx,
				(u32)(element->kctx->id),
				(u32)(element->kctx->tgid));
	}
	/* Before releasing the lock, reset body stream buffers.
	 * This prevents context creation messages from being directed to
	 * both the summary and body streams. */
	kbase_tlstream_reset_body_streams();
	mutex_unlock(&kbdev->kctx_list_lock);
	/* Static objects are placed into the summary packet, which needs to
	 * be transmitted first. Flush all streams to make it available to
	 * user space. */
	kbase_tlstream_flush_streams();
}
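
/*
 * kbase_api_handshake - negotiate the UK API version with user space.
 *
 * For each legacy major version still supported the driver simply echoes
 * that version back; for the current major it clamps the minor to the lowest
 * common value, and for anything else it reports its own version and lets
 * user space decide whether to bail.
 */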
static void kbase_api_handshake(struct uku_version_check_args *version)
{
	switch (version->major) {
#ifdef BASE_LEGACY_UK6_SUPPORT
	case 6:
		/* We are backwards compatible with version 6,
		 * so pretend to be the old version */
		version->major = 6;
		version->minor = 1;
		break;
#endif /* BASE_LEGACY_UK6_SUPPORT */
#ifdef BASE_LEGACY_UK7_SUPPORT
	case 7:
		/* We are backwards compatible with version 7,
		 * so pretend to be the old version */
		version->major = 7;
		version->minor = 1;
		break;
#endif /* BASE_LEGACY_UK7_SUPPORT */
#ifdef BASE_LEGACY_UK8_SUPPORT
	case 8:
		/* We are backwards compatible with version 8,
		 * so pretend to be the old version */
		version->major = 8;
		version->minor = 4;
		break;
#endif /* BASE_LEGACY_UK8_SUPPORT */
#ifdef BASE_LEGACY_UK9_SUPPORT
	case 9:
		/* We are backwards compatible with version 9,
		 * so pretend to be the old version */
		version->major = 9;
		version->minor = 0;
		break;
#endif /* BASE_LEGACY_UK9_SUPPORT */
	case BASE_UK_VERSION_MAJOR:
		/* set minor to be the lowest common */
		version->minor = min_t(int, BASE_UK_VERSION_MINOR,
				(int)version->minor);
		break;
	default:
		/* We return our actual version regardless if it
		 * matches the version returned by userspace -
		 * userspace can bail if it can't handle this
		 * version */
		version->major = BASE_UK_VERSION_MAJOR;
		version->minor = BASE_UK_VERSION_MINOR;
		break;
	}
}
/**
 * enum mali_error - Mali error codes shared with userspace
 *
 * This is a subset of the common Mali errors that can be returned to
 * userspace. Values of matching user and kernel space enumerators MUST be
 * the same. MALI_ERROR_NONE is guaranteed to be 0.
 */
enum mali_error {
	MALI_ERROR_NONE = 0,
	MALI_ERROR_OUT_OF_GPU_MEMORY,
	MALI_ERROR_OUT_OF_MEMORY,
	MALI_ERROR_FUNCTION_FAILED,
};

enum {
	inited_mem = (1u << 0),
	inited_js = (1u << 1),
	inited_pm_runtime_init = (1u << 2),
#ifdef CONFIG_MALI_DEVFREQ
	inited_devfreq = (1u << 3),
#endif /* CONFIG_MALI_DEVFREQ */
	inited_tlstream = (1u << 4),
	inited_backend_early = (1u << 5),
	inited_backend_late = (1u << 6),
	inited_device = (1u << 7),
	inited_vinstr = (1u << 8),
#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
	inited_ipa = (1u << 9),
#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
	inited_job_fault = (1u << 10),
	inited_misc_register = (1u << 11),
	inited_get_device = (1u << 12),
	inited_sysfs_group = (1u << 13),
	inited_dev_list = (1u << 14),
	inited_debugfs = (1u << 15),
	inited_gpu_device = (1u << 16),
	inited_registers_map = (1u << 17),
	inited_power_control = (1u << 19),
	inited_buslogger = (1u << 20)
};
#ifdef CONFIG_MALI_DEBUG
#define INACTIVE_WAIT_MS (5000)

void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
{
	kbdev->driver_inactive = inactive;
	wake_up(&kbdev->driver_inactive_wait);

	/* Wait for any running IOCTLs to complete */
	if (inactive)
		msleep(INACTIVE_WAIT_MS);
}
KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
#endif /* CONFIG_MALI_DEBUG */
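
/*
 * Condensed hex dump helpers, apparently used by the generated ioctl tracer
 * included below. Output is emitted as compilable C: a 16-byte line that
 * repeats a single value is coalesced into a memset() call (END_HEX flushes
 * any pending run), while mixed lines are printed as array initializers via
 * LINE_FORMAT/LINE_INPUT.
 */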
/* Condensed hex dump. */
#define LINE_SHIFT 4
#define LINE_LENGTH (1 << LINE_SHIFT)

#define LINE_FORMAT "0x%02X, 0x%02X, 0x%02X, 0x%02X, " \
		"0x%02X, 0x%02X, 0x%02X, 0x%02X, " \
		"0x%02X, 0x%02X, 0x%02X, 0x%02X, " \
		"0x%02X, 0x%02X, 0x%02X, 0x%02X"

#define LINE_INPUT(b) b[0], b[1], b[2], b[3], \
		b[4], b[5], b[6], b[7], \
		b[8], b[9], b[10], b[11], \
		b[12], b[13], b[14], b[15]

#define END_HEX(E) if (repeat_count && !(E && c == 0)) { \
	printk("memset(%s + %d, %d, %d);", \
			array, repeat_start << LINE_SHIFT, \
			c, repeat_count << LINE_SHIFT); \
	repeat_count = 0; \
}

/*static void formatted_hex_dump(char *array, uint8_t *buffer, size_t s)
{
	int i = 0;
	uint8_t *out = kmalloc(3 * s, GFP_KERNEL);

	printk("%s", array);

	for (i = 0; i < s; ++i) {
		sprintf(out + (3 * i), "%02X ", buffer[i]);
	}

	out[(3*s) - 1] = 0;
	printk(out);
	kfree(out);
}*/
static void formatted_hex_dump(char *array, uint8_t *buffer, size_t sz)
{
	int line_count;
	/* Repeated character */
	uint8_t c = 0;
	int repeat_count = 0;
	int repeat_start = 0;
	int b, line;

	if (!buffer) {
		printk("Bad buffer");
		return;
	}

	line_count = sz >> LINE_SHIFT;

	for (line = 0; line < line_count; ++line) {
		uint8_t *offset = buffer + (line << LINE_SHIFT);
		bool same = true;

		/* Check if still repeating */
		if (offset[0] != c)
			END_HEX(0);

		/* Check sameness */
		c = offset[0];

		for (b = 1; b < LINE_LENGTH; ++b) {
			if (offset[b] != c)
				same = false;
		}

		if (same) {
			if (!repeat_count)
				repeat_start = line;
			++repeat_count;
		} else {
			printk("%s[%d] = {" LINE_FORMAT "};",
					array, line << LINE_SHIFT,
					LINE_INPUT(offset));
		}
	}

	END_HEX(1);
}
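
/*
 * kbase_fetch_cpu - snapshot user-space (CPU) memory for tracing.
 *
 * Copies size_i bytes (defaulting to CPU_DUMP_SIZE when 0) from cpu_addr
 * into a freshly kmalloc'd kernel buffer; the caller owns the returned
 * buffer and must kfree() it. Returns NULL on allocation or copy failure.
 */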
/* Dump CPU memory by address. */
#define CPU_DUMP_SIZE 256

static void *kbase_fetch_cpu(struct kbase_context *kctx, void __user *cpu_addr, size_t *size_o, size_t size_i)
{
	uint8_t *buffer;

	if (!size_i)
		size_i = CPU_DUMP_SIZE;
	if (size_o)
		*size_o = size_i;

	buffer = kmalloc(size_i, GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (copy_from_user(buffer, cpu_addr, size_i) != 0) {
		kfree(buffer);
		return NULL;
	}

	return buffer;
}
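
/*
 * kbase_fetch_gpu - snapshot GPU memory for tracing.
 *
 * Resolves gpu_addr to its backing region, vmap()s each physical page in
 * turn to copy the whole allocation out, and returns a kernel buffer holding
 * the requested window (trimmed to the offset within the region when
 * gpu_addr is not region-aligned; size defaults to the remainder of the
 * allocation). As with kbase_fetch_cpu(), the caller must kfree() the
 * result.
 */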
/* Dump GPU memory by address.
 * See mali_kbase_debug_mem_view.c for more information */
static void *kbase_fetch_gpu(struct kbase_context *kctx, u64 gpu_addr, size_t *size_o, size_t size_i)
{
	struct kbase_va_region *reg;
	struct kbase_mem_phy_alloc *alloc;
	uint8_t *buffer;
	uint8_t *buffer_on;
	int p;
	pgprot_t prot = PAGE_KERNEL;
	uint64_t offset;

	reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
	if (!reg) {
		printk("Region not found!");
		return NULL;
	}

	if (!reg->gpu_alloc) {
		printk("No alloc!\n");
		return NULL;
	}

	/* offset is unsigned, so validate gpu_addr against the region start
	 * before subtracting rather than testing offset < 0 */
	if (gpu_addr < (reg->start_pfn << PAGE_SHIFT)) {
		printk("What?\n");
		printk("GPU addr: %LX", gpu_addr);
		printk("start_pfn: %LX", reg->start_pfn);
		return NULL;
	}
	offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);

	alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);

	if (!size_i)
		size_i = (alloc->nents << PAGE_SHIFT) - offset;
	if (size_o)
		*size_o = size_i;

	if (!(reg->flags & KBASE_REG_CPU_CACHED))
		prot = pgprot_writecombine(prot);

	buffer = kmalloc(alloc->nents << PAGE_SHIFT, GFP_KERNEL);
	printk("Buf: %p\n", buffer);
	if (!buffer) {
		printk("Bad alloc");
		kbase_mem_phy_alloc_put(alloc);
		return NULL;
	}

	for (p = 0; p < alloc->nents; ++p) {
		struct page *page = pfn_to_page(PFN_DOWN(alloc->pages[p]));
		uint8_t *mapping = vmap(&page, 1, VM_MAP, prot);

		if (!mapping) {
			printk("Bad mapping");
			kfree(buffer);
			kbase_mem_phy_alloc_put(alloc);
			return NULL;
		}

		memcpy(buffer + (p << PAGE_SHIFT), mapping, PAGE_SIZE);
		vunmap(mapping);
	}

	/* drop the reference taken above so the alloc is not leaked */
	kbase_mem_phy_alloc_put(alloc);

	if (offset) {
		buffer_on = kmalloc(size_i, GFP_KERNEL);
		if (!buffer_on) {
			kfree(buffer);
			return NULL;
		}
		memcpy(buffer_on, buffer + offset, size_i);
		kfree(buffer);
		return buffer_on;
	}

	return buffer;
}
/* The ioctl tracer is automatically generated by black */
#include "black-output-trace.c"
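
/*
 * kbase_dispatch - top-level UK ioctl demultiplexer.
 *
 * Validates the header, performs the version handshake and the one-off
 * KBASE_FUNC_SET_FLAGS setup step, then dispatches on the function id.
 * Per-call status travels back in ukh->ret (enum mali_error); the int return
 * value is reserved for transport-level errors such as -EINVAL.
 * kbase_trace_call() is invoked on entry and exit so the generated tracer
 * sees both the request and the reply.
 */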
static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
{
	struct kbase_device *kbdev;
	union uk_header *ukh = args;
	u32 id;
	int ret = 0;

	KBASE_DEBUG_ASSERT(ukh != NULL);

	kbdev = kctx->kbdev;
	id = ukh->id;
	ukh->ret = MALI_ERROR_NONE; /* Be optimistic */

	kbase_trace_call(kctx, args, id, args_size, true);

#ifdef CONFIG_MALI_DEBUG
	wait_event(kbdev->driver_inactive_wait,
			kbdev->driver_inactive == false);
#endif /* CONFIG_MALI_DEBUG */

	if (UKP_FUNC_ID_CHECK_VERSION == id) {
		struct uku_version_check_args *version_check;

		if (args_size != sizeof(struct uku_version_check_args)) {
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			return 0;
		}
		version_check = (struct uku_version_check_args *)args;
		kbase_api_handshake(version_check);
		/* save the proposed version number for later use */
		kctx->api_version = KBASE_API_VERSION(version_check->major,
				version_check->minor);
		ukh->ret = MALI_ERROR_NONE;
		return 0;
	}

	/* block calls until version handshake */
	if (kctx->api_version == 0)
		return -EINVAL;

	if (!atomic_read(&kctx->setup_complete)) {
		struct kbase_uk_set_flags *kbase_set_flags;

		/* setup pending, try to signal that we'll do the setup,
		 * if setup was already in progress, err this call
		 */
		if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
			return -EINVAL;

		/* if unexpected call, will stay stuck in setup mode
		 * (is it the only call we accept?)
		 */
		if (id != KBASE_FUNC_SET_FLAGS)
			return -EINVAL;

		kbase_set_flags = (struct kbase_uk_set_flags *)args;

		/* if not matching the expected call, stay in setup mode */
		if (sizeof(*kbase_set_flags) != args_size)
			goto bad_size;

		/* if bad flags, will stay stuck in setup mode */
		if (kbase_context_set_create_flags(kctx,
				kbase_set_flags->create_flags) != 0)
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;

		atomic_set(&kctx->setup_complete, 1);
		return 0;
	}

	/* setup complete, perform normal operation */
	switch (id) {
	case KBASE_FUNC_MEM_JIT_INIT:
		{
			struct kbase_uk_mem_jit_init *jit_init = args;

			if (sizeof(*jit_init) != args_size)
				goto bad_size;

			if (kbase_region_tracker_init_jit(kctx,
					jit_init->va_pages))
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_ALLOC:
		{
			struct kbase_uk_mem_alloc *mem = args;
			struct kbase_va_region *reg;

			if (sizeof(*mem) != args_size)
				goto bad_size;

#if defined(CONFIG_64BIT)
			if (!kctx->is_compat) {
				/* force SAME_VA if a 64-bit client */
				mem->flags |= BASE_MEM_SAME_VA;
			}
#endif

			reg = kbase_mem_alloc(kctx, mem->va_pages,
					mem->commit_pages, mem->extent,
					&mem->flags, &mem->gpu_va,
					&mem->va_alignment);
			if (!reg)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_IMPORT: {
			struct kbase_uk_mem_import *mem_import = args;
			void __user *phandle;

			if (sizeof(*mem_import) != args_size)
				goto bad_size;
#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				phandle = compat_ptr(mem_import->phandle.compat_value);
			else
#endif
				phandle = mem_import->phandle.value;

			if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_import(kctx, mem_import->type, phandle,
						&mem_import->gpu_va,
						&mem_import->va_pages,
						&mem_import->flags)) {
				mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
			break;
	}
	case KBASE_FUNC_MEM_ALIAS: {
			struct kbase_uk_mem_alias *alias = args;
			struct base_mem_aliasing_info __user *user_ai;
			struct base_mem_aliasing_info *ai;

			if (sizeof(*alias) != args_size)
				goto bad_size;

			if (alias->nents > 2048) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}
			if (!alias->nents) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				user_ai = compat_ptr(alias->ai.compat_value);
			else
#endif
				user_ai = alias->ai.value;

			ai = vmalloc(sizeof(*ai) * alias->nents);

			if (!ai) {
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
				break;
			}

			if (copy_from_user(ai, user_ai,
					sizeof(*ai) * alias->nents)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				goto copy_failed;
			}

			alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
					alias->stride,
					alias->nents, ai,
					&alias->va_pages);

			if (!alias->gpu_va) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				goto no_alias;
			}
no_alias:
copy_failed:
			vfree(ai);
			break;
	}
	case KBASE_FUNC_MEM_COMMIT:
		{
			struct kbase_uk_mem_commit *commit = args;

			if (sizeof(*commit) != args_size)
				goto bad_size;

			if (commit->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_commit(kctx, commit->gpu_addr,
					commit->pages,
					(base_backing_threshold_status *)
					&commit->result_subcode) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_MEM_QUERY:
		{
			struct kbase_uk_mem_query *query = args;

			if (sizeof(*query) != args_size)
				goto bad_size;

			if (query->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}
			if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
					query->query != KBASE_MEM_QUERY_VA_SIZE &&
					query->query != KBASE_MEM_QUERY_FLAGS) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_query(kctx, query->gpu_addr,
					query->query, &query->value) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
			break;
		}
	case KBASE_FUNC_MEM_FLAGS_CHANGE:
		{
			struct kbase_uk_mem_flags_change *fc = args;

			if (sizeof(*fc) != args_size)
				goto bad_size;

			if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_flags_change(kctx, fc->gpu_va,
					fc->flags, fc->mask) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;

			break;
		}
	case KBASE_FUNC_MEM_FREE:
		{
			struct kbase_uk_mem_free *mem = args;

			if (sizeof(*mem) != args_size)
				goto bad_size;

			if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_JOB_SUBMIT:
		{
			struct kbase_uk_job_submit *job = args;

			if (sizeof(*job) != args_size)
				goto bad_size;

#ifdef BASE_LEGACY_UK6_SUPPORT
			if (kbase_jd_submit(kctx, job, 0) != 0)
#else
			if (kbase_jd_submit(kctx, job) != 0)
#endif /* BASE_LEGACY_UK6_SUPPORT */
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
#ifdef BASE_LEGACY_UK6_SUPPORT
	case KBASE_FUNC_JOB_SUBMIT_UK6:
		{
			struct kbase_uk_job_submit *job = args;

			if (sizeof(*job) != args_size)
				goto bad_size;

			if (kbase_jd_submit(kctx, job, 1) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
#endif
	case KBASE_FUNC_SYNC:
		{
			struct kbase_uk_sync_now *sn = args;

			if (sizeof(*sn) != args_size)
				goto bad_size;

			if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

#ifndef CONFIG_MALI_COH_USER
			if (kbase_sync_now(kctx, &sn->sset) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif
			break;
		}
	case KBASE_FUNC_DISJOINT_QUERY:
		{
			struct kbase_uk_disjoint_query *dquery = args;

			if (sizeof(*dquery) != args_size)
				goto bad_size;

			/* Get the disjointness counter value. */
			dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
			break;
		}
	case KBASE_FUNC_POST_TERM:
		{
			kbase_event_close(kctx);
			break;
		}
	case KBASE_FUNC_HWCNT_SETUP:
		{
			struct kbase_uk_hwcnt_setup *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
					&kctx->vinstr_cli, setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_HWCNT_DUMP:
		{
			/* args ignored */
			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
					BASE_HWCNT_READER_EVENT_MANUAL) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_HWCNT_CLEAR:
		{
			/* args ignored */
			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_HWCNT_READER_SETUP:
		{
			struct kbase_uk_hwcnt_reader_setup *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			mutex_lock(&kctx->vinstr_cli_lock);
			if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
					setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			mutex_unlock(&kctx->vinstr_cli_lock);
			break;
		}
	case KBASE_FUNC_GPU_PROPS_REG_DUMP:
		{
			struct kbase_uk_gpuprops *setup = args;

			if (sizeof(*setup) != args_size)
				goto bad_size;

			if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
	case KBASE_FUNC_FIND_CPU_OFFSET:
		{
			struct kbase_uk_find_cpu_offset *find = args;

			if (sizeof(*find) != args_size)
				goto bad_size;

			if (find->gpu_addr & ~PAGE_MASK) {
				dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
				goto out_bad;
			}

			if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			} else {
				int err;

				err = kbasep_find_enclosing_cpu_mapping_offset(
						kctx,
						find->gpu_addr,
						(uintptr_t) find->cpu_addr,
						(size_t) find->size,
						&find->offset);

				if (err)
					ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
			break;
		}
	case KBASE_FUNC_GET_VERSION:
		{
			struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;

			if (sizeof(*get_version) != args_size)
				goto bad_size;

			/* version buffer size check is made in compile time assert */
			memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
			get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
			break;
		}
	case KBASE_FUNC_STREAM_CREATE:
		{
#ifdef CONFIG_SYNC
			struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;

			if (sizeof(*screate) != args_size)
				goto bad_size;

			if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
				/* not NULL terminated */
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (kbase_stream_create(screate->name, &screate->fd) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
#else /* CONFIG_SYNC */
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
#endif /* CONFIG_SYNC */
			break;
		}
	case KBASE_FUNC_FENCE_VALIDATE:
		{
#ifdef CONFIG_SYNC
			struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;

			if (sizeof(*fence_validate) != args_size)
				goto bad_size;

			if (kbase_fence_validate(fence_validate->fd) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
#endif /* CONFIG_SYNC */
			break;
		}
	case KBASE_FUNC_EXT_BUFFER_LOCK:
		{
#ifdef CONFIG_KDS
			ret = kbase_external_buffer_lock(kctx,
					(struct kbase_uk_ext_buff_kds_data *)args,
					args_size);
			switch (ret) {
			case 0:
				ukh->ret = MALI_ERROR_NONE;
				break;
			case -ENOMEM:
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
				break;
			default:
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			}
#endif /* CONFIG_KDS */
			break;
		}
	case KBASE_FUNC_SET_TEST_DATA:
		{
#if MALI_UNIT_TEST
			struct kbase_uk_set_test_data *set_data = args;

			shared_kernel_test_data = set_data->test_data;
			shared_kernel_test_data.kctx.value = (void __user *)kctx;
			shared_kernel_test_data.mm.value = (void __user *)current->mm;
			ukh->ret = MALI_ERROR_NONE;
#endif /* MALI_UNIT_TEST */
			break;
		}
	case KBASE_FUNC_INJECT_ERROR:
		{
#ifdef CONFIG_MALI_ERROR_INJECT
			unsigned long flags;
			struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;

			/*mutex lock */
			spin_lock_irqsave(&kbdev->reg_op_lock, flags);
			if (job_atom_inject_error(&params) != 0)
				ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
			else
				ukh->ret = MALI_ERROR_NONE;
			spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
			/*mutex unlock */
#endif /* CONFIG_MALI_ERROR_INJECT */
			break;
		}
	case KBASE_FUNC_MODEL_CONTROL:
		{
#ifdef CONFIG_MALI_NO_MALI
			unsigned long flags;
			struct kbase_model_control_params params =
					((struct kbase_uk_model_control_params *)args)->params;

			/*mutex lock */
			spin_lock_irqsave(&kbdev->reg_op_lock, flags);
			if (gpu_model_control(kbdev->model, &params) != 0)
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			else
				ukh->ret = MALI_ERROR_NONE;
			spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
			/*mutex unlock */
#endif /* CONFIG_MALI_NO_MALI */
			break;
		}
#ifdef BASE_LEGACY_UK8_SUPPORT
	case KBASE_FUNC_KEEP_GPU_POWERED:
		{
			dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
			ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			break;
		}
#endif /* BASE_LEGACY_UK8_SUPPORT */
	case KBASE_FUNC_GET_PROFILING_CONTROLS:
		{
			struct kbase_uk_profiling_controls *controls =
					(struct kbase_uk_profiling_controls *)args;
			u32 i;

			if (sizeof(*controls) != args_size)
				goto bad_size;

			for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
				controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);

			break;
		}
	/* used only for testing purposes; these controls are to be set by gator through gator API */
	case KBASE_FUNC_SET_PROFILING_CONTROLS:
		{
			struct kbase_uk_profiling_controls *controls =
					(struct kbase_uk_profiling_controls *)args;
			u32 i;

			if (sizeof(*controls) != args_size)
				goto bad_size;

			for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
				_mali_profiling_control(i, controls->profiling_controls[i]);

			break;
		}
	case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
		{
			struct kbase_uk_debugfs_mem_profile_add *add_data =
					(struct kbase_uk_debugfs_mem_profile_add *)args;
			char *buf;
			char __user *user_buf;

			if (sizeof(*add_data) != args_size)
				goto bad_size;

			if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
				dev_err(kbdev->dev, "buffer too big\n");
				goto out_bad;
			}

#ifdef CONFIG_COMPAT
			if (kctx->is_compat)
				user_buf = compat_ptr(add_data->buf.compat_value);
			else
#endif
				user_buf = add_data->buf.value;

			buf = kmalloc(add_data->len, GFP_KERNEL);
			if (!buf)
				goto out_bad;

			if (0 != copy_from_user(buf, user_buf, add_data->len)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				kfree(buf);
				goto out_bad;
			}

			if (kbasep_mem_profile_debugfs_insert(kctx, buf,
					add_data->len)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				kfree(buf);
				goto out_bad;
			}
			break;
		}

#ifdef CONFIG_MALI_NO_MALI
	case KBASE_FUNC_SET_PRFCNT_VALUES:
		{
			struct kbase_uk_prfcnt_values *params =
					((struct kbase_uk_prfcnt_values *)args);

			gpu_model_set_dummy_prfcnt_sample(params->data,
					params->size);
			break;
		}
#endif /* CONFIG_MALI_NO_MALI */
	case KBASE_FUNC_TLSTREAM_ACQUIRE:
		{
			struct kbase_uk_tlstream_acquire *tlstream_acquire =
					args;

			if (sizeof(*tlstream_acquire) != args_size)
				goto bad_size;

			if (0 != kbase_tlstream_acquire(
					kctx,
					&tlstream_acquire->fd)) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
			} else if (0 <= tlstream_acquire->fd) {
				/* Summary stream was cleared during acquire.
				 * Create static timeline objects that will be
				 * read by client. */
				kbase_create_timeline_objects(kctx);
			}
			break;
		}
	case KBASE_FUNC_TLSTREAM_FLUSH:
		{
			struct kbase_uk_tlstream_flush *tlstream_flush =
					args;

			if (sizeof(*tlstream_flush) != args_size)
				goto bad_size;

			kbase_tlstream_flush_streams();
			break;
		}
#if MALI_UNIT_TEST
	case KBASE_FUNC_TLSTREAM_TEST:
		{
			struct kbase_uk_tlstream_test *tlstream_test = args;

			if (sizeof(*tlstream_test) != args_size)
				goto bad_size;

			kbase_tlstream_test(
					tlstream_test->tpw_count,
					tlstream_test->msg_delay,
					tlstream_test->msg_count,
					tlstream_test->aux_msg);
			break;
		}
	case KBASE_FUNC_TLSTREAM_STATS:
		{
			struct kbase_uk_tlstream_stats *tlstream_stats = args;

			if (sizeof(*tlstream_stats) != args_size)
				goto bad_size;

			kbase_tlstream_stats(
					&tlstream_stats->bytes_collected,
					&tlstream_stats->bytes_generated);
			break;
		}
#endif /* MALI_UNIT_TEST */
	case KBASE_FUNC_GET_CONTEXT_ID:
		{
			struct kbase_uk_context_id *info = args;

			info->id = kctx->id;
			break;
		}
	case KBASE_FUNC_SOFT_EVENT_UPDATE:
		{
			struct kbase_uk_soft_event_update *update = args;

			if (sizeof(*update) != args_size)
				goto bad_size;

			if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
					(update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
					(update->flags != 0))
				goto out_bad;

			if (kbasep_write_soft_event_status(
					kctx, update->evt,
					update->new_status) != 0) {
				ukh->ret = MALI_ERROR_FUNCTION_FAILED;
				break;
			}

			if (update->new_status == BASE_JD_SOFT_EVENT_SET)
				kbasep_complete_triggered_soft_events(
						kctx, update->evt);
			break;
		}
	default:
		dev_err(kbdev->dev, "unknown ioctl %u\n", id);
		goto out_bad;
	}

	kbase_trace_call(kctx, args, id, args_size, false);
	return ret;

bad_size:
	dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
out_bad:
	return -EINVAL;
}
static struct kbase_device *to_kbase_device(struct device *dev)
{
	return dev_get_drvdata(dev);
}

static int assign_irqs(struct platform_device *pdev)
{
	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
	int i;

	if (!kbdev)
		return -ENODEV;

	/* 3 IRQ resources */
	for (i = 0; i < 3; i++) {
		struct resource *irq_res;
		int irqtag;

		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq_res) {
			dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
			return -ENOENT;
		}

#ifdef CONFIG_OF
		if (!strcmp(irq_res->name, "JOB")) {
			irqtag = JOB_IRQ_TAG;
		} else if (!strcmp(irq_res->name, "MMU")) {
			irqtag = MMU_IRQ_TAG;
		} else if (!strcmp(irq_res->name, "GPU")) {
			irqtag = GPU_IRQ_TAG;
		} else {
			dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
				irq_res->name);
			return -EINVAL;
		}
#else
		irqtag = i;
#endif /* CONFIG_OF */

		kbdev->irqs[irqtag].irq = irq_res->start;
		kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
	}

	return 0;
}
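
/*
 * Illustrative device-tree fragment matching the IRQ resource names checked
 * above. The node name, unit address, compatible string and interrupt
 * specifiers are hypothetical; only the "JOB"/"MMU"/"GPU" names are what
 * assign_irqs() actually requires when CONFIG_OF is set:
 *
 *	gpu@fc010000 {
 *		compatible = "arm,mali-midgard";
 *		interrupts = <0 33 4>, <0 34 4>, <0 32 4>;
 *		interrupt-names = "JOB", "MMU", "GPU";
 *	};
 */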
/*
 * API to acquire device list mutex and
 * return pointer to the device list head
 */
const struct list_head *kbase_dev_list_get(void)
{
	mutex_lock(&kbase_dev_list_lock);
	return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_dev_list_get);

/* API to release the device list mutex */
void kbase_dev_list_put(const struct list_head *dev_list)
{
	mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_dev_list_put);

/* Find a particular kbase device (as specified by minor number), or find the
 * "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
	struct kbase_device *kbdev = NULL;
	struct list_head *entry;
	const struct list_head *dev_list = kbase_dev_list_get();

	list_for_each(entry, dev_list) {
		struct kbase_device *tmp;

		tmp = list_entry(entry, struct kbase_device, entry);
		if (tmp->mdev.minor == minor || minor == -1) {
			kbdev = tmp;
			get_device(kbdev->dev);
			break;
		}
	}
	kbase_dev_list_put(dev_list);
	return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
	put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);
static int kbase_open(struct inode *inode, struct file *filp)
{
	struct kbase_device *kbdev = NULL;
	struct kbase_context *kctx;
	int ret = 0;
#ifdef CONFIG_DEBUG_FS
	char kctx_name[64];
#endif

	kbdev = kbase_find_device(iminor(inode));

	if (!kbdev)
		return -ENODEV;

	kctx = kbase_create_context(kbdev, is_compat_task());
	if (!kctx) {
		ret = -ENOMEM;
		goto out;
	}

	init_waitqueue_head(&kctx->event_queue);
	filp->private_data = kctx;
	kctx->filp = filp;

	kctx->infinite_cache_active = kbdev->infinite_cache_active_default;

#ifdef CONFIG_DEBUG_FS
	snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);

	kctx->kctx_dentry = debugfs_create_dir(kctx_name,
			kbdev->debugfs_ctx_directory);

	if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
		ret = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_MALI_COH_USER
	/* if cache is completely coherent at hardware level, then remove the
	 * infinite cache control support from debugfs.
	 */
#else
	debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
			&kctx->infinite_cache_active);
#endif /* CONFIG_MALI_COH_USER */

	mutex_init(&kctx->mem_profile_lock);

	kbasep_jd_debugfs_ctx_add(kctx);
	kbase_debug_mem_view_init(filp);

	kbase_debug_job_fault_context_init(kctx);

	kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);

	kbase_jit_debugfs_add(kctx);
#endif /* CONFIG_DEBUG_FS */

	dev_dbg(kbdev->dev, "created base context\n");

	{
		struct kbasep_kctx_list_element *element;

		element = kzalloc(sizeof(*element), GFP_KERNEL);
		if (element) {
			mutex_lock(&kbdev->kctx_list_lock);
			element->kctx = kctx;
			list_add(&element->link, &kbdev->kctx_list);
			kbase_tlstream_tl_new_ctx(
					element->kctx,
					(u32)(element->kctx->id),
					(u32)(element->kctx->tgid));
			mutex_unlock(&kbdev->kctx_list_lock);
		} else {
			/* we don't treat this as a fail - just warn about it */
			dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
		}
	}
	return 0;

out:
	kbase_release_device(kbdev);
	return ret;
}
static int kbase_release(struct inode *inode, struct file *filp)
{
	struct kbase_context *kctx = filp->private_data;
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbasep_kctx_list_element *element, *tmp;
	bool found_element = false;

	kbase_tlstream_tl_del_ctx(kctx);

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(kctx->kctx_dentry);
	kbasep_mem_profile_debugfs_remove(kctx);
	kbase_debug_job_fault_context_term(kctx);
#endif

	mutex_lock(&kbdev->kctx_list_lock);
	list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
		if (element->kctx == kctx) {
			list_del(&element->link);
			kfree(element);
			found_element = true;
		}
	}
	mutex_unlock(&kbdev->kctx_list_lock);
	if (!found_element)
		dev_warn(kbdev->dev, "kctx not in kctx_list\n");

	filp->private_data = NULL;

	mutex_lock(&kctx->vinstr_cli_lock);
	/* If this client was performing hwcnt dumping and did not explicitly
	 * detach itself, remove it from the vinstr core now */
	if (kctx->vinstr_cli) {
		struct kbase_uk_hwcnt_setup setup;

		setup.dump_buffer = 0llu;
		kbase_vinstr_legacy_hwc_setup(
				kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
	}
	mutex_unlock(&kctx->vinstr_cli_lock);

	kbase_destroy_context(kctx);

	dev_dbg(kbdev->dev, "deleted base context\n");
	kbase_release_device(kbdev);
	return 0;
}
#define CALL_MAX_SIZE 536
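
/*
 * Generic UK (user/kernel) call marshalling: kbase_ioctl() below copies the
 * payload into a bounded kernel buffer, kbase_dispatch() interprets it by
 * function ID, and the possibly-updated buffer is copied back so that in-band
 * return values (such as ukh->ret) reach user space.
 */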
static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };	/* alignment fixup */
	u32 size = _IOC_SIZE(cmd);
	struct kbase_context *kctx = filp->private_data;

	if (size > CALL_MAX_SIZE)
		return -ENOTTY;

	if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
		dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
		return -EFAULT;
	}

	if (kbase_dispatch(kctx, &msg, size) != 0)
		return -EFAULT;

	if (0 != copy_to_user((void __user *)arg, &msg, size)) {
		dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
		return -EFAULT;
	}
	return 0;
}
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct kbase_context *kctx = filp->private_data;
	struct base_jd_event_v2 uevent;
	int out_count = 0;

	if (count < sizeof(uevent))
		return -ENOBUFS;

	do {
		while (kbase_event_dequeue(kctx, &uevent)) {
			if (out_count > 0)
				goto out;

			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			if (wait_event_interruptible(kctx->event_queue,
					kbase_event_pending(kctx)) != 0)
				return -ERESTARTSYS;
		}
		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
			if (out_count == 0)
				return -EPIPE;
			goto out;
		}

		if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
			return -EFAULT;

		buf += sizeof(uevent);
		out_count++;
		count -= sizeof(uevent);
	} while (count >= sizeof(uevent));

out:
	return out_count * sizeof(uevent);
}
static unsigned int kbase_poll(struct file *filp, poll_table *wait)
{
	struct kbase_context *kctx = filp->private_data;

	poll_wait(filp, &kctx->event_queue, wait);
	if (kbase_event_pending(kctx))
		return POLLIN | POLLRDNORM;

	return 0;
}
void kbase_event_wakeup(struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

	wake_up_interruptible(&kctx->event_queue);
}
KBASE_EXPORT_TEST_API(kbase_event_wakeup);
static int kbase_check_flags(int flags)
{
	/* Enforce that the driver keeps the O_CLOEXEC flag so that the file
	 * descriptor is always closed across execve() rather than leaking
	 * into the newly executed program.
	 */
	if (0 == (flags & O_CLOEXEC))
		return -EINVAL;

	return 0;
}
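
/*
 * Illustrative user-space pattern (the device node name is platform
 * dependent; "/dev/mali0" is an assumption):
 *
 *	int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
 *
 * kbase_check_flags() is wired up as the check_flags hook in kbase_fops
 * below, so flag combinations that lack O_CLOEXEC are refused with -EINVAL.
 */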
#ifdef CONFIG_64BIT
/* The following function is taken from the kernel and just
 * renamed. As it's not exported to modules we must copy-paste it here.
 */
static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
		*info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vma->vm_start;
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;

			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vma->vm_prev->vm_end : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}
static unsigned long kbase_get_unmapped_area(struct file *filp,
		const unsigned long addr, const unsigned long len,
		const unsigned long pgoff, const unsigned long flags)
{
	/* based on get_unmapped_area, but simplified slightly because some
	 * values are known in advance */
	struct kbase_context *kctx = filp->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* err on fixed address */
	if ((flags & MAP_FIXED) || addr)
		return -EINVAL;

	/* too big? */
	if (len > TASK_SIZE - SZ_2M)
		return -ENOMEM;

	if (kctx->is_compat)
		return current->mm->get_unmapped_area(filp, addr, len, pgoff,
				flags);

	if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
		info.high_limit = kctx->same_va_end << PAGE_SHIFT;
		info.align_mask = 0;
		info.align_offset = 0;
	} else {
		info.high_limit = min_t(unsigned long, mm->mmap_base,
				(kctx->same_va_end << PAGE_SHIFT));
		if (len >= SZ_2M) {
			info.align_offset = SZ_2M;
			info.align_mask = SZ_2M - 1;
		} else {
			info.align_mask = 0;
			info.align_offset = 0;
		}
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = SZ_2M;
	return kbase_unmapped_area_topdown(&info);
}
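
/*
 * Worked example for the 2 MB alignment case above (the address is
 * illustrative): with align_offset = SZ_2M and align_mask = SZ_2M - 1,
 * kbase_unmapped_area_topdown() finishes, after subtracting the length, with
 *
 *	gap_end -= (gap_end - SZ_2M) & (SZ_2M - 1);
 *
 * which rounds the candidate address down to a 2 MB boundary, e.g.
 * 0x7f81234000 -> 0x7f81200000.
 */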
#endif

static const struct file_operations kbase_fops = {
	.owner = THIS_MODULE,
	.open = kbase_open,
	.release = kbase_release,
	.read = kbase_read,
	.poll = kbase_poll,
	.unlocked_ioctl = kbase_ioctl,
	.compat_ioctl = kbase_ioctl,
	.mmap = kbase_mmap,
	.check_flags = kbase_check_flags,
#ifdef CONFIG_64BIT
	.get_unmapped_area = kbase_get_unmapped_area,
#endif
};
#ifndef CONFIG_MALI_NO_MALI
void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
{
	writel(value, kbdev->reg + offset);
}

u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
{
	return readl(kbdev->reg + offset);
}
#endif /* !CONFIG_MALI_NO_MALI */
/** Show callback for the @c power_policy sysfs file.
 *
 * This function is called to get the contents of the @c power_policy sysfs
 * file. This is a list of the available policies with the currently active one
 * surrounded by square brackets.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_policy *current_policy;
	const struct kbase_pm_policy *const *policy_list;
	int policy_count;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	current_policy = kbase_pm_get_policy(kbdev);

	policy_count = kbase_pm_list_policies(&policy_list);

	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
		if (policy_list[i] == current_policy)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
	}

	if (ret < PAGE_SIZE - 1) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}
/** Store callback for the @c power_policy sysfs file.
 *
 * This function is called when the @c power_policy sysfs file is written to.
 * It matches the requested policy against the available policies and if a
 * matching policy is found calls @ref kbase_pm_set_policy to change the
 * policy.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_policy *new_policy = NULL;
	const struct kbase_pm_policy *const *policy_list;
	int policy_count;
	int i;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	policy_count = kbase_pm_list_policies(&policy_list);

	for (i = 0; i < policy_count; i++) {
		if (sysfs_streq(policy_list[i]->name, buf)) {
			new_policy = policy_list[i];
			break;
		}
	}

	if (!new_policy) {
		dev_err(dev, "power_policy: policy not found\n");
		return -EINVAL;
	}

	kbase_pm_set_policy(kbdev, new_policy);

	return count;
}
/** The sysfs file @c power_policy.
 *
 * This is used for obtaining information about the available policies,
 * determining which policy is currently active, and changing the active
 * policy.
 */
static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
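
/*
 * Illustrative shell usage (the sysfs path and the set of policy names
 * depend on the platform and kernel configuration):
 *
 *	# cat /sys/devices/platform/mali/power_policy
 *	demand [coarse_demand] always_on
 *	# echo demand > /sys/devices/platform/mali/power_policy
 */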
/** Show callback for the @c core_availability_policy sysfs file.
 *
 * This function is called to get the contents of the @c core_availability_policy
 * sysfs file. This is a list of the available policies with the currently
 * active one surrounded by square brackets.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_ca_policy *current_policy;
	const struct kbase_pm_ca_policy *const *policy_list;
	int policy_count;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	current_policy = kbase_pm_ca_get_policy(kbdev);

	policy_count = kbase_pm_ca_list_policies(&policy_list);

	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
		if (policy_list[i] == current_policy)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
	}

	if (ret < PAGE_SIZE - 1) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}
/** Store callback for the @c core_availability_policy sysfs file.
 *
 * This function is called when the @c core_availability_policy sysfs file is
 * written to. It matches the requested policy against the available policies
 * and if a matching policy is found calls @ref kbase_pm_ca_set_policy to
 * change the policy.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	const struct kbase_pm_ca_policy *new_policy = NULL;
	const struct kbase_pm_ca_policy *const *policy_list;
	int policy_count;
	int i;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	policy_count = kbase_pm_ca_list_policies(&policy_list);

	for (i = 0; i < policy_count; i++) {
		if (sysfs_streq(policy_list[i]->name, buf)) {
			new_policy = policy_list[i];
			break;
		}
	}

	if (!new_policy) {
		dev_err(dev, "core_availability_policy: policy not found\n");
		return -EINVAL;
	}

	kbase_pm_ca_set_policy(kbdev, new_policy);

	return count;
}
/** The sysfs file @c core_availability_policy.
 *
 * This is used for obtaining information about the available policies,
 * determining which policy is currently active, and changing the active
 * policy.
 */
static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
/** Show callback for the @c core_mask sysfs file.
 *
 * This function is called to get the contents of the @c core_mask sysfs
 * file.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Current core mask (JS0) : 0x%llX\n",
			kbdev->pm.debug_core_mask[0]);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Current core mask (JS1) : 0x%llX\n",
			kbdev->pm.debug_core_mask[1]);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Current core mask (JS2) : 0x%llX\n",
			kbdev->pm.debug_core_mask[2]);
	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			"Available core mask : 0x%llX\n",
			kbdev->gpu_props.props.raw_props.shader_present);

	return ret;
}
/** Store callback for the @c core_mask sysfs file.
 *
 * This function is called when the @c core_mask sysfs file is written to.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	u64 new_core_mask[3];
	int items;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	items = sscanf(buf, "%llx %llx %llx",
			&new_core_mask[0], &new_core_mask[1],
			&new_core_mask[2]);

	if (items == 1)
		new_core_mask[1] = new_core_mask[2] = new_core_mask[0];

	if (items == 1 || items == 3) {
		u64 shader_present =
				kbdev->gpu_props.props.raw_props.shader_present;
		u64 group0_core_mask =
				kbdev->gpu_props.props.coherency_info.group[0].
				core_mask;

		if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
				!(new_core_mask[0] & group0_core_mask) ||
				(new_core_mask[1] & shader_present) !=
						new_core_mask[1] ||
				!(new_core_mask[1] & group0_core_mask) ||
				(new_core_mask[2] & shader_present) !=
						new_core_mask[2] ||
				!(new_core_mask[2] & group0_core_mask)) {
			dev_err(dev, "core_mask: invalid core specification\n");
			return -EINVAL;
		}

		if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
				kbdev->pm.debug_core_mask[1] !=
						new_core_mask[1] ||
				kbdev->pm.debug_core_mask[2] !=
						new_core_mask[2]) {
			unsigned long flags;

			spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

			kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
					new_core_mask[1], new_core_mask[2]);

			spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
					flags);
		}

		return count;
	}

	dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
		"Use format <core_mask>\n"
		"or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
	return -EINVAL;
}
/** The sysfs file @c core_mask.
 *
 * This is used to restrict shader core availability for debugging purposes.
 * Reading it will show the current core mask and the mask of cores available.
 * Writing to it will set the current core mask.
 */
static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
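
/*
 * Illustrative shell usage: either one mask applied to all three job slots,
 * or one mask per slot. Each mask must be a subset of the available core
 * mask reported on read (the values shown are only examples):
 *
 *	# echo 0xf > core_mask
 *	# echo "0xf 0x3 0x3" > core_mask
 */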
/**
 * set_soft_event_timeout() - Store callback for the soft_event_timeout sysfs
 * file.
 *
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The value written to the sysfs file.
 * @count: The number of bytes written to the sysfs file.
 *
 * This allows setting the timeout for software event jobs. Waiting jobs will
 * be cancelled after this period expires. This is expressed in milliseconds.
 *
 * Return: count if the function succeeded. An error code on failure.
 */
static ssize_t set_soft_event_timeout(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int soft_event_timeout_ms;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	if ((kstrtoint(buf, 0, &soft_event_timeout_ms) != 0) ||
	    (soft_event_timeout_ms <= 0))
		return -EINVAL;

	atomic_set(&kbdev->js_data.soft_event_timeout_ms,
		   soft_event_timeout_ms);

	return count;
}
/**
 * show_soft_event_timeout() - Show callback for the soft_event_timeout sysfs
 * file.
 *
 * This will return the timeout for the software event jobs.
 *
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer for the sysfs file contents.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_soft_event_timeout(struct device *dev,
				       struct device_attribute *attr,
				       char * const buf)
{
	struct kbase_device *kbdev;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	return scnprintf(buf, PAGE_SIZE, "%i\n",
			 atomic_read(&kbdev->js_data.soft_event_timeout_ms));
}

static DEVICE_ATTR(soft_event_timeout, S_IRUGO | S_IWUSR,
		   show_soft_event_timeout, set_soft_event_timeout);
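
/*
 * Illustrative shell usage, setting a 3 second soft event timeout:
 *
 *	# echo 3000 > soft_event_timeout
 */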
/** Store callback for the @c js_timeouts sysfs file.
 *
 * This function is called when the @c js_timeouts sysfs file is written to.
 * The file expects eight values separated by whitespace. The values are
 * basically the same as the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL,
 * JS_HARD_STOP_TICKS_SS, JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING,
 * JS_RESET_TICKS_SS, JS_RESET_TICKS_CL, JS_RESET_TICKS_DUMPING
 * configuration values (in that order), with the difference that the
 * js_timeouts values are expressed in MILLISECONDS.
 *
 * The js_timeouts sysfs file allows the current values in use by the job
 * scheduler to be overridden. Note that a value needs to be other than 0 for
 * it to override the current job scheduler value.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int items;
	long js_soft_stop_ms;
	long js_soft_stop_ms_cl;
	long js_hard_stop_ms_ss;
	long js_hard_stop_ms_cl;
	long js_hard_stop_ms_dumping;
	long js_reset_ms_ss;
	long js_reset_ms_cl;
	long js_reset_ms_dumping;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
			&js_soft_stop_ms, &js_soft_stop_ms_cl,
			&js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
			&js_hard_stop_ms_dumping, &js_reset_ms_ss,
			&js_reset_ms_cl, &js_reset_ms_dumping);

	if (items == 8) {
		u64 ticks;

		if (js_soft_stop_ms >= 0) {
			ticks = js_soft_stop_ms * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_soft_stop_ticks = ticks;
		} else {
			kbdev->js_soft_stop_ticks = -1;
		}

		if (js_soft_stop_ms_cl >= 0) {
			ticks = js_soft_stop_ms_cl * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_soft_stop_ticks_cl = ticks;
		} else {
			kbdev->js_soft_stop_ticks_cl = -1;
		}

		if (js_hard_stop_ms_ss >= 0) {
			ticks = js_hard_stop_ms_ss * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_hard_stop_ticks_ss = ticks;
		} else {
			kbdev->js_hard_stop_ticks_ss = -1;
		}

		if (js_hard_stop_ms_cl >= 0) {
			ticks = js_hard_stop_ms_cl * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_hard_stop_ticks_cl = ticks;
		} else {
			kbdev->js_hard_stop_ticks_cl = -1;
		}

		if (js_hard_stop_ms_dumping >= 0) {
			ticks = js_hard_stop_ms_dumping * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_hard_stop_ticks_dumping = ticks;
		} else {
			kbdev->js_hard_stop_ticks_dumping = -1;
		}

		if (js_reset_ms_ss >= 0) {
			ticks = js_reset_ms_ss * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_reset_ticks_ss = ticks;
		} else {
			kbdev->js_reset_ticks_ss = -1;
		}

		if (js_reset_ms_cl >= 0) {
			ticks = js_reset_ms_cl * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_reset_ticks_cl = ticks;
		} else {
			kbdev->js_reset_ticks_cl = -1;
		}

		if (js_reset_ms_dumping >= 0) {
			ticks = js_reset_ms_dumping * 1000000ULL;
			do_div(ticks, kbdev->js_data.scheduling_period_ns);
			kbdev->js_reset_ticks_dumping = ticks;
		} else {
			kbdev->js_reset_ticks_dumping = -1;
		}

		kbdev->js_timeouts_updated = true;

		dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_soft_stop_ticks,
				js_soft_stop_ms);
		dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_soft_stop_ticks_cl,
				js_soft_stop_ms_cl);
		dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_hard_stop_ticks_ss,
				js_hard_stop_ms_ss);
		dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_hard_stop_ticks_cl,
				js_hard_stop_ms_cl);
		dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n",
				(unsigned long)
				kbdev->js_hard_stop_ticks_dumping,
				js_hard_stop_ms_dumping);
		dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_reset_ticks_ss,
				js_reset_ms_ss);
		dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_reset_ticks_cl,
				js_reset_ms_cl);
		dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n",
				(unsigned long)kbdev->js_reset_ticks_dumping,
				js_reset_ms_dumping);

		return count;
	}

	dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
			"Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
			"Write 0 for no change, -1 to restore default timeout\n");
	return -EINVAL;
}
/** Show callback for the @c js_timeouts sysfs file.
 *
 * This function is called to get the contents of the @c js_timeouts sysfs
 * file. It returns the last values written to the js_timeouts sysfs file.
 * If the file has not been written to yet, the values shown will be the
 * settings currently in use.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;
	u64 ms;
	unsigned long js_soft_stop_ms;
	unsigned long js_soft_stop_ms_cl;
	unsigned long js_hard_stop_ms_ss;
	unsigned long js_hard_stop_ms_cl;
	unsigned long js_hard_stop_ms_dumping;
	unsigned long js_reset_ms_ss;
	unsigned long js_reset_ms_cl;
	unsigned long js_reset_ms_dumping;
	unsigned long ticks;
	u32 scheduling_period_ns;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	/* If no contexts have been scheduled since js_timeouts was last written
	 * to, the new timeouts might not have been latched yet. So check if an
	 * update is pending and use the new values if necessary. */
	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
		scheduling_period_ns = kbdev->js_scheduling_period_ns;
	else
		scheduling_period_ns = kbdev->js_data.scheduling_period_ns;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
		ticks = kbdev->js_soft_stop_ticks;
	else
		ticks = kbdev->js_data.soft_stop_ticks;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_soft_stop_ms = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
		ticks = kbdev->js_soft_stop_ticks_cl;
	else
		ticks = kbdev->js_data.soft_stop_ticks_cl;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_soft_stop_ms_cl = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
		ticks = kbdev->js_hard_stop_ticks_ss;
	else
		ticks = kbdev->js_data.hard_stop_ticks_ss;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_hard_stop_ms_ss = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
		ticks = kbdev->js_hard_stop_ticks_cl;
	else
		ticks = kbdev->js_data.hard_stop_ticks_cl;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_hard_stop_ms_cl = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
		ticks = kbdev->js_hard_stop_ticks_dumping;
	else
		ticks = kbdev->js_data.hard_stop_ticks_dumping;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_hard_stop_ms_dumping = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
		ticks = kbdev->js_reset_ticks_ss;
	else
		ticks = kbdev->js_data.gpu_reset_ticks_ss;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_reset_ms_ss = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
		ticks = kbdev->js_reset_ticks_cl;
	else
		ticks = kbdev->js_data.gpu_reset_ticks_cl;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_reset_ms_cl = (unsigned long)ms;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
		ticks = kbdev->js_reset_ticks_dumping;
	else
		ticks = kbdev->js_data.gpu_reset_ticks_dumping;
	ms = (u64)ticks * scheduling_period_ns;
	do_div(ms, 1000000UL);
	js_reset_ms_dumping = (unsigned long)ms;

	ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
			js_soft_stop_ms, js_soft_stop_ms_cl,
			js_hard_stop_ms_ss, js_hard_stop_ms_cl,
			js_hard_stop_ms_dumping, js_reset_ms_ss,
			js_reset_ms_cl, js_reset_ms_dumping);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}
/** The sysfs file @c js_timeouts.
 *
 * This is used to override the current job scheduler values for
 * JS_SOFT_STOP_TICKS_SS
 * JS_SOFT_STOP_TICKS_CL
 * JS_HARD_STOP_TICKS_SS
 * JS_HARD_STOP_TICKS_CL
 * JS_HARD_STOP_TICKS_DUMPING
 * JS_RESET_TICKS_SS
 * JS_RESET_TICKS_CL
 * JS_RESET_TICKS_DUMPING.
 */
static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
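
/*
 * Illustrative shell usage: eight millisecond values in the order documented
 * above, where 0 leaves a timeout unchanged and -1 restores its default (the
 * numbers themselves are only examples):
 *
 *	# echo "500 500 5000 5000 15000 15000 15000 15000" > js_timeouts
 */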
/**
 * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
 *                            file
 * @dev:   The device the sysfs file is for
 * @attr:  The attributes of the sysfs file
 * @buf:   The value written to the sysfs file
 * @count: The number of bytes written to the sysfs file
 *
 * This function is called when the js_scheduling_period sysfs file is written
 * to. It checks the data written, and if valid updates the js_scheduling_period
 * value.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_js_scheduling_period(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	unsigned int js_scheduling_period;
	u32 new_scheduling_period_ns;
	u32 old_period;
	u64 ticks;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtouint(buf, 0, &js_scheduling_period);
	if (ret || !js_scheduling_period) {
		dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
				"Use format <js_scheduling_period_ms>\n");
		return -EINVAL;
	}

	new_scheduling_period_ns = js_scheduling_period * 1000000;

	/* Update scheduling timeouts */
	mutex_lock(&kbdev->js_data.runpool_mutex);

	/* If no contexts have been scheduled since js_timeouts was last written
	 * to, the new timeouts might not have been latched yet. So check if an
	 * update is pending and use the new values if necessary. */

	/* Use previous 'new' scheduling period as a base if present. */
	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
		old_period = kbdev->js_scheduling_period_ns;
	else
		old_period = kbdev->js_data.scheduling_period_ns;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
		ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
	else
		ticks = (u64)kbdev->js_data.soft_stop_ticks *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_soft_stop_ticks = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
		ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
		ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
	else
		ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
		ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_ss = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
		ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_cl = ticks ? ticks : 1;

	if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
		ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
	else
		ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
				kbdev->js_data.scheduling_period_ns;
	do_div(ticks, new_scheduling_period_ns);
	kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;

	kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
	kbdev->js_timeouts_updated = true;

	mutex_unlock(&kbdev->js_data.runpool_mutex);

	dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
			js_scheduling_period);

	return count;
}
/**
 * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
 *                             entry.
 * @dev:  The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf:  The output buffer for the current scheduling period.
 *
 * This function is called to get the current period used for JS scheduling.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_js_scheduling_period(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	u32 period;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
		period = kbdev->js_scheduling_period_ns;
	else
		period = kbdev->js_data.scheduling_period_ns;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n",
			period / 1000000);

	return ret;
}

static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
		show_js_scheduling_period, set_js_scheduling_period);
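
/*
 * Illustrative shell usage, setting a 100 ms scheduling period; the existing
 * tick-based timeouts are rescaled above so their wall-clock durations are
 * preserved:
 *
 *	# echo 100 > js_scheduling_period
 */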
#if !MALI_CUSTOMER_RELEASE
/** Store callback for the @c force_replay sysfs file.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	if (!strncmp("limit=", buf, MIN(6, count))) {
		int force_replay_limit;
		int items = sscanf(buf, "limit=%u", &force_replay_limit);

		if (items == 1) {
			kbdev->force_replay_random = false;
			kbdev->force_replay_limit = force_replay_limit;
			kbdev->force_replay_count = 0;

			return count;
		}
	} else if (!strncmp("random_limit", buf, MIN(12, count))) {
		kbdev->force_replay_random = true;
		kbdev->force_replay_count = 0;

		return count;
	} else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
		kbdev->force_replay_random = false;
		kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
		kbdev->force_replay_count = 0;

		return count;
	} else if (!strncmp("core_req=", buf, MIN(9, count))) {
		unsigned int core_req;
		int items = sscanf(buf, "core_req=%x", &core_req);

		if (items == 1) {
			kbdev->force_replay_core_req = (base_jd_core_req)core_req;

			return count;
		}
	}
	dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
	return -EINVAL;
}
/** Show callback for the @c force_replay sysfs file.
 *
 * This function is called to get the contents of the @c force_replay sysfs
 * file. It returns the last set value written to the force_replay sysfs file.
 * If the file didn't get written yet, the values will be 0.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_force_replay(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	if (kbdev->force_replay_random)
		ret = scnprintf(buf, PAGE_SIZE,
				"limit=0\nrandom_limit\ncore_req=%x\n",
				kbdev->force_replay_core_req);
	else
		ret = scnprintf(buf, PAGE_SIZE,
				"limit=%u\nnorandom_limit\ncore_req=%x\n",
				kbdev->force_replay_limit,
				kbdev->force_replay_core_req);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/** The sysfs file @c force_replay. */
static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
		set_force_replay);
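
/*
 * Illustrative shell usage, matching the keywords accepted by
 * set_force_replay() above (the numeric values are only examples):
 *
 *	# echo "limit=5" > force_replay
 *	# echo random_limit > force_replay
 *	# echo "core_req=10" > force_replay
 */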
#endif /* !MALI_CUSTOMER_RELEASE */
#ifdef CONFIG_MALI_DEBUG
static ssize_t set_js_softstop_always(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	int softstop_always;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &softstop_always);
	if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
		dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
				"Use format <soft_stop_always>\n");
		return -EINVAL;
	}

	kbdev->js_data.softstop_always = (bool) softstop_always;
	dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
			(kbdev->js_data.softstop_always) ?
			"Enabled" : "Disabled");
	return count;
}

static ssize_t show_js_softstop_always(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/*
 * By default, soft-stops are disabled when only a single context is present.
 * The ability to enable soft-stop when only a single context is present can
 * be used for debug and unit-testing purposes (see the CL t6xx_stress_1
 * unit-test as an example where this feature is used).
 */
static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
#endif /* CONFIG_MALI_DEBUG */
#ifdef CONFIG_MALI_DEBUG
typedef void (kbasep_debug_command_func) (struct kbase_device *);

enum kbasep_debug_command_code {
	KBASEP_DEBUG_COMMAND_DUMPTRACE,

	/* This must be the last enum */
	KBASEP_DEBUG_COMMAND_COUNT
};

struct kbasep_debug_command {
	char *str;
	kbasep_debug_command_func *func;
};

/** Debug commands supported by the driver */
static const struct kbasep_debug_command debug_commands[] = {
	{
		.str = "dumptrace",
		.func = &kbasep_trace_dump,
	}
};
/** Show callback for the @c debug_command sysfs file.
 *
 * This function is called to get the contents of the @c debug_command sysfs
 * file. This is a list of the available debug commands, separated by newlines.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The output buffer for the sysfs file contents
 *
 * @return The number of bytes output to @c buf.
 */
static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	int i;
	ssize_t ret = 0;

	kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);

	if (ret >= PAGE_SIZE) {
		buf[PAGE_SIZE - 2] = '\n';
		buf[PAGE_SIZE - 1] = '\0';
		ret = PAGE_SIZE - 1;
	}

	return ret;
}

/** Store callback for the @c debug_command sysfs file.
 *
 * This function is called when the @c debug_command sysfs file is written to.
 * It matches the requested command against the available commands, and if
 * a matching command is found calls the associated function from
 * @ref debug_commands to issue the command.
 *
 * @param dev The device this sysfs file is for
 * @param attr The attributes of the sysfs file
 * @param buf The value written to the sysfs file
 * @param count The number of bytes written to the sysfs file
 *
 * @return @c count if the function succeeded. An error code on failure.
 */
static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int i;

	kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
		if (sysfs_streq(debug_commands[i].str, buf)) {
			debug_commands[i].func(kbdev);
			return count;
		}
	}

	/* Debug Command not found */
	dev_err(dev, "debug_command: command not known\n");
	return -EINVAL;
}
/** The sysfs file @c debug_command.
 *
 * This is used to issue general debug commands to the device driver.
 * Reading it will produce a list of debug commands, separated by newlines.
 * Writing to it with one of those commands will issue said command.
 */
static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
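
/*
 * Illustrative shell usage:
 *
 *	# cat debug_command
 *	dumptrace
 *	# echo dumptrace > debug_command
 */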
#endif /* CONFIG_MALI_DEBUG */
  2535. /**
  2536. * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
  2537. * @dev: The device this sysfs file is for.
  2538. * @attr: The attributes of the sysfs file.
  2539. * @buf: The output buffer to receive the GPU information.
  2540. *
  2541. * This function is called to get a description of the present Mali
  2542. * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
  2543. * number of cores, the hardware version and the raw product id. For
  2544. * example:
  2545. *
  2546. * Mali-T60x MP4 r0p0 0x6956
  2547. *
  2548. * Return: The number of bytes output to buf.
  2549. */
static ssize_t kbase_show_gpuinfo(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	static const struct gpu_product_id_name {
		unsigned int id;
		const char *name;
	} gpu_product_id_names[] = {
		{ .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
		{ .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
		{ .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
		{ .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
		{ .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
		{ .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
		{ .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
		{ .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
		{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
		  .name = "Mali-G71" },
	};
	const char *product_name = "(Unknown Mali GPU)";
	struct kbase_device *kbdev;
	u32 gpu_id;
	unsigned int product_id, product_id_mask;
	unsigned int i;
	bool is_new_format;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
	product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
	is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
	product_id_mask =
		(is_new_format ?
			GPU_ID2_PRODUCT_MODEL :
			GPU_ID_VERSION_PRODUCT_ID) >>
		GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
		const struct gpu_product_id_name *p = &gpu_product_id_names[i];

		if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
		    (p->id & product_id_mask) ==
		    (product_id & product_id_mask)) {
			product_name = p->name;
			break;
		}
	}

	return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
		product_name, kbdev->gpu_props.num_cores,
		(gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
		(gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
		product_id);
}
static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
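/* Example (output taken from the kernel-doc above; the actual string depends
 * on the GPU fitted, and the sysfs path on the platform device name):
 *
 *   $ cat /sys/devices/platform/mali.0/gpuinfo
 *   Mali-T60x MP4 r0p0 0x6956
 */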
/**
 * set_dvfs_period - Store callback for the dvfs_period sysfs file.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The value written to the sysfs file.
 * @count: The number of bytes written to the sysfs file.
 *
 * This function is called when the dvfs_period sysfs file is written to. It
 * checks the data written, and if valid updates the DVFS period variable.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_dvfs_period(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	int dvfs_period;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &dvfs_period);
	if (ret || dvfs_period <= 0) {
		dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
				"Use format <dvfs_period_ms>\n");
		return -EINVAL;
	}

	kbdev->pm.dvfs_period = dvfs_period;
	dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);

	return count;
}

/**
 * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer to receive the current DVFS period.
 *
 * This function is called to get the current period used for the DVFS sample
 * timer.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_dvfs_period(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);

	return ret;
}
static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
		set_dvfs_period);
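/* Example (path hypothetical): set a 100 ms DVFS sampling period and read it
 * back:
 *
 *   echo 100 > /sys/devices/platform/mali.0/dvfs_period
 *   cat /sys/devices/platform/mali.0/dvfs_period
 */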
/**
 * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The value written to the sysfs file.
 * @count: The number of bytes written to the sysfs file.
 *
 * This function is called when the pm_poweroff sysfs file is written to.
 *
 * This file contains three values separated by whitespace. The values
 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
 * ticks before the GPU is powered off), in that order.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_pm_poweroff(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int items;
	s64 gpu_poweroff_time;
	int poweroff_shader_ticks, poweroff_gpu_ticks;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	/* Format specifiers match the declared types: s64 is "long long" in
	 * the kernel and the tick counts are plain ints. */
	items = sscanf(buf, "%lld %d %d", &gpu_poweroff_time,
			&poweroff_shader_ticks,
			&poweroff_gpu_ticks);
	if (items != 3) {
		dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
				"Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
		return -EINVAL;
	}

	kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
	kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
	kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;

	return count;
}

/**
 * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer to receive the current poweroff settings.
 *
 * This function is called to get the current poweroff timer period and the
 * shader and GPU poweroff tick counts.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_pm_poweroff(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%lld %d %d\n",
			ktime_to_ns(kbdev->pm.gpu_poweroff_time),
			kbdev->pm.poweroff_shader_ticks,
			kbdev->pm.poweroff_gpu_ticks);

	return ret;
}
static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
		set_pm_poweroff);
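/* Example (values illustrative; path hypothetical): a 400000 ns poweroff
 * timer, with shaders powered off after 3 idle ticks and the GPU after 10:
 *
 *   echo "400000 3 10" > /sys/devices/platform/mali.0/pm_poweroff
 */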
/**
 * set_reset_timeout - Store callback for the reset_timeout sysfs file.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The value written to the sysfs file.
 * @count: The number of bytes written to the sysfs file.
 *
 * This function is called when the reset_timeout sysfs file is written to. It
 * checks the data written, and if valid updates the reset timeout.
 *
 * Return: @c count if the function succeeded. An error code on failure.
 */
static ssize_t set_reset_timeout(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	int ret;
	int reset_timeout;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &reset_timeout);
	if (ret || reset_timeout <= 0) {
		dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
				"Use format <reset_timeout_ms>\n");
		return -EINVAL;
	}

	kbdev->reset_timeout_ms = reset_timeout;
	dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);

	return count;
}

/**
 * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
 * @dev: The device this sysfs file is for.
 * @attr: The attributes of the sysfs file.
 * @buf: The output buffer to receive the current reset timeout.
 *
 * This function is called to get the current reset timeout.
 *
 * Return: The number of bytes output to buf.
 */
static ssize_t show_reset_timeout(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);

	return ret;
}
static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
		set_reset_timeout);
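/* Example (path hypothetical): allow jobs 500 ms before the GPU is reset:
 *
 *   echo 500 > /sys/devices/platform/mali.0/reset_timeout
 */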
/** Show callback for the @c mem_pool_size sysfs file.
 *
 * Reports the current size, in pages, of the device memory pool.
 */
static ssize_t show_mem_pool_size(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
			kbase_mem_pool_size(&kbdev->mem_pool));

	return ret;
}

/** Store callback for the @c mem_pool_size sysfs file.
 *
 * Trims the device memory pool to the requested number of pages.
 */
static ssize_t set_mem_pool_size(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	unsigned long new_size;
	int err;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	/* Parse into an unsigned long directly rather than casting a size_t
	 * pointer, which is not guaranteed to be the same type. */
	err = kstrtoul(buf, 0, &new_size);
	if (err)
		return err;

	kbase_mem_pool_trim(&kbdev->mem_pool, new_size);

	return count;
}
static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
		set_mem_pool_size);
/** Show callback for the @c mem_pool_max_size sysfs file.
 *
 * Reports the maximum size, in pages, of the device memory pool.
 */
static ssize_t show_mem_pool_max_size(struct device *dev,
		struct device_attribute *attr, char * const buf)
{
	struct kbase_device *kbdev;
	ssize_t ret;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
			kbase_mem_pool_max_size(&kbdev->mem_pool));

	return ret;
}

/** Store callback for the @c mem_pool_max_size sysfs file.
 *
 * Sets the maximum size, in pages, of the device memory pool.
 */
static ssize_t set_mem_pool_max_size(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	unsigned long new_max_size;
	int err;

	kbdev = to_kbase_device(dev);
	if (!kbdev)
		return -ENODEV;

	err = kstrtoul(buf, 0, &new_max_size);
	if (err)
		return -EINVAL;

	kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);

	return count;
}
static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
		set_mem_pool_max_size);
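/* Example (paths hypothetical): cap the pool at 4096 pages, then trim the
 * pool's current size down to 1024 pages:
 *
 *   echo 4096 > /sys/devices/platform/mali.0/mem_pool_max_size
 *   echo 1024 > /sys/devices/platform/mali.0/mem_pool_size
 */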
static int kbasep_secure_mode_enable(struct kbase_device *kbdev)
{
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
		GPU_COMMAND_SET_PROTECTED_MODE, NULL);
	return 0;
}

static int kbasep_secure_mode_disable(struct kbase_device *kbdev)
{
	if (!kbase_prepare_to_reset_gpu_locked(kbdev))
		return -EBUSY;

	kbase_reset_gpu_locked(kbdev);

	return 0;
}

static struct kbase_secure_ops kbasep_secure_ops = {
	.secure_mode_enable = kbasep_secure_mode_enable,
	.secure_mode_disable = kbasep_secure_mode_disable,
};

static void kbasep_secure_mode_init(struct kbase_device *kbdev)
{
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
		/* Use native secure ops */
		kbdev->secure_ops = &kbasep_secure_ops;
		kbdev->secure_mode_support = true;
	}
#ifdef SECURE_CALLBACKS
	else {
		kbdev->secure_ops = SECURE_CALLBACKS;
		kbdev->secure_mode_support = false;

		if (kbdev->secure_ops) {
			int err;

			/* Make sure secure mode is disabled on startup */
			err = kbdev->secure_ops->secure_mode_disable(kbdev);

			/* secure_mode_disable() returns -EINVAL if not
			 * supported
			 */
			kbdev->secure_mode_support = (err != -EINVAL);
		}
	}
#endif
}
#ifdef CONFIG_MALI_NO_MALI
static int kbase_common_reg_map(struct kbase_device *kbdev)
{
	return 0;
}
static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
{
}
#else /* CONFIG_MALI_NO_MALI */
static int kbase_common_reg_map(struct kbase_device *kbdev)
{
	int err = -ENOMEM;

	if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
		dev_err(kbdev->dev, "Register window unavailable\n");
		err = -EIO;
		goto out_region;
	}

	kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
	if (!kbdev->reg) {
		dev_err(kbdev->dev, "Can't remap register window\n");
		err = -EINVAL;
		goto out_ioremap;
	}

	return 0;

 out_ioremap:
	release_mem_region(kbdev->reg_start, kbdev->reg_size);
 out_region:
	return err;
}

static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
{
	if (kbdev->reg) {
		iounmap(kbdev->reg);
		release_mem_region(kbdev->reg_start, kbdev->reg_size);
		kbdev->reg = NULL;
		kbdev->reg_start = 0;
		kbdev->reg_size = 0;
	}
}
#endif /* CONFIG_MALI_NO_MALI */

static int registers_map(struct kbase_device * const kbdev)
{
	/* the first memory resource is the physical address of the GPU
	 * registers */
	struct platform_device *pdev = to_platform_device(kbdev->dev);
	struct resource *reg_res;
	int err;

	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!reg_res) {
		dev_err(kbdev->dev, "Invalid register resource\n");
		return -ENOENT;
	}

	kbdev->reg_start = reg_res->start;
	kbdev->reg_size = resource_size(reg_res);

	err = kbase_common_reg_map(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Failed to map registers\n");
		return err;
	}

	return 0;
}

static void registers_unmap(struct kbase_device *kbdev)
{
	kbase_common_reg_unmap(kbdev);
}
static int power_control_init(struct platform_device *pdev)
{
	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
	int err = 0;

	if (!kbdev)
		return -ENODEV;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
			&& defined(CONFIG_REGULATOR)
	kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
	if (IS_ERR_OR_NULL(kbdev->regulator)) {
		err = PTR_ERR(kbdev->regulator);
		kbdev->regulator = NULL;
		if (err == -EPROBE_DEFER) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			return err;
		}
		dev_info(kbdev->dev,
			"Continuing without Mali regulator control\n");
		/* Allow probe to continue without regulator */
	}
#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */

	kbdev->clock = clk_get(kbdev->dev, "clk_mali");
	if (IS_ERR_OR_NULL(kbdev->clock)) {
		err = PTR_ERR(kbdev->clock);
		kbdev->clock = NULL;
		if (err == -EPROBE_DEFER) {
			dev_err(&pdev->dev, "Failed to get clock\n");
			goto fail;
		}
		dev_info(kbdev->dev, "Continuing without Mali clock control\n");
		/* Allow probe to continue without clock. */
	} else {
		err = clk_prepare_enable(kbdev->clock);
		if (err) {
			dev_err(kbdev->dev,
				"Failed to prepare and enable clock (%d)\n",
				err);
			goto fail;
		}
	}

#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
	/* Register the OPPs if they are available in device tree */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	err = dev_pm_opp_of_add_table(kbdev->dev);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	err = of_init_opp_table(kbdev->dev);
#else
	err = 0;
#endif /* LINUX_VERSION_CODE */
	if (err)
		dev_dbg(kbdev->dev, "OPP table not found\n");
#endif /* CONFIG_OF && CONFIG_PM_OPP */

	return 0;

fail:
	if (kbdev->clock != NULL) {
		clk_put(kbdev->clock);
		kbdev->clock = NULL;
	}

#ifdef CONFIG_REGULATOR
	if (kbdev->regulator != NULL) {
		regulator_put(kbdev->regulator);
		kbdev->regulator = NULL;
	}
#endif

	return err;
}

static void power_control_term(struct kbase_device *kbdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	dev_pm_opp_of_remove_table(kbdev->dev);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
	of_free_opp_table(kbdev->dev);
#endif

	if (kbdev->clock) {
		clk_disable_unprepare(kbdev->clock);
		clk_put(kbdev->clock);
		kbdev->clock = NULL;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
			&& defined(CONFIG_REGULATOR)
	if (kbdev->regulator) {
		regulator_put(kbdev->regulator);
		kbdev->regulator = NULL;
	}
#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
}
#ifdef CONFIG_DEBUG_FS

#if KBASE_GPU_RESET_EN
#include <mali_kbase_hwaccess_jm.h>

static void trigger_quirks_reload(struct kbase_device *kbdev)
{
	kbase_pm_context_active(kbdev);
	if (kbase_prepare_to_reset_gpu(kbdev))
		kbase_reset_gpu(kbdev);
	kbase_pm_context_idle(kbdev);
}

#define MAKE_QUIRK_ACCESSORS(type) \
static int type##_quirks_set(void *data, u64 val) \
{ \
	struct kbase_device *kbdev; \
	kbdev = (struct kbase_device *)data; \
	kbdev->hw_quirks_##type = (u32)val; \
	trigger_quirks_reload(kbdev); \
	return 0;\
} \
\
static int type##_quirks_get(void *data, u64 *val) \
{ \
	struct kbase_device *kbdev;\
	kbdev = (struct kbase_device *)data;\
	*val = kbdev->hw_quirks_##type;\
	return 0;\
} \
DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
		type##_quirks_set, "%llu\n")

MAKE_QUIRK_ACCESSORS(sc);
MAKE_QUIRK_ACCESSORS(tiler);
MAKE_QUIRK_ACCESSORS(mmu);
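/* Example (debugfs mount point and device directory name are hypothetical):
 * read the shader core quirks value, then override it; writing triggers a
 * GPU reset so the new value is reloaded into hardware:
 *
 *   cat /sys/kernel/debug/mali0/quirks_sc
 *   echo 16 > /sys/kernel/debug/mali0/quirks_sc
 */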
#endif /* KBASE_GPU_RESET_EN */

static int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
	struct dentry *debugfs_ctx_defaults_directory;
	int err;

	kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
			NULL);
	if (!kbdev->mali_debugfs_directory) {
		dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
		err = -ENOMEM;
		goto out;
	}

	kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
			kbdev->mali_debugfs_directory);
	if (!kbdev->debugfs_ctx_directory) {
		dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
		err = -ENOMEM;
		goto out;
	}

	debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
			kbdev->debugfs_ctx_directory);
	if (!debugfs_ctx_defaults_directory) {
		dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
		err = -ENOMEM;
		goto out;
	}

#if !MALI_CUSTOMER_RELEASE
	kbasep_regs_dump_debugfs_add(kbdev);
#endif /* !MALI_CUSTOMER_RELEASE */

	kbase_debug_job_fault_debugfs_init(kbdev);
	kbasep_gpu_memory_debugfs_init(kbdev);

#if KBASE_GPU_RESET_EN
	debugfs_create_file("quirks_sc", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&fops_sc_quirks);
	debugfs_create_file("quirks_tiler", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&fops_tiler_quirks);
	debugfs_create_file("quirks_mmu", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&fops_mmu_quirks);
#endif /* KBASE_GPU_RESET_EN */

#ifndef CONFIG_MALI_COH_USER
	debugfs_create_bool("infinite_cache", 0644,
			debugfs_ctx_defaults_directory,
			&kbdev->infinite_cache_active_default);
#endif /* CONFIG_MALI_COH_USER */

	debugfs_create_size_t("mem_pool_max_size", 0644,
			debugfs_ctx_defaults_directory,
			&kbdev->mem_pool_max_size_default);

#if KBASE_TRACE_ENABLE
	kbasep_trace_debugfs_init(kbdev);
#endif /* KBASE_TRACE_ENABLE */

#ifdef CONFIG_MALI_TRACE_TIMELINE
	kbasep_trace_timeline_debugfs_init(kbdev);
#endif /* CONFIG_MALI_TRACE_TIMELINE */

	return 0;

out:
	debugfs_remove_recursive(kbdev->mali_debugfs_directory);
	return err;
}

static void kbase_device_debugfs_term(struct kbase_device *kbdev)
{
	debugfs_remove_recursive(kbdev->mali_debugfs_directory);
}

#else /* CONFIG_DEBUG_FS */
static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
	return 0;
}

static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
#endif /* CONFIG_DEBUG_FS */
static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
{
#ifdef CONFIG_OF
	u32 supported_coherency_bitmap =
		kbdev->gpu_props.props.raw_props.coherency_mode;
	const void *coherency_override_dts;
	u32 override_coherency;
#endif /* CONFIG_OF */

	kbdev->system_coherency = COHERENCY_NONE;

	/* device tree may override the coherency */
#ifdef CONFIG_OF
	coherency_override_dts = of_get_property(kbdev->dev->of_node,
			"system-coherency",
			NULL);
	if (coherency_override_dts) {
		override_coherency = be32_to_cpup(coherency_override_dts);

		if ((override_coherency <= COHERENCY_NONE) &&
			(supported_coherency_bitmap &
			 COHERENCY_FEATURE_BIT(override_coherency))) {
			kbdev->system_coherency = override_coherency;

			dev_info(kbdev->dev,
				"Using coherency mode %u set from dtb",
				override_coherency);
		} else {
			dev_warn(kbdev->dev,
				"Ignoring unsupported coherency mode %u set from dtb",
				override_coherency);
		}
	}
#endif /* CONFIG_OF */

	kbdev->gpu_props.props.raw_props.coherency_mode =
		kbdev->system_coherency;
}
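/* Example device tree override (illustrative; the value must be one of the
 * driver's COHERENCY_* mode numbers and must be supported by the GPU, e.g.
 * COHERENCY_NONE is assumed here to be 31):
 *
 *   gpu@fc010000 {
 *           compatible = "arm,mali-midgard";
 *           system-coherency = <31>;
 *   };
 */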
#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
/* Callback used by the kbase bus logger client, to initiate a GPU reset
 * when the bus log is restarted. GPU reset is used as reference point
 * in HW bus log analyses.
 */
static void kbase_logging_started_cb(void *data)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;

	if (kbase_prepare_to_reset_gpu(kbdev))
		kbase_reset_gpu(kbdev);
	dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
}
#endif

static struct attribute *kbase_attrs[] = {
#ifdef CONFIG_MALI_DEBUG
	&dev_attr_debug_command.attr,
	&dev_attr_js_softstop_always.attr,
#endif
#if !MALI_CUSTOMER_RELEASE
	&dev_attr_force_replay.attr,
#endif
	&dev_attr_js_timeouts.attr,
	&dev_attr_soft_event_timeout.attr,
	&dev_attr_gpuinfo.attr,
	&dev_attr_dvfs_period.attr,
	&dev_attr_pm_poweroff.attr,
	&dev_attr_reset_timeout.attr,
	&dev_attr_js_scheduling_period.attr,
	&dev_attr_power_policy.attr,
	&dev_attr_core_availability_policy.attr,
	&dev_attr_core_mask.attr,
	&dev_attr_mem_pool_size.attr,
	&dev_attr_mem_pool_max_size.attr,
	NULL
};

static const struct attribute_group kbase_attr_group = {
	.attrs = kbase_attrs,
};

static int kbase_platform_device_remove(struct platform_device *pdev)
{
	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
	const struct list_head *dev_list;

	if (!kbdev)
		return -ENODEV;

#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
	if (kbdev->inited_subsys & inited_buslogger) {
		bl_core_client_unregister(kbdev->buslogger);
		kbdev->inited_subsys &= ~inited_buslogger;
	}
#endif

	if (kbdev->inited_subsys & inited_sysfs_group) {
		sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
		kbdev->inited_subsys &= ~inited_sysfs_group;
	}

	if (kbdev->inited_subsys & inited_dev_list) {
		dev_list = kbase_dev_list_get();
		list_del(&kbdev->entry);
		kbase_dev_list_put(dev_list);
		kbdev->inited_subsys &= ~inited_dev_list;
	}

	if (kbdev->inited_subsys & inited_misc_register) {
		misc_deregister(&kbdev->mdev);
		kbdev->inited_subsys &= ~inited_misc_register;
	}

	if (kbdev->inited_subsys & inited_get_device) {
		put_device(kbdev->dev);
		kbdev->inited_subsys &= ~inited_get_device;
	}

	if (kbdev->inited_subsys & inited_debugfs) {
		kbase_device_debugfs_term(kbdev);
		kbdev->inited_subsys &= ~inited_debugfs;
	}

	if (kbdev->inited_subsys & inited_job_fault) {
		kbase_debug_job_fault_dev_term(kbdev);
		kbdev->inited_subsys &= ~inited_job_fault;
	}

#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
	if (kbdev->inited_subsys & inited_ipa) {
		kbase_ipa_term(kbdev->ipa_ctx);
		kbdev->inited_subsys &= ~inited_ipa;
	}
#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */

	if (kbdev->inited_subsys & inited_vinstr) {
		kbase_vinstr_term(kbdev->vinstr_ctx);
		kbdev->inited_subsys &= ~inited_vinstr;
	}

#ifdef CONFIG_MALI_DEVFREQ
	if (kbdev->inited_subsys & inited_devfreq) {
		kbase_devfreq_term(kbdev);
		kbdev->inited_subsys &= ~inited_devfreq;
	}
#endif

	if (kbdev->inited_subsys & inited_backend_late) {
		kbase_backend_late_term(kbdev);
		kbdev->inited_subsys &= ~inited_backend_late;
	}

	if (kbdev->inited_subsys & inited_tlstream) {
		kbase_tlstream_term();
		kbdev->inited_subsys &= ~inited_tlstream;
	}

	/* Bring job and mem sys to a halt before we continue termination */
	if (kbdev->inited_subsys & inited_js)
		kbasep_js_devdata_halt(kbdev);

	if (kbdev->inited_subsys & inited_mem)
		kbase_mem_halt(kbdev);

	if (kbdev->inited_subsys & inited_js) {
		kbasep_js_devdata_term(kbdev);
		kbdev->inited_subsys &= ~inited_js;
	}

	if (kbdev->inited_subsys & inited_mem) {
		kbase_mem_term(kbdev);
		kbdev->inited_subsys &= ~inited_mem;
	}

	if (kbdev->inited_subsys & inited_pm_runtime_init) {
		kbdev->pm.callback_power_runtime_term(kbdev);
		kbdev->inited_subsys &= ~inited_pm_runtime_init;
	}

	if (kbdev->inited_subsys & inited_device) {
		kbase_device_term(kbdev);
		kbdev->inited_subsys &= ~inited_device;
	}

	if (kbdev->inited_subsys & inited_backend_early) {
		kbase_backend_early_term(kbdev);
		kbdev->inited_subsys &= ~inited_backend_early;
	}

	if (kbdev->inited_subsys & inited_power_control) {
		power_control_term(kbdev);
		kbdev->inited_subsys &= ~inited_power_control;
	}

	if (kbdev->inited_subsys & inited_registers_map) {
		registers_unmap(kbdev);
		kbdev->inited_subsys &= ~inited_registers_map;
	}

#ifdef CONFIG_MALI_NO_MALI
	if (kbdev->inited_subsys & inited_gpu_device) {
		gpu_device_destroy(kbdev);
		kbdev->inited_subsys &= ~inited_gpu_device;
	}
#endif /* CONFIG_MALI_NO_MALI */

	if (kbdev->inited_subsys != 0)
		dev_err(kbdev->dev, "Missing sub system termination\n");

	kbase_device_free(kbdev);

	return 0;
}
static int kbase_platform_device_probe(struct platform_device *pdev)
{
	struct kbase_device *kbdev;
	struct mali_base_gpu_core_props *core_props;
	u32 gpu_id;
	const struct list_head *dev_list;
	int err = 0;

#ifdef CONFIG_OF
	err = kbase_platform_early_init();
	if (err) {
		dev_err(&pdev->dev, "Early platform initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
#endif

	kbdev = kbase_device_alloc();
	if (!kbdev) {
		dev_err(&pdev->dev, "Allocate device failed\n");
		kbase_platform_device_remove(pdev);
		return -ENOMEM;
	}

	kbdev->dev = &pdev->dev;
	dev_set_drvdata(kbdev->dev, kbdev);

#ifdef CONFIG_MALI_NO_MALI
	err = gpu_device_create(kbdev);
	if (err) {
		dev_err(&pdev->dev, "Dummy model initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_gpu_device;
#endif /* CONFIG_MALI_NO_MALI */

	err = assign_irqs(pdev);
	if (err) {
		dev_err(&pdev->dev, "IRQ search failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}

	err = registers_map(kbdev);
	if (err) {
		dev_err(&pdev->dev, "Register map failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_registers_map;

	err = power_control_init(pdev);
	if (err) {
		dev_err(&pdev->dev, "Power control initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_power_control;

	err = kbase_backend_early_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Early backend initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_backend_early;

	scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
			kbase_dev_nr);

	kbase_disjoint_init(kbdev);

	/* obtain min/max configured gpu frequencies */
	core_props = &(kbdev->gpu_props.props.core_props);
	core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
	core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
	kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;

	err = kbase_device_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_device;

	if (kbdev->pm.callback_power_runtime_init) {
		err = kbdev->pm.callback_power_runtime_init(kbdev);
		if (err) {
			dev_err(kbdev->dev,
				"Runtime PM initialization failed\n");
			kbase_platform_device_remove(pdev);
			return err;
		}
		kbdev->inited_subsys |= inited_pm_runtime_init;
	}

	err = kbase_mem_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_mem;

	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
	gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
	gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	kbase_device_coherency_init(kbdev, gpu_id);

	kbasep_secure_mode_init(kbdev);

	err = kbasep_js_devdata_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "JS devdata initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_js;

	err = kbase_tlstream_init();
	if (err) {
		dev_err(kbdev->dev, "Timeline stream initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_tlstream;

	err = kbase_backend_late_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Late backend initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_backend_late;

#ifdef CONFIG_MALI_DEVFREQ
	err = kbase_devfreq_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Devfreq initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_devfreq;
#endif /* CONFIG_MALI_DEVFREQ */

	kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
	if (!kbdev->vinstr_ctx) {
		dev_err(kbdev->dev,
			"Virtual instrumentation initialization failed\n");
		kbase_platform_device_remove(pdev);
		return -EINVAL;
	}
	kbdev->inited_subsys |= inited_vinstr;

#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
	kbdev->ipa_ctx = kbase_ipa_init(kbdev);
	if (!kbdev->ipa_ctx) {
		dev_err(kbdev->dev, "IPA initialization failed\n");
		kbase_platform_device_remove(pdev);
		return -EINVAL;
	}
	kbdev->inited_subsys |= inited_ipa;
#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */

	err = kbase_debug_job_fault_dev_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Job fault debug initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_job_fault;

	err = kbase_device_debugfs_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "DebugFS initialization failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_debugfs;

	/* initialize the kctx list */
	mutex_init(&kbdev->kctx_list_lock);
	INIT_LIST_HEAD(&kbdev->kctx_list);

	kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
	kbdev->mdev.name = kbdev->devname;
	kbdev->mdev.fops = &kbase_fops;
	kbdev->mdev.parent = get_device(kbdev->dev);
	kbdev->inited_subsys |= inited_get_device;

	err = misc_register(&kbdev->mdev);
	if (err) {
		dev_err(kbdev->dev, "Misc device registration failed for %s\n",
			kbdev->devname);
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_misc_register;

	dev_list = kbase_dev_list_get();
	list_add(&kbdev->entry, &kbase_dev_list);
	kbase_dev_list_put(dev_list);
	kbdev->inited_subsys |= inited_dev_list;

	err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
	if (err) {
		dev_err(&pdev->dev, "SysFS group creation failed\n");
		kbase_platform_device_remove(pdev);
		return err;
	}
	kbdev->inited_subsys |= inited_sysfs_group;

#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
	err = bl_core_client_register(kbdev->devname,
			kbase_logging_started_cb,
			kbdev, &kbdev->buslogger,
			THIS_MODULE, NULL);
	if (err == 0) {
		kbdev->inited_subsys |= inited_buslogger;
		bl_core_set_threshold(kbdev->buslogger, 1024 * 1024 * 1024);
	} else {
		dev_warn(kbdev->dev, "Bus log client registration failed\n");
		err = 0;
	}
#endif

	dev_info(kbdev->dev,
		"Probed as %s\n", dev_name(kbdev->mdev.this_device));

	kbase_dev_nr++;

	return err;
}
/** Suspend callback from the OS.
 *
 * This is called by Linux when the device should suspend.
 *
 * @param dev The device to suspend
 *
 * @return A standard Linux error code
 */
static int kbase_device_suspend(struct device *dev)
{
	struct kbase_device *kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

#if defined(CONFIG_PM_DEVFREQ) && \
		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	devfreq_suspend_device(kbdev->devfreq);
#endif

	kbase_pm_suspend(kbdev);
	return 0;
}

/** Resume callback from the OS.
 *
 * This is called by Linux when the device should resume from suspension.
 *
 * @param dev The device to resume
 *
 * @return A standard Linux error code
 */
static int kbase_device_resume(struct device *dev)
{
	struct kbase_device *kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	kbase_pm_resume(kbdev);

#if defined(CONFIG_PM_DEVFREQ) && \
		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	devfreq_resume_device(kbdev->devfreq);
#endif
	return 0;
}

/** Runtime suspend callback from the OS.
 *
 * This is called by Linux when the device should prepare for a condition in
 * which it will not be able to communicate with the CPU(s) and RAM due to
 * power management.
 *
 * @param dev The device to suspend
 *
 * @return A standard Linux error code
 */
#ifdef KBASE_PM_RUNTIME
static int kbase_device_runtime_suspend(struct device *dev)
{
	struct kbase_device *kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

#if defined(CONFIG_PM_DEVFREQ) && \
		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	devfreq_suspend_device(kbdev->devfreq);
#endif

	if (kbdev->pm.backend.callback_power_runtime_off) {
		kbdev->pm.backend.callback_power_runtime_off(kbdev);
		dev_dbg(dev, "runtime suspend\n");
	}
	return 0;
}
#endif /* KBASE_PM_RUNTIME */
/** Runtime resume callback from the OS.
 *
 * This is called by Linux when the device should go into a fully active state.
 *
 * @param dev The device to resume
 *
 * @return A standard Linux error code
 */
#ifdef KBASE_PM_RUNTIME
static int kbase_device_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct kbase_device *kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	if (kbdev->pm.backend.callback_power_runtime_on) {
		ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
		dev_dbg(dev, "runtime resume\n");
	}

#if defined(CONFIG_PM_DEVFREQ) && \
		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	devfreq_resume_device(kbdev->devfreq);
#endif

	return ret;
}
#endif /* KBASE_PM_RUNTIME */
#ifdef KBASE_PM_RUNTIME
/**
 * kbase_device_runtime_idle - Runtime idle callback from the OS.
 * @dev: The device to suspend
 *
 * This is called by Linux when the device appears to be inactive and it might
 * be placed into a low power state.
 *
 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
 * otherwise a standard Linux error code
 */
static int kbase_device_runtime_idle(struct device *dev)
{
	struct kbase_device *kbdev = to_kbase_device(dev);

	if (!kbdev)
		return -ENODEV;

	/* Use platform specific implementation if it exists. */
	if (kbdev->pm.backend.callback_power_runtime_idle)
		return kbdev->pm.backend.callback_power_runtime_idle(kbdev);

	return 0;
}
#endif /* KBASE_PM_RUNTIME */

/** The power management operations for the platform driver. */
static const struct dev_pm_ops kbase_pm_ops = {
	.suspend = kbase_device_suspend,
	.resume = kbase_device_resume,
#ifdef KBASE_PM_RUNTIME
	.runtime_suspend = kbase_device_runtime_suspend,
	.runtime_resume = kbase_device_runtime_resume,
	.runtime_idle = kbase_device_runtime_idle,
#endif /* KBASE_PM_RUNTIME */
};

#ifdef CONFIG_OF
static const struct of_device_id kbase_dt_ids[] = {
	{ .compatible = "arm,malit6xx" },
	{ .compatible = "arm,mali-midgard" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, kbase_dt_ids);
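/* Example device tree node matched by this table (addresses, interrupt
 * numbers and interrupt names are illustrative; check the platform's binding
 * documentation for the exact values expected on a given board):
 *
 *   gpu@fc010000 {
 *           compatible = "arm,mali-midgard";
 *           reg = <0xfc010000 0x4000>;
 *           interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
 *           interrupt-names = "JOB", "MMU", "GPU";
 *   };
 */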
#endif

static struct platform_driver kbase_platform_driver = {
	.probe = kbase_platform_device_probe,
	.remove = kbase_platform_device_remove,
	.driver = {
		   .name = kbase_drv_name,
		   .owner = THIS_MODULE,
		   .pm = &kbase_pm_ops,
		   .of_match_table = of_match_ptr(kbase_dt_ids),
	},
};

/*
 * The driver will not provide a shortcut to create the Mali platform device
 * anymore when using Device Tree.
 */
#ifdef CONFIG_OF
module_platform_driver(kbase_platform_driver);
#else

static int __init kbase_driver_init(void)
{
	int ret;

	ret = kbase_platform_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_MALI_PLATFORM_FAKE
	ret = kbase_platform_fake_register();
	if (ret)
		return ret;
#endif
	ret = platform_driver_register(&kbase_platform_driver);
#ifdef CONFIG_MALI_PLATFORM_FAKE
	if (ret)
		kbase_platform_fake_unregister();
#endif
	return ret;
}

static void __exit kbase_driver_exit(void)
{
	platform_driver_unregister(&kbase_platform_driver);
#ifdef CONFIG_MALI_PLATFORM_FAKE
	kbase_platform_fake_unregister();
#endif
}

module_init(kbase_driver_init);
module_exit(kbase_driver_exit);

#endif /* CONFIG_OF */

MODULE_LICENSE("GPL");
MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
		__stringify(BASE_UK_VERSION_MAJOR) "." \
		__stringify(BASE_UK_VERSION_MINOR) ")");

#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
#define CREATE_TRACE_POINTS
#endif

#ifdef CONFIG_MALI_GATOR_SUPPORT
/* Create the trace points (otherwise we just get code to call a tracepoint) */
#include "mali_linux_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);

void kbase_trace_mali_pm_status(u32 event, u64 value)
{
	trace_mali_pm_status(event, value);
}

void kbase_trace_mali_pm_power_off(u32 event, u64 value)
{
	trace_mali_pm_power_off(event, value);
}

void kbase_trace_mali_pm_power_on(u32 event, u64 value)
{
	trace_mali_pm_power_on(event, value);
}

void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
{
	trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
}

void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
{
	trace_mali_page_fault_insert_pages(event, value);
}

void kbase_trace_mali_mmu_as_in_use(int event)
{
	trace_mali_mmu_as_in_use(event);
}

void kbase_trace_mali_mmu_as_released(int event)
{
	trace_mali_mmu_as_released(event);
}

void kbase_trace_mali_total_alloc_pages_change(long long int event)
{
	trace_mali_total_alloc_pages_change(event);
}
#endif /* CONFIG_MALI_GATOR_SUPPORT */

#ifdef CONFIG_MALI_SYSTEM_TRACE
#include "mali_linux_kbase_trace.h"
#endif