- /*
- *
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
- #include <mali_kbase.h>
- #include <mali_kbase_hwaccess_gpuprops.h>
- #include <mali_kbase_config_defaults.h>
- #include <mali_kbase_uku.h>
- #include <mali_midg_regmap.h>
- #include <mali_kbase_instr.h>
- #include <mali_kbase_gator.h>
- #include <backend/gpu/mali_kbase_js_affinity.h>
- #include <mali_kbase_mem_linux.h>
- #ifdef CONFIG_MALI_DEVFREQ
- #include <backend/gpu/mali_kbase_devfreq.h>
- #endif /* CONFIG_MALI_DEVFREQ */
- #ifdef CONFIG_MALI_NO_MALI
- #include "mali_kbase_model_linux.h"
- #endif /* CONFIG_MALI_NO_MALI */
- #include "mali_kbase_mem_profile_debugfs_buf_size.h"
- #include "mali_kbase_debug_mem_view.h"
- #include "mali_kbase_mem.h"
- #include "mali_kbase_mem_pool_debugfs.h"
- #if !MALI_CUSTOMER_RELEASE
- #include "mali_kbase_regs_dump_debugfs.h"
- #endif /* !MALI_CUSTOMER_RELEASE */
- #include <mali_kbase_hwaccess_backend.h>
- #include <mali_kbase_hwaccess_jm.h>
- #include <backend/gpu/mali_kbase_device_internal.h>
- #ifdef CONFIG_KDS
- #include <linux/kds.h>
- #include <linux/anon_inodes.h>
- #include <linux/syscalls.h>
- #endif /* CONFIG_KDS */
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/poll.h>
- #include <linux/kernel.h>
- #include <linux/errno.h>
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/miscdevice.h>
- #include <linux/list.h>
- #include <linux/semaphore.h>
- #include <linux/fs.h>
- #include <linux/uaccess.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/mm.h>
- #include <linux/compat.h> /* is_compat_task */
- #include <linux/mman.h>
- #include <linux/version.h>
- #include <linux/security.h>
- #ifdef CONFIG_MALI_PLATFORM_DEVICETREE
- #include <linux/pm_runtime.h>
- #endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
- #include <mali_kbase_hw.h>
- #include <platform/mali_kbase_platform_common.h>
- #ifdef CONFIG_MALI_PLATFORM_FAKE
- #include <platform/mali_kbase_platform_fake.h>
- #endif /*CONFIG_MALI_PLATFORM_FAKE */
- #ifdef CONFIG_SYNC
- #include <mali_kbase_sync.h>
- #endif /* CONFIG_SYNC */
- #ifdef CONFIG_PM_DEVFREQ
- #include <linux/devfreq.h>
- #endif /* CONFIG_PM_DEVFREQ */
- #include <linux/clk.h>
- #include <linux/delay.h>
- #include <mali_kbase_config.h>
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
- #include <linux/pm_opp.h>
- #else
- #include <linux/opp.h>
- #endif
- #include <mali_kbase_tlstream.h>
- /* GPU IRQ Tags */
- #define JOB_IRQ_TAG 0
- #define MMU_IRQ_TAG 1
- #define GPU_IRQ_TAG 2
- #if MALI_UNIT_TEST
- static struct kbase_exported_test_data shared_kernel_test_data;
- EXPORT_SYMBOL(shared_kernel_test_data);
- #endif /* MALI_UNIT_TEST */
- static int kbase_dev_nr;
- static DEFINE_MUTEX(kbase_dev_list_lock);
- static LIST_HEAD(kbase_dev_list);
- #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
- static inline void __compile_time_asserts(void)
- {
- CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
- }
- #ifdef CONFIG_KDS
- struct kbasep_kds_resource_set_file_data {
- struct kds_resource_set *lock;
- };
- static int kds_resource_release(struct inode *inode, struct file *file);
- static const struct file_operations kds_resource_fops = {
- .release = kds_resource_release
- };
- struct kbase_kds_resource_list_data {
- struct kds_resource **kds_resources;
- unsigned long *kds_access_bitmap;
- int num_elems;
- };
- static int kds_resource_release(struct inode *inode, struct file *file)
- {
- struct kbasep_kds_resource_set_file_data *data;
- data = (struct kbasep_kds_resource_set_file_data *)file->private_data;
- if (NULL != data) {
- if (NULL != data->lock)
- kds_resource_set_release(&data->lock);
- kfree(data);
- }
- return 0;
- }
- static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
- {
- struct base_external_resource *res = ext_res;
- int res_id;
- /* assume we have to wait for all */
- KBASE_DEBUG_ASSERT(0 != num_elems);
- resources_list->kds_resources = kmalloc_array(num_elems,
- sizeof(struct kds_resource *), GFP_KERNEL);
- if (NULL == resources_list->kds_resources)
- return -ENOMEM;
- KBASE_DEBUG_ASSERT(0 != num_elems);
- resources_list->kds_access_bitmap = kzalloc(
- sizeof(unsigned long) *
- ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
- GFP_KERNEL);
- if (NULL == resources_list->kds_access_bitmap) {
- kfree(resources_list->kds_resources);
- return -ENOMEM;
- }
- kbase_gpu_vm_lock(kctx);
- for (res_id = 0; res_id < num_elems; res_id++, res++) {
- int exclusive;
- struct kbase_va_region *reg;
- struct kds_resource *kds_res = NULL;
- exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
- reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
- /* did we find a matching region object? */
- if (NULL == reg || (reg->flags & KBASE_REG_FREE))
- break;
- /* no need to check reg->alloc as only regions with an alloc have
- * a size, and kbase_region_tracker_find_region_enclosing_address
- * only returns regions with size > 0 */
- switch (reg->gpu_alloc->type) {
- #if defined(CONFIG_UMP) && defined(CONFIG_KDS)
- case KBASE_MEM_TYPE_IMPORTED_UMP:
- kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle);
- break;
- #endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */
- default:
- break;
- }
- /* no kds resource for the region ? */
- if (!kds_res)
- break;
- resources_list->kds_resources[res_id] = kds_res;
- if (exclusive)
- set_bit(res_id, resources_list->kds_access_bitmap);
- }
- kbase_gpu_vm_unlock(kctx);
- /* did the loop run to completion? */
- if (res_id == num_elems)
- return 0;
- /* Clean up as the resource list is not valid. */
- kfree(resources_list->kds_resources);
- kfree(resources_list->kds_access_bitmap);
- return -EINVAL;
- }
- static bool kbasep_validate_kbase_pointer(
- struct kbase_context *kctx, union kbase_pointer *p)
- {
- if (kctx->is_compat) {
- if (p->compat_value == 0)
- return false;
- } else {
- if (NULL == p->value)
- return false;
- }
- return true;
- }
- static int kbase_external_buffer_lock(struct kbase_context *kctx,
- struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
- {
- struct base_external_resource *ext_res_copy;
- size_t ext_resource_size;
- int ret = -EINVAL;
- int fd = -EBADF;
- struct base_external_resource __user *ext_res_user;
- int __user *file_desc_usr;
- struct kbasep_kds_resource_set_file_data *fdata;
- struct kbase_kds_resource_list_data resource_list_data;
- if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
- return -EINVAL;
- /* Check user space has provided valid data */
- if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
- !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
- (0 == args->num_res) ||
- (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
- return -EINVAL;
- ext_resource_size = sizeof(struct base_external_resource) * args->num_res;
- KBASE_DEBUG_ASSERT(0 != ext_resource_size);
- ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);
- if (!ext_res_copy)
- return -EINVAL;
- #ifdef CONFIG_COMPAT
- if (kctx->is_compat) {
- ext_res_user = compat_ptr(args->external_resource.compat_value);
- file_desc_usr = compat_ptr(args->file_descriptor.compat_value);
- } else {
- #endif /* CONFIG_COMPAT */
- ext_res_user = args->external_resource.value;
- file_desc_usr = args->file_descriptor.value;
- #ifdef CONFIG_COMPAT
- }
- #endif /* CONFIG_COMPAT */
- /* Copy the external resources to lock from user space */
- if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size))
- goto out;
- /* Allocate data to be stored in the file */
- fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);
- if (!fdata) {
- ret = -ENOMEM;
- goto out;
- }
- /* Parse given elements and create resource and access lists */
- ret = kbasep_kds_allocate_resource_list_data(kctx,
- ext_res_copy, args->num_res, &resource_list_data);
- if (!ret) {
- long err;
- fdata->lock = NULL;
- fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0);
- err = copy_to_user(file_desc_usr, &fd, sizeof(fd));
- /* If the file descriptor was valid and we successfully copied
- * it to user space, then we can try and lock the requested
- * kds resources.
- */
- if ((fd >= 0) && (0 == err)) {
- struct kds_resource_set *lock;
- lock = kds_waitall(args->num_res,
- resource_list_data.kds_access_bitmap,
- resource_list_data.kds_resources,
- KDS_WAIT_BLOCKING);
- if (!lock) {
- ret = -EINVAL;
- } else if (IS_ERR(lock)) {
- ret = PTR_ERR(lock);
- } else {
- ret = 0;
- fdata->lock = lock;
- }
- } else {
- ret = -EINVAL;
- }
- kfree(resource_list_data.kds_resources);
- kfree(resource_list_data.kds_access_bitmap);
- }
- if (ret) {
- /* If the file was opened successfully then close it which will
- * clean up the file data, otherwise we clean up the file data
- * ourself.
- */
- if (fd >= 0)
- sys_close(fd);
- else
- kfree(fdata);
- }
- out:
- kfree(ext_res_copy);
- return ret;
- }
- #endif /* CONFIG_KDS */
- static void kbase_create_timeline_objects(struct kbase_context *kctx)
- {
- struct kbase_device *kbdev = kctx->kbdev;
- unsigned int lpu_id;
- unsigned int as_nr;
- struct kbasep_kctx_list_element *element;
- /* Create LPU objects. */
- for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- u32 *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
- }
- /* Create Address Space objects. */
- for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);
- /* Create GPU object and make it retain all LPUs and address spaces. */
- kbase_tlstream_tl_summary_new_gpu(
- kbdev,
- kbdev->gpu_props.props.raw_props.gpu_id,
- kbdev->gpu_props.num_cores);
- for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- void *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
- }
- for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- kbase_tlstream_tl_summary_lifelink_as_gpu(
- &kbdev->as[as_nr],
- kbdev);
- /* Create object for each known context. */
- mutex_lock(&kbdev->kctx_list_lock);
- list_for_each_entry(element, &kbdev->kctx_list, link) {
- kbase_tlstream_tl_summary_new_ctx(
- element->kctx,
- (u32)(element->kctx->id),
- (u32)(element->kctx->tgid));
- }
- /* Before releasing the lock, reset body stream buffers.
- * This prevents context creation messages from being directed to both
- * the summary and body streams. */
- kbase_tlstream_reset_body_streams();
- mutex_unlock(&kbdev->kctx_list_lock);
- /* Static objects are placed into the summary packet, which needs to be
- * transmitted first. Flush all streams to make it available to
- * user space. */
- kbase_tlstream_flush_streams();
- }
- static void kbase_api_handshake(struct uku_version_check_args *version)
- {
- switch (version->major) {
- #ifdef BASE_LEGACY_UK6_SUPPORT
- case 6:
- /* We are backwards compatible with version 6,
- * so pretend to be the old version */
- version->major = 6;
- version->minor = 1;
- break;
- #endif /* BASE_LEGACY_UK6_SUPPORT */
- #ifdef BASE_LEGACY_UK7_SUPPORT
- case 7:
- /* We are backwards compatible with version 7,
- * so pretend to be the old version */
- version->major = 7;
- version->minor = 1;
- break;
- #endif /* BASE_LEGACY_UK7_SUPPORT */
- #ifdef BASE_LEGACY_UK8_SUPPORT
- case 8:
- /* We are backwards compatible with version 8,
- * so pretend to be the old version */
- version->major = 8;
- version->minor = 4;
- break;
- #endif /* BASE_LEGACY_UK8_SUPPORT */
- #ifdef BASE_LEGACY_UK9_SUPPORT
- case 9:
- /* We are backwards compatible with version 9,
- * so pretend to be the old version */
- version->major = 9;
- version->minor = 0;
- break;
- #endif /* BASE_LEGACY_UK9_SUPPORT */
- case BASE_UK_VERSION_MAJOR:
- /* set minor to be the lowest common */
- version->minor = min_t(int, BASE_UK_VERSION_MINOR,
- (int)version->minor);
- break;
- default:
- /* We return our actual version regardless if it
- * matches the version returned by userspace -
- * userspace can bail if it can't handle this
- * version */
- version->major = BASE_UK_VERSION_MAJOR;
- version->minor = BASE_UK_VERSION_MINOR;
- break;
- }
- }
- /**
- * enum mali_error - Mali error codes shared with userspace
- *
- * This is a subset of the common Mali errors that can be returned to userspace.
- * Values of matching user and kernel space enumerators MUST be the same.
- * MALI_ERROR_NONE is guaranteed to be 0.
- */
- enum mali_error {
- MALI_ERROR_NONE = 0,
- MALI_ERROR_OUT_OF_GPU_MEMORY,
- MALI_ERROR_OUT_OF_MEMORY,
- MALI_ERROR_FUNCTION_FAILED,
- };
- enum {
- inited_mem = (1u << 0),
- inited_js = (1u << 1),
- inited_pm_runtime_init = (1u << 2),
- #ifdef CONFIG_MALI_DEVFREQ
- inited_devfreq = (1u << 3),
- #endif /* CONFIG_MALI_DEVFREQ */
- inited_tlstream = (1u << 4),
- inited_backend_early = (1u << 5),
- inited_backend_late = (1u << 6),
- inited_device = (1u << 7),
- inited_vinstr = (1u << 8),
- #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
- inited_ipa = (1u << 9),
- #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
- inited_job_fault = (1u << 10),
- inited_misc_register = (1u << 11),
- inited_get_device = (1u << 12),
- inited_sysfs_group = (1u << 13),
- inited_dev_list = (1u << 14),
- inited_debugfs = (1u << 15),
- inited_gpu_device = (1u << 16),
- inited_registers_map = (1u << 17),
- inited_power_control = (1u << 19),
- inited_buslogger = (1u << 20)
- };
- #ifdef CONFIG_MALI_DEBUG
- #define INACTIVE_WAIT_MS (5000)
- void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
- {
- kbdev->driver_inactive = inactive;
- wake_up(&kbdev->driver_inactive_wait);
- /* Wait for any running IOCTLs to complete */
- if (inactive)
- msleep(INACTIVE_WAIT_MS);
- }
- KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
- #endif /* CONFIG_MALI_DEBUG */
- /* Condensed hex dump. */
- #define LINE_SHIFT 4
- #define LINE_LENGTH (1 << LINE_SHIFT)
- #define LINE_FORMAT "0x%02X, 0x%02X, 0x%02X, 0x%02X, " \
- "0x%02X, 0x%02X, 0x%02X, 0x%02X, " \
- "0x%02X, 0x%02X, 0x%02X, 0x%02X, " \
- "0x%02X, 0x%02X, 0x%02X, 0x%02X"
- #define LINE_INPUT(b) b[0], b[1], b[2], b[3], \
- b[4], b[5], b[6], b[7], \
- b[8], b[9], b[10], b[11], \
- b[12], b[13], b[14], b[15]
- #define END_HEX(E) do { \
- if(repeat_count && !(E && c == 0)) { \
- printk("memset(%s + %d, %d, %d);", \
- array, repeat_start << LINE_SHIFT, \
- c, repeat_count << LINE_SHIFT); \
- repeat_count = 0; \
- } \
- } while (0)
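- /*
- * The dump below is emitted as compilable C: each 16-byte line that is not a
- * run of one repeated byte is printed as an "array[offset] = { ... };"
- * initializer, while runs of identical lines are coalesced by END_HEX into a
- * single memset() call (a trailing all-zero run is suppressed when END_HEX(1)
- * is invoked at the end of the buffer).
- */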
- /*static void formatted_hex_dump(char *array, uint8_t *buffer, size_t s)
- {
- int i = 0;
- uint8_t *out = kmalloc(3 * s, GFP_KERNEL);
- printk("%s", array);
- for(i = 0; i < s; ++i) {
- sprintf(out + (3 * i), "%02X ", buffer[i]);
- }
- out[(3*s) - 1] = 0;
- printk(out);
- kfree(out);
- }*/
- static void formatted_hex_dump(char *array, uint8_t *buffer, size_t sz)
- {
- int line_count;
- /* Repeated byte value for the current run */
- uint8_t c = 0;
- int repeat_count = 0;
- int repeat_start = 0;
- int b, line;
- if(!buffer) {
- printk("Bad buffer");
- return;
- }
- line_count = sz >> LINE_SHIFT;
- for(line = 0; line < line_count; ++line) {
- uint8_t *offset = buffer + (line << LINE_SHIFT);
- bool same = true;
- /* Check if still repeating */
- if(offset[0] != c) END_HEX(0);
- /* Check sameness */
- c = offset[0];
- for(b = 1; b < LINE_LENGTH; ++b) {
- if(offset[b] != c) same = false;
- }
- if(same) {
- if(!repeat_count) repeat_start = line;
- ++repeat_count;
- } else {
- printk("%s[%d] = {" LINE_FORMAT "};",
- array, line << LINE_SHIFT,
- LINE_INPUT(offset));
- }
- }
- END_HEX(1);
- }
- /* Dump CPU memory by address. */
- #define CPU_DUMP_SIZE 256
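- /*
- * Copy size_i bytes (CPU_DUMP_SIZE if size_i is 0) from the user-space
- * address cpu_addr into a freshly kmalloc'd kernel buffer, reporting the
- * effective size through size_o. Returns NULL on allocation or copy failure;
- * the caller is responsible for kfree()ing the returned buffer.
- */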
- static void* kbase_fetch_cpu(struct kbase_context *kctx, void __user *cpu_addr, size_t *size_o, size_t size_i)
- {
- uint8_t *buffer;
- if(!size_i) size_i = CPU_DUMP_SIZE;
- if(size_o) *size_o = size_i;
- buffer = kmalloc(size_i, GFP_KERNEL);
- if(!buffer) {
- return NULL;
- }
- if(copy_from_user(buffer, cpu_addr, size_i) != 0) {
- kfree(buffer);
- return NULL;
- }
- return buffer;
- }
- /* Dump GPU memory by address.
- * See mali_kbase_debug_mem_view.c for more information */
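- /*
- * Snapshot a GPU-mapped allocation: look up the region enclosing gpu_addr,
- * vmap() each backing page in turn and copy it into a contiguous kernel
- * buffer, then (if gpu_addr is offset into the region) re-copy from that
- * offset so the returned pointer starts exactly at gpu_addr. A size_i of 0
- * means "to the end of the allocation". The caller kfree()s the result.
- */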
- static void* kbase_fetch_gpu(struct kbase_context *kctx, u64 gpu_addr, size_t *size_o, size_t size_i)
- {
- struct kbase_va_region *reg;
- struct kbase_mem_phy_alloc *alloc;
- uint8_t *buffer;
- uint8_t *buffer_on;
- int p;
- pgprot_t prot = PAGE_KERNEL;
- uint64_t offset;
-
- reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
- if(!reg) {
- printk("Region not found!");
- return NULL;
- }
- if(!reg->gpu_alloc) {
- printk("No alloc!\n");
- return NULL;
- }
- if(gpu_addr < (reg->start_pfn << PAGE_SHIFT)) {
- printk("GPU address precedes region start\n");
- printk("GPU addr: %LX", gpu_addr);
- printk("start_pfn: %LX", reg->start_pfn);
- return NULL;
- }
- offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
- alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
- if(!size_i) size_i = (alloc->nents << PAGE_SHIFT) - offset;
- if(size_o) *size_o = size_i;
- if (!(reg->flags & KBASE_REG_CPU_CACHED))
- prot = pgprot_writecombine(prot);
- buffer = kmalloc(alloc->nents << PAGE_SHIFT, GFP_KERNEL);
- printk("Buf: %p\n", buffer);
- if(!buffer) {
- printk("Bad alloc");
- return NULL;
- }
- for(p = 0; p < alloc->nents; ++p) {
- struct page *page = pfn_to_page(PFN_DOWN(alloc->pages[p]));
- uint8_t *mapping = vmap(&page, 1, VM_MAP, prot);
- if(!mapping) {
- printk("Bad mapping");
- kfree(buffer);
- return NULL;
- }
- memcpy(buffer + (p << PAGE_SHIFT), mapping, PAGE_SIZE);
- vunmap(mapping);
- }
- if(offset) {
- buffer_on = kmalloc(size_i, GFP_KERNEL);
- if(!buffer_on) {
- kfree(buffer);
- return NULL;
- }
- memcpy(buffer_on, buffer + offset, size_i);
- kfree(buffer);
- return buffer_on;
- }
- return buffer;
- }
- /* The ioctl tracer is automatically generated by black */
- #include "black-output-trace.c"
- static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
- {
- struct kbase_device *kbdev;
- union uk_header *ukh = args;
- u32 id;
- int ret = 0;
- KBASE_DEBUG_ASSERT(ukh != NULL);
- kbdev = kctx->kbdev;
- id = ukh->id;
- ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
- kbase_trace_call(kctx, args, id, args_size, true);
- #ifdef CONFIG_MALI_DEBUG
- wait_event(kbdev->driver_inactive_wait,
- kbdev->driver_inactive == false);
- #endif /* CONFIG_MALI_DEBUG */
- if (UKP_FUNC_ID_CHECK_VERSION == id) {
- struct uku_version_check_args *version_check;
- if (args_size != sizeof(struct uku_version_check_args)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- return 0;
- }
- version_check = (struct uku_version_check_args *)args;
- kbase_api_handshake(version_check);
- /* save the proposed version number for later use */
- kctx->api_version = KBASE_API_VERSION(version_check->major,
- version_check->minor);
- ukh->ret = MALI_ERROR_NONE;
- return 0;
- }
- /* block calls until version handshake */
- if (kctx->api_version == 0)
- return -EINVAL;
- if (!atomic_read(&kctx->setup_complete)) {
- struct kbase_uk_set_flags *kbase_set_flags;
- /* setup pending, try to signal that we'll do the setup,
- * if setup was already in progress, err this call
- */
- if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
- return -EINVAL;
- /* if unexpected call, will stay stuck in setup mode
- * (is it the only call we accept?)
- */
- if (id != KBASE_FUNC_SET_FLAGS)
- return -EINVAL;
- kbase_set_flags = (struct kbase_uk_set_flags *)args;
- /* if not matching the expected call, stay in setup mode */
- if (sizeof(*kbase_set_flags) != args_size)
- goto bad_size;
- /* if bad flags, will stay stuck in setup mode */
- if (kbase_context_set_create_flags(kctx,
- kbase_set_flags->create_flags) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- atomic_set(&kctx->setup_complete, 1);
- return 0;
- }
- /* setup complete, perform normal operation */
- switch (id) {
- case KBASE_FUNC_MEM_JIT_INIT:
- {
- struct kbase_uk_mem_jit_init *jit_init = args;
- if (sizeof(*jit_init) != args_size)
- goto bad_size;
- if (kbase_region_tracker_init_jit(kctx,
- jit_init->va_pages))
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_MEM_ALLOC:
- {
- struct kbase_uk_mem_alloc *mem = args;
- struct kbase_va_region *reg;
- if (sizeof(*mem) != args_size)
- goto bad_size;
- #if defined(CONFIG_64BIT)
- if (!kctx->is_compat) {
- /* force SAME_VA if a 64-bit client */
- mem->flags |= BASE_MEM_SAME_VA;
- }
- #endif
- reg = kbase_mem_alloc(kctx, mem->va_pages,
- mem->commit_pages, mem->extent,
- &mem->flags, &mem->gpu_va,
- &mem->va_alignment);
- if (!reg)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_MEM_IMPORT: {
- struct kbase_uk_mem_import *mem_import = args;
- void __user *phandle;
- if (sizeof(*mem_import) != args_size)
- goto bad_size;
- #ifdef CONFIG_COMPAT
- if (kctx->is_compat)
- phandle = compat_ptr(mem_import->phandle.compat_value);
- else
- #endif
- phandle = mem_import->phandle.value;
- if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (kbase_mem_import(kctx, mem_import->type, phandle,
- &mem_import->gpu_va,
- &mem_import->va_pages,
- &mem_import->flags)) {
- mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- }
- break;
- }
- case KBASE_FUNC_MEM_ALIAS: {
- struct kbase_uk_mem_alias *alias = args;
- struct base_mem_aliasing_info __user *user_ai;
- struct base_mem_aliasing_info *ai;
- if (sizeof(*alias) != args_size)
- goto bad_size;
- if (alias->nents > 2048) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (!alias->nents) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- #ifdef CONFIG_COMPAT
- if (kctx->is_compat)
- user_ai = compat_ptr(alias->ai.compat_value);
- else
- #endif
- user_ai = alias->ai.value;
- ai = vmalloc(sizeof(*ai) * alias->nents);
- if (!ai) {
- ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
- break;
- }
- if (copy_from_user(ai, user_ai,
- sizeof(*ai) * alias->nents)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- goto copy_failed;
- }
- alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
- alias->stride,
- alias->nents, ai,
- &alias->va_pages);
- if (!alias->gpu_va) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- goto no_alias;
- }
- no_alias:
- copy_failed:
- vfree(ai);
- break;
- }
- case KBASE_FUNC_MEM_COMMIT:
- {
- struct kbase_uk_mem_commit *commit = args;
- if (sizeof(*commit) != args_size)
- goto bad_size;
- if (commit->gpu_addr & ~PAGE_MASK) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (kbase_mem_commit(kctx, commit->gpu_addr,
- commit->pages,
- (base_backing_threshold_status *)
- &commit->result_subcode) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_MEM_QUERY:
- {
- struct kbase_uk_mem_query *query = args;
- if (sizeof(*query) != args_size)
- goto bad_size;
- if (query->gpu_addr & ~PAGE_MASK) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
- query->query != KBASE_MEM_QUERY_VA_SIZE &&
- query->query != KBASE_MEM_QUERY_FLAGS) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (kbase_mem_query(kctx, query->gpu_addr,
- query->query, &query->value) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
- break;
- }
- break;
- case KBASE_FUNC_MEM_FLAGS_CHANGE:
- {
- struct kbase_uk_mem_flags_change *fc = args;
- if (sizeof(*fc) != args_size)
- goto bad_size;
- if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (kbase_mem_flags_change(kctx, fc->gpu_va,
- fc->flags, fc->mask) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_MEM_FREE:
- {
- struct kbase_uk_mem_free *mem = args;
- if (sizeof(*mem) != args_size)
- goto bad_size;
- if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_JOB_SUBMIT:
- {
- struct kbase_uk_job_submit *job = args;
- if (sizeof(*job) != args_size)
- goto bad_size;
- #ifdef BASE_LEGACY_UK6_SUPPORT
- if (kbase_jd_submit(kctx, job, 0) != 0)
- #else
- if (kbase_jd_submit(kctx, job) != 0)
- #endif /* BASE_LEGACY_UK6_SUPPORT */
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- #ifdef BASE_LEGACY_UK6_SUPPORT
- case KBASE_FUNC_JOB_SUBMIT_UK6:
- {
- struct kbase_uk_job_submit *job = args;
- if (sizeof(*job) != args_size)
- goto bad_size;
- if (kbase_jd_submit(kctx, job, 1) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- #endif
- case KBASE_FUNC_SYNC:
- {
- struct kbase_uk_sync_now *sn = args;
- if (sizeof(*sn) != args_size)
- goto bad_size;
- if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- #ifndef CONFIG_MALI_COH_USER
- if (kbase_sync_now(kctx, &sn->sset) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- #endif
- break;
- }
- case KBASE_FUNC_DISJOINT_QUERY:
- {
- struct kbase_uk_disjoint_query *dquery = args;
- if (sizeof(*dquery) != args_size)
- goto bad_size;
- /* Get the disjointness counter value. */
- dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
- break;
- }
- case KBASE_FUNC_POST_TERM:
- {
- kbase_event_close(kctx);
- break;
- }
- case KBASE_FUNC_HWCNT_SETUP:
- {
- struct kbase_uk_hwcnt_setup *setup = args;
- if (sizeof(*setup) != args_size)
- goto bad_size;
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
- &kctx->vinstr_cli, setup) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
- case KBASE_FUNC_HWCNT_DUMP:
- {
- /* args ignored */
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
- BASE_HWCNT_READER_EVENT_MANUAL) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
- case KBASE_FUNC_HWCNT_CLEAR:
- {
- /* args ignored */
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
- case KBASE_FUNC_HWCNT_READER_SETUP:
- {
- struct kbase_uk_hwcnt_reader_setup *setup = args;
- if (sizeof(*setup) != args_size)
- goto bad_size;
- mutex_lock(&kctx->vinstr_cli_lock);
- if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
- setup) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- mutex_unlock(&kctx->vinstr_cli_lock);
- break;
- }
- case KBASE_FUNC_GPU_PROPS_REG_DUMP:
- {
- struct kbase_uk_gpuprops *setup = args;
- if (sizeof(*setup) != args_size)
- goto bad_size;
- if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- case KBASE_FUNC_FIND_CPU_OFFSET:
- {
- struct kbase_uk_find_cpu_offset *find = args;
- if (sizeof(*find) != args_size)
- goto bad_size;
- if (find->gpu_addr & ~PAGE_MASK) {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
- goto out_bad;
- }
- if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- } else {
- int err;
- err = kbasep_find_enclosing_cpu_mapping_offset(
- kctx,
- find->gpu_addr,
- (uintptr_t) find->cpu_addr,
- (size_t) find->size,
- &find->offset);
- if (err)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- }
- break;
- }
- case KBASE_FUNC_GET_VERSION:
- {
- struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
- if (sizeof(*get_version) != args_size)
- goto bad_size;
- /* version buffer size check is made by a compile-time assert */
- memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
- get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
- break;
- }
- case KBASE_FUNC_STREAM_CREATE:
- {
- #ifdef CONFIG_SYNC
- struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
- if (sizeof(*screate) != args_size)
- goto bad_size;
- if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
- /* not NULL terminated */
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (kbase_stream_create(screate->name, &screate->fd) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
- #else /* CONFIG_SYNC */
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- #endif /* CONFIG_SYNC */
- break;
- }
- case KBASE_FUNC_FENCE_VALIDATE:
- {
- #ifdef CONFIG_SYNC
- struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
- if (sizeof(*fence_validate) != args_size)
- goto bad_size;
- if (kbase_fence_validate(fence_validate->fd) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
- #endif /* CONFIG_SYNC */
- break;
- }
- case KBASE_FUNC_EXT_BUFFER_LOCK:
- {
- #ifdef CONFIG_KDS
- ret = kbase_external_buffer_lock(kctx,
- (struct kbase_uk_ext_buff_kds_data *)args,
- args_size);
- switch (ret) {
- case 0:
- ukh->ret = MALI_ERROR_NONE;
- break;
- case -ENOMEM:
- ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
- break;
- default:
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- }
- #endif /* CONFIG_KDS */
- break;
- }
- case KBASE_FUNC_SET_TEST_DATA:
- {
- #if MALI_UNIT_TEST
- struct kbase_uk_set_test_data *set_data = args;
- shared_kernel_test_data = set_data->test_data;
- shared_kernel_test_data.kctx.value = (void __user *)kctx;
- shared_kernel_test_data.mm.value = (void __user *)current->mm;
- ukh->ret = MALI_ERROR_NONE;
- #endif /* MALI_UNIT_TEST */
- break;
- }
- case KBASE_FUNC_INJECT_ERROR:
- {
- #ifdef CONFIG_MALI_ERROR_INJECT
- unsigned long flags;
- struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
- /*mutex lock */
- spin_lock_irqsave(&kbdev->reg_op_lock, flags);
- if (job_atom_inject_error(&params) != 0)
- ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
- else
- ukh->ret = MALI_ERROR_NONE;
- spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
- /*mutex unlock */
- #endif /* CONFIG_MALI_ERROR_INJECT */
- break;
- }
- case KBASE_FUNC_MODEL_CONTROL:
- {
- #ifdef CONFIG_MALI_NO_MALI
- unsigned long flags;
- struct kbase_model_control_params params =
- ((struct kbase_uk_model_control_params *)args)->params;
- /*mutex lock */
- spin_lock_irqsave(&kbdev->reg_op_lock, flags);
- if (gpu_model_control(kbdev->model, &params) != 0)
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- else
- ukh->ret = MALI_ERROR_NONE;
- spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
- /*mutex unlock */
- #endif /* CONFIG_MALI_NO_MALI */
- break;
- }
- #ifdef BASE_LEGACY_UK8_SUPPORT
- case KBASE_FUNC_KEEP_GPU_POWERED:
- {
- dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- #endif /* BASE_LEGACY_UK8_SUPPORT */
- case KBASE_FUNC_GET_PROFILING_CONTROLS:
- {
- struct kbase_uk_profiling_controls *controls =
- (struct kbase_uk_profiling_controls *)args;
- u32 i;
- if (sizeof(*controls) != args_size)
- goto bad_size;
- for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
- controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
- break;
- }
- /* used only for testing purposes; these controls are to be set by gator through the gator API */
- case KBASE_FUNC_SET_PROFILING_CONTROLS:
- {
- struct kbase_uk_profiling_controls *controls =
- (struct kbase_uk_profiling_controls *)args;
- u32 i;
- if (sizeof(*controls) != args_size)
- goto bad_size;
- for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
- _mali_profiling_control(i, controls->profiling_controls[i]);
- break;
- }
- case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
- {
- struct kbase_uk_debugfs_mem_profile_add *add_data =
- (struct kbase_uk_debugfs_mem_profile_add *)args;
- char *buf;
- char __user *user_buf;
- if (sizeof(*add_data) != args_size)
- goto bad_size;
- if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
- dev_err(kbdev->dev, "buffer too big\n");
- goto out_bad;
- }
- #ifdef CONFIG_COMPAT
- if (kctx->is_compat)
- user_buf = compat_ptr(add_data->buf.compat_value);
- else
- #endif
- user_buf = add_data->buf.value;
- buf = kmalloc(add_data->len, GFP_KERNEL);
- if (!buf)
- goto out_bad;
- if (0 != copy_from_user(buf, user_buf, add_data->len)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- kfree(buf);
- goto out_bad;
- }
- if (kbasep_mem_profile_debugfs_insert(kctx, buf,
- add_data->len)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- kfree(buf);
- goto out_bad;
- }
- break;
- }
- #ifdef CONFIG_MALI_NO_MALI
- case KBASE_FUNC_SET_PRFCNT_VALUES:
- {
- struct kbase_uk_prfcnt_values *params =
- ((struct kbase_uk_prfcnt_values *)args);
- gpu_model_set_dummy_prfcnt_sample(params->data,
- params->size);
- break;
- }
- #endif /* CONFIG_MALI_NO_MALI */
- case KBASE_FUNC_TLSTREAM_ACQUIRE:
- {
- struct kbase_uk_tlstream_acquire *tlstream_acquire =
- args;
- if (sizeof(*tlstream_acquire) != args_size)
- goto bad_size;
- if (0 != kbase_tlstream_acquire(
- kctx,
- &tlstream_acquire->fd)) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- } else if (0 <= tlstream_acquire->fd) {
- /* Summary stream was cleared during acquire.
- * Create static timeline objects that will be
- * read by client. */
- kbase_create_timeline_objects(kctx);
- }
- break;
- }
- case KBASE_FUNC_TLSTREAM_FLUSH:
- {
- struct kbase_uk_tlstream_flush *tlstream_flush =
- args;
- if (sizeof(*tlstream_flush) != args_size)
- goto bad_size;
- kbase_tlstream_flush_streams();
- break;
- }
- #if MALI_UNIT_TEST
- case KBASE_FUNC_TLSTREAM_TEST:
- {
- struct kbase_uk_tlstream_test *tlstream_test = args;
- if (sizeof(*tlstream_test) != args_size)
- goto bad_size;
- kbase_tlstream_test(
- tlstream_test->tpw_count,
- tlstream_test->msg_delay,
- tlstream_test->msg_count,
- tlstream_test->aux_msg);
- break;
- }
- case KBASE_FUNC_TLSTREAM_STATS:
- {
- struct kbase_uk_tlstream_stats *tlstream_stats = args;
- if (sizeof(*tlstream_stats) != args_size)
- goto bad_size;
- kbase_tlstream_stats(
- &tlstream_stats->bytes_collected,
- &tlstream_stats->bytes_generated);
- break;
- }
- #endif /* MALI_UNIT_TEST */
- case KBASE_FUNC_GET_CONTEXT_ID:
- {
- struct kbase_uk_context_id *info = args;
- info->id = kctx->id;
- break;
- }
- case KBASE_FUNC_SOFT_EVENT_UPDATE:
- {
- struct kbase_uk_soft_event_update *update = args;
- if (sizeof(*update) != args_size)
- goto bad_size;
- if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
- (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
- (update->flags != 0))
- goto out_bad;
- if (kbasep_write_soft_event_status(
- kctx, update->evt,
- update->new_status) != 0) {
- ukh->ret = MALI_ERROR_FUNCTION_FAILED;
- break;
- }
- if (update->new_status == BASE_JD_SOFT_EVENT_SET)
- kbasep_complete_triggered_soft_events(
- kctx, update->evt);
- break;
- }
- default:
- dev_err(kbdev->dev, "unknown ioctl %u\n", id);
- goto out_bad;
- }
- kbase_trace_call(kctx, args, id, args_size, false);
- return ret;
- bad_size:
- dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
- out_bad:
- return -EINVAL;
- }
- static struct kbase_device *to_kbase_device(struct device *dev)
- {
- return dev_get_drvdata(dev);
- }
- static int assign_irqs(struct platform_device *pdev)
- {
- struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
- int i;
- if (!kbdev)
- return -ENODEV;
- /* 3 IRQ resources */
- for (i = 0; i < 3; i++) {
- struct resource *irq_res;
- int irqtag;
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!irq_res) {
- dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
- return -ENOENT;
- }
- #ifdef CONFIG_OF
- if (!strcmp(irq_res->name, "JOB")) {
- irqtag = JOB_IRQ_TAG;
- } else if (!strcmp(irq_res->name, "MMU")) {
- irqtag = MMU_IRQ_TAG;
- } else if (!strcmp(irq_res->name, "GPU")) {
- irqtag = GPU_IRQ_TAG;
- } else {
- dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
- irq_res->name);
- return -EINVAL;
- }
- #else
- irqtag = i;
- #endif /* CONFIG_OF */
- kbdev->irqs[irqtag].irq = irq_res->start;
- kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
- }
- return 0;
- }
- /*
- * API to acquire device list mutex and
- * return pointer to the device list head
- */
- const struct list_head *kbase_dev_list_get(void)
- {
- mutex_lock(&kbase_dev_list_lock);
- return &kbase_dev_list;
- }
- KBASE_EXPORT_TEST_API(kbase_dev_list_get);
- /* API to release the device list mutex */
- void kbase_dev_list_put(const struct list_head *dev_list)
- {
- mutex_unlock(&kbase_dev_list_lock);
- }
- KBASE_EXPORT_TEST_API(kbase_dev_list_put);
- /* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
- struct kbase_device *kbase_find_device(int minor)
- {
- struct kbase_device *kbdev = NULL;
- struct list_head *entry;
- const struct list_head *dev_list = kbase_dev_list_get();
- list_for_each(entry, dev_list) {
- struct kbase_device *tmp;
- tmp = list_entry(entry, struct kbase_device, entry);
- if (tmp->mdev.minor == minor || minor == -1) {
- kbdev = tmp;
- get_device(kbdev->dev);
- break;
- }
- }
- kbase_dev_list_put(dev_list);
- return kbdev;
- }
- EXPORT_SYMBOL(kbase_find_device);
- void kbase_release_device(struct kbase_device *kbdev)
- {
- put_device(kbdev->dev);
- }
- EXPORT_SYMBOL(kbase_release_device);
- static int kbase_open(struct inode *inode, struct file *filp)
- {
- struct kbase_device *kbdev = NULL;
- struct kbase_context *kctx;
- int ret = 0;
- #ifdef CONFIG_DEBUG_FS
- char kctx_name[64];
- #endif
- kbdev = kbase_find_device(iminor(inode));
- if (!kbdev)
- return -ENODEV;
- kctx = kbase_create_context(kbdev, is_compat_task());
- if (!kctx) {
- ret = -ENOMEM;
- goto out;
- }
- init_waitqueue_head(&kctx->event_queue);
- filp->private_data = kctx;
- kctx->filp = filp;
- kctx->infinite_cache_active = kbdev->infinite_cache_active_default;
- #ifdef CONFIG_DEBUG_FS
- snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
- kctx->kctx_dentry = debugfs_create_dir(kctx_name,
- kbdev->debugfs_ctx_directory);
- if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
- ret = -ENOMEM;
- goto out;
- }
- #ifdef CONFIG_MALI_COH_USER
- /* if cache is completely coherent at hardware level, then remove the
- * infinite cache control support from debugfs.
- */
- #else
- debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry,
- &kctx->infinite_cache_active);
- #endif /* CONFIG_MALI_COH_USER */
- mutex_init(&kctx->mem_profile_lock);
- kbasep_jd_debugfs_ctx_add(kctx);
- kbase_debug_mem_view_init(filp);
- kbase_debug_job_fault_context_init(kctx);
- kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool);
- kbase_jit_debugfs_add(kctx);
- #endif /* CONFIG_DEBUG_FS */
- dev_dbg(kbdev->dev, "created base context\n");
- {
- struct kbasep_kctx_list_element *element;
- element = kzalloc(sizeof(*element), GFP_KERNEL);
- if (element) {
- mutex_lock(&kbdev->kctx_list_lock);
- element->kctx = kctx;
- list_add(&element->link, &kbdev->kctx_list);
- kbase_tlstream_tl_new_ctx(
- element->kctx,
- (u32)(element->kctx->id),
- (u32)(element->kctx->tgid));
- mutex_unlock(&kbdev->kctx_list_lock);
- } else {
- /* we don't treat this as a fail - just warn about it */
- dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
- }
- }
- return 0;
- out:
- kbase_release_device(kbdev);
- return ret;
- }
- static int kbase_release(struct inode *inode, struct file *filp)
- {
- struct kbase_context *kctx = filp->private_data;
- struct kbase_device *kbdev = kctx->kbdev;
- struct kbasep_kctx_list_element *element, *tmp;
- bool found_element = false;
- kbase_tlstream_tl_del_ctx(kctx);
- #ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(kctx->kctx_dentry);
- kbasep_mem_profile_debugfs_remove(kctx);
- kbase_debug_job_fault_context_term(kctx);
- #endif
- mutex_lock(&kbdev->kctx_list_lock);
- list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
- if (element->kctx == kctx) {
- list_del(&element->link);
- kfree(element);
- found_element = true;
- }
- }
- mutex_unlock(&kbdev->kctx_list_lock);
- if (!found_element)
- dev_warn(kbdev->dev, "kctx not in kctx_list\n");
- filp->private_data = NULL;
- mutex_lock(&kctx->vinstr_cli_lock);
- /* If this client was performing hwcnt dumping and did not explicitly
- * detach itself, remove it from the vinstr core now */
- if (kctx->vinstr_cli) {
- struct kbase_uk_hwcnt_setup setup;
- setup.dump_buffer = 0llu;
- kbase_vinstr_legacy_hwc_setup(
- kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
- }
- mutex_unlock(&kctx->vinstr_cli_lock);
- kbase_destroy_context(kctx);
- dev_dbg(kbdev->dev, "deleted base context\n");
- kbase_release_device(kbdev);
- return 0;
- }
- #define CALL_MAX_SIZE 536
- static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
- u32 size = _IOC_SIZE(cmd);
- struct kbase_context *kctx = filp->private_data;
- if (size > CALL_MAX_SIZE)
- return -ENOTTY;
- if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
- dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
- return -EFAULT;
- }
- if (kbase_dispatch(kctx, &msg, size) != 0)
- return -EFAULT;
- if (0 != copy_to_user((void __user *)arg, &msg, size)) {
- dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
- return -EFAULT;
- }
- return 0;
- }
- static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
- {
- struct kbase_context *kctx = filp->private_data;
- struct base_jd_event_v2 uevent;
- int out_count = 0;
- if (count < sizeof(uevent))
- return -ENOBUFS;
- do {
- while (kbase_event_dequeue(kctx, &uevent)) {
- if (out_count > 0)
- goto out;
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- if (wait_event_interruptible(kctx->event_queue,
- kbase_event_pending(kctx)) != 0)
- return -ERESTARTSYS;
- }
- if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
- if (out_count == 0)
- return -EPIPE;
- goto out;
- }
- if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
- return -EFAULT;
- buf += sizeof(uevent);
- out_count++;
- count -= sizeof(uevent);
- } while (count >= sizeof(uevent));
- out:
- return out_count * sizeof(uevent);
- }
- static unsigned int kbase_poll(struct file *filp, poll_table *wait)
- {
- struct kbase_context *kctx = filp->private_data;
- poll_wait(filp, &kctx->event_queue, wait);
- if (kbase_event_pending(kctx))
- return POLLIN | POLLRDNORM;
- return 0;
- }
- void kbase_event_wakeup(struct kbase_context *kctx)
- {
- KBASE_DEBUG_ASSERT(kctx);
- wake_up_interruptible(&kctx->event_queue);
- }
- KBASE_EXPORT_TEST_API(kbase_event_wakeup);
- static int kbase_check_flags(int flags)
- {
- /* Enforce that the driver keeps the O_CLOEXEC flag so that the file
- * descriptor is always closed across execve() and is never inherited by a
- * new program image.
- */
- if (0 == (flags & O_CLOEXEC))
- return -EINVAL;
- return 0;
- }
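- /*
- * Illustrative userspace sketch (not part of the driver): a minimal client
- * that keeps the O_CLOEXEC flag required by kbase_check_flags() and uses the
- * kbase_poll()/kbase_read() pair above to wait for job events. The
- * "/dev/mali0" node name and the 4 KiB buffer size are assumptions; a real
- * client would parse the returned struct base_jd_event_v2 records.
- *
- *   #include <fcntl.h>
- *   #include <poll.h>
- *   #include <stdio.h>
- *   #include <unistd.h>
- *
- *   int main(void)
- *   {
- *           int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
- *           struct pollfd pfd = { .fd = fd, .events = POLLIN };
- *           char buf[4096];
- *
- *           if (fd < 0)
- *                   return 1;
- *           if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
- *                   ssize_t n = read(fd, buf, sizeof(buf));
- *
- *                   printf("read %zd bytes of job events\n", n);
- *           }
- *           close(fd);
- *           return 0;
- *   }
- */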
- #ifdef CONFIG_64BIT
- /* The following function is taken from the kernel and just
- * renamed. As it's not exported to modules we must copy-paste it here.
- */
- static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
- *info)
- {
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long length, low_limit, high_limit, gap_start, gap_end;
- /* Adjust search length to account for worst case alignment overhead */
- length = info->length + info->align_mask;
- if (length < info->length)
- return -ENOMEM;
- /*
- * Adjust search limits by the desired length.
- * See implementation comment at top of unmapped_area().
- */
- gap_end = info->high_limit;
- if (gap_end < length)
- return -ENOMEM;
- high_limit = gap_end - length;
- if (info->low_limit > high_limit)
- return -ENOMEM;
- low_limit = info->low_limit + length;
- /* Check highest gap, which does not precede any rbtree node */
- gap_start = mm->highest_vm_end;
- if (gap_start <= high_limit)
- goto found_highest;
- /* Check if rbtree root looks promising */
- if (RB_EMPTY_ROOT(&mm->mm_rb))
- return -ENOMEM;
- vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
- if (vma->rb_subtree_gap < length)
- return -ENOMEM;
- while (true) {
- /* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
- if (gap_start <= high_limit && vma->vm_rb.rb_right) {
- struct vm_area_struct *right =
- rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb);
- if (right->rb_subtree_gap >= length) {
- vma = right;
- continue;
- }
- }
- check_current:
- /* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
- if (gap_end < low_limit)
- return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
- goto found;
- /* Visit left subtree if it looks promising */
- if (vma->vm_rb.rb_left) {
- struct vm_area_struct *left =
- rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb);
- if (left->rb_subtree_gap >= length) {
- vma = left;
- continue;
- }
- }
- /* Go back up the rbtree to find next candidate node */
- while (true) {
- struct rb_node *prev = &vma->vm_rb;
- if (!rb_parent(prev))
- return -ENOMEM;
- vma = rb_entry(rb_parent(prev),
- struct vm_area_struct, vm_rb);
- if (prev == vma->vm_rb.rb_right) {
- gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
- goto check_current;
- }
- }
- }
- found:
- /* We found a suitable gap. Clip it with the original high_limit. */
- if (gap_end > info->high_limit)
- gap_end = info->high_limit;
- found_highest:
- /* Compute highest gap address at the desired alignment */
- gap_end -= info->length;
- gap_end -= (gap_end - info->align_offset) & info->align_mask;
- VM_BUG_ON(gap_end < info->low_limit);
- VM_BUG_ON(gap_end < gap_start);
- return gap_end;
- }
- static unsigned long kbase_get_unmapped_area(struct file *filp,
- const unsigned long addr, const unsigned long len,
- const unsigned long pgoff, const unsigned long flags)
- {
- /* based on get_unmapped_area, but simplified slightly because some
- * values are known in advance */
- struct kbase_context *kctx = filp->private_data;
- struct mm_struct *mm = current->mm;
- struct vm_unmapped_area_info info;
- /* err on fixed address */
- if ((flags & MAP_FIXED) || addr)
- return -EINVAL;
- /* too big? */
- if (len > TASK_SIZE - SZ_2M)
- return -ENOMEM;
- if (kctx->is_compat)
- return current->mm->get_unmapped_area(filp, addr, len, pgoff,
- flags);
- if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
- info.high_limit = kctx->same_va_end << PAGE_SHIFT;
- info.align_mask = 0;
- info.align_offset = 0;
- } else {
- info.high_limit = min_t(unsigned long, mm->mmap_base,
- (kctx->same_va_end << PAGE_SHIFT));
- if (len >= SZ_2M) {
- info.align_offset = SZ_2M;
- info.align_mask = SZ_2M - 1;
- } else {
- info.align_mask = 0;
- info.align_offset = 0;
- }
- }
- info.flags = 0;
- info.length = len;
- info.low_limit = SZ_2M;
- return kbase_unmapped_area_topdown(&info);
- }
- #endif
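- /*
- * Worked example of the alignment step in kbase_unmapped_area_topdown()
- * (addresses are illustrative only): when kbase_get_unmapped_area() above
- * selects 2 MiB alignment (len >= SZ_2M), align_offset = SZ_2M and
- * align_mask = SZ_2M - 1, so a candidate gap_end of 0x7f6543210000 is
- * rounded down by
- *
- *   gap_end -= (gap_end - SZ_2M) & (SZ_2M - 1);
- *
- * which subtracts 0x10000 and yields 0x7f6543200000, the highest 2 MiB
- * aligned address not above the original candidate.
- */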
- static const struct file_operations kbase_fops = {
- .owner = THIS_MODULE,
- .open = kbase_open,
- .release = kbase_release,
- .read = kbase_read,
- .poll = kbase_poll,
- .unlocked_ioctl = kbase_ioctl,
- .compat_ioctl = kbase_ioctl,
- .mmap = kbase_mmap,
- .check_flags = kbase_check_flags,
- #ifdef CONFIG_64BIT
- .get_unmapped_area = kbase_get_unmapped_area,
- #endif
- };
- #ifndef CONFIG_MALI_NO_MALI
- void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
- {
- writel(value, kbdev->reg + offset);
- }
- u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
- {
- return readl(kbdev->reg + offset);
- }
- #endif /* !CONFIG_MALI_NO_MALI */
- /** Show callback for the @c power_policy sysfs file.
- *
- * This function is called to get the contents of the @c power_policy sysfs
- * file. This is a list of the available policies with the currently active one
- * surrounded by square brackets.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The output buffer for the sysfs file contents
- *
- * @return The number of bytes output to @c buf.
- */
- static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
- {
- struct kbase_device *kbdev;
- const struct kbase_pm_policy *current_policy;
- const struct kbase_pm_policy *const *policy_list;
- int policy_count;
- int i;
- ssize_t ret = 0;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- current_policy = kbase_pm_get_policy(kbdev);
- policy_count = kbase_pm_list_policies(&policy_list);
- for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
- if (policy_list[i] == current_policy)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
- else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
- }
- if (ret < PAGE_SIZE - 1) {
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
- } else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
- }
- /** Store callback for the @c power_policy sysfs file.
- *
- * This function is called when the @c power_policy sysfs file is written to.
- * It matches the requested policy against the available policies and if a
- * matching policy is found calls @ref kbase_pm_set_policy to change the
- * policy.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The value written to the sysfs file
- * @param count The number of bytes written to the sysfs file
- *
- * @return @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- const struct kbase_pm_policy *new_policy = NULL;
- const struct kbase_pm_policy *const *policy_list;
- int policy_count;
- int i;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- policy_count = kbase_pm_list_policies(&policy_list);
- for (i = 0; i < policy_count; i++) {
- if (sysfs_streq(policy_list[i]->name, buf)) {
- new_policy = policy_list[i];
- break;
- }
- }
- if (!new_policy) {
- dev_err(dev, "power_policy: policy not found\n");
- return -EINVAL;
- }
- kbase_pm_set_policy(kbdev, new_policy);
- return count;
- }
- /** The sysfs file @c power_policy.
- *
- * This is used for obtaining information about the available policies,
- * determining which policy is currently active, and changing the active
- * policy.
- */
- static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
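- /*
- * Illustrative userspace sketch (not part of the driver): reading and then
- * rewriting one of the sysfs attributes defined in this file (power_policy,
- * core_availability_policy, core_mask, ...). The attribute path and the new
- * value are passed on the command line because the exact sysfs location
- * depends on how the device is instantiated. For power_policy the read
- * returns something like "[demand] coarse_demand always_on"; the policy
- * names shown are only examples and depend on which policies are compiled in.
- *
- *   #include <stdio.h>
- *
- *   int main(int argc, char **argv)
- *   {
- *           char current[256];
- *           FILE *f;
- *
- *           if (argc != 3)
- *                   return 1;
- *           f = fopen(argv[1], "r");
- *           if (!f || !fgets(current, sizeof(current), f))
- *                   return 1;
- *           fclose(f);
- *           printf("current: %s", current);
- *           f = fopen(argv[1], "w");
- *           if (!f || fputs(argv[2], f) == EOF)
- *                   return 1;
- *           fclose(f);
- *           return 0;
- *   }
- */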
- /** Show callback for the @c core_availability_policy sysfs file.
- *
- * This function is called to get the contents of the @c core_availability_policy
- * sysfs file. This is a list of the available policies with the currently
- * active one surrounded by square brackets.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The output buffer for the sysfs file contents
- *
- * @return The number of bytes output to @c buf.
- */
- static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- const struct kbase_pm_ca_policy *current_policy;
- const struct kbase_pm_ca_policy *const *policy_list;
- int policy_count;
- int i;
- ssize_t ret = 0;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- current_policy = kbase_pm_ca_get_policy(kbdev);
- policy_count = kbase_pm_ca_list_policies(&policy_list);
- for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
- if (policy_list[i] == current_policy)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
- else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
- }
- if (ret < PAGE_SIZE - 1) {
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
- } else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
- }
- /** Store callback for the @c core_availability_policy sysfs file.
- *
- * This function is called when the @c core_availability_policy sysfs file is
- * written to. It matches the requested policy against the available policies
- * and if a matching policy is found calls @ref kbase_pm_set_policy to change
- * the policy.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The value written to the sysfs file
- * @param count The number of bytes written to the sysfs file
- *
- * @return @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- const struct kbase_pm_ca_policy *new_policy = NULL;
- const struct kbase_pm_ca_policy *const *policy_list;
- int policy_count;
- int i;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- policy_count = kbase_pm_ca_list_policies(&policy_list);
- for (i = 0; i < policy_count; i++) {
- if (sysfs_streq(policy_list[i]->name, buf)) {
- new_policy = policy_list[i];
- break;
- }
- }
- if (!new_policy) {
- dev_err(dev, "core_availability_policy: policy not found\n");
- return -EINVAL;
- }
- kbase_pm_ca_set_policy(kbdev, new_policy);
- return count;
- }
- /** The sysfs file @c core_availability_policy
- *
- * This is used for obtaining information about the available policies,
- * determining which policy is currently active, and changing the active
- * policy.
- */
- static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
- /** Show callback for the @c core_mask sysfs file.
- *
- * This function is called to get the contents of the @c core_mask sysfs
- * file.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The output buffer for the sysfs file contents
- *
- * @return The number of bytes output to @c buf.
- */
- static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret = 0;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current core mask (JS0) : 0x%llX\n",
- kbdev->pm.debug_core_mask[0]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current core mask (JS1) : 0x%llX\n",
- kbdev->pm.debug_core_mask[1]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current core mask (JS2) : 0x%llX\n",
- kbdev->pm.debug_core_mask[2]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Available core mask : 0x%llX\n",
- kbdev->gpu_props.props.raw_props.shader_present);
- return ret;
- }
- /** Store callback for the @c core_mask sysfs file.
- *
- * This function is called when the @c core_mask sysfs file is written to.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The value written to the sysfs file
- * @param count The number of bytes written to the sysfs file
- *
- * @return @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- u64 new_core_mask[3];
- int items;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- items = sscanf(buf, "%llx %llx %llx",
- &new_core_mask[0], &new_core_mask[1],
- &new_core_mask[2]);
- if (items == 1)
- new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
- if (items == 1 || items == 3) {
- u64 shader_present =
- kbdev->gpu_props.props.raw_props.shader_present;
- u64 group0_core_mask =
- kbdev->gpu_props.props.coherency_info.group[0].
- core_mask;
- if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
- !(new_core_mask[0] & group0_core_mask) ||
- (new_core_mask[1] & shader_present) !=
- new_core_mask[1] ||
- !(new_core_mask[1] & group0_core_mask) ||
- (new_core_mask[2] & shader_present) !=
- new_core_mask[2] ||
- !(new_core_mask[2] & group0_core_mask)) {
- dev_err(dev, "power_policy: invalid core specification\n");
- return -EINVAL;
- }
- if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
- kbdev->pm.debug_core_mask[1] !=
- new_core_mask[1] ||
- kbdev->pm.debug_core_mask[2] !=
- new_core_mask[2]) {
- unsigned long flags;
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
- kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
- new_core_mask[1], new_core_mask[2]);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock,
- flags);
- }
- return count;
- }
- dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
- "Use format <core_mask>\n"
- "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
- return -EINVAL;
- }
- /** The sysfs file @c core_mask.
- *
- * This is used to restrict shader core availability for debugging purposes.
- * Reading it will show the current core mask and the mask of cores available.
- * Writing to it will set the current core mask.
- */
- static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
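- /*
- * Worked example of the core_mask validation above (values are illustrative,
- * not taken from any particular GPU): with shader_present = 0xF and
- * group0_core_mask = 0xF, writing "0x3" sets all three per-JS masks to 0x3,
- * since 0x3 is a subset of shader_present and intersects group 0; writing
- * "0x30" is rejected with -EINVAL because 0x30 & shader_present != 0x30;
- * and writing "0x3 0x1 0xF" sets the JS0, JS1 and JS2 masks individually.
- */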
- /**
- * set_soft_event_timeout() - Store callback for the soft_event_timeout sysfs
- * file.
- *
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The value written to the sysfs file.
- * @count: The number of bytes written to the sysfs file.
- *
- * This allows setting the timeout for software event jobs. Waiting jobs will
- * be cancelled after this period expires. This is expressed in milliseconds.
- *
- * Return: count if the function succeeded. An error code on failure.
- */
- static ssize_t set_soft_event_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int soft_event_timeout_ms;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- if ((kstrtoint(buf, 0, &soft_event_timeout_ms) != 0) ||
- (soft_event_timeout_ms <= 0))
- return -EINVAL;
- atomic_set(&kbdev->js_data.soft_event_timeout_ms,
- soft_event_timeout_ms);
- return count;
- }
- /**
- * show_soft_event_timeout() - Show callback for the soft_event_timeout sysfs
- * file.
- *
- * This will return the timeout for the software event jobs.
- *
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The output buffer for the sysfs file contents.
- *
- * Return: The number of bytes output to buf.
- */
- static ssize_t show_soft_event_timeout(struct device *dev,
- struct device_attribute *attr,
- char * const buf)
- {
- struct kbase_device *kbdev;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- return scnprintf(buf, PAGE_SIZE, "%i\n",
- atomic_read(&kbdev->js_data.soft_event_timeout_ms));
- }
- static DEVICE_ATTR(soft_event_timeout, S_IRUGO | S_IWUSR,
- show_soft_event_timeout, set_soft_event_timeout);
- /** Store callback for the @c js_timeouts sysfs file.
- *
- * This function is called when the @c js_timeouts sysfs file is written to.
- * The file accepts eight values separated by whitespace. The values are
- * basically the same as the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL,
- * JS_HARD_STOP_TICKS_SS, JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING,
- * JS_RESET_TICKS_SS, JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING
- * configuration values (in that order), with the difference that the
- * js_timeouts values are expressed in MILLISECONDS.
- *
- * The js_timeouts sysfs file allows the values currently in use by the job
- * scheduler to be overridden. Note that a value needs to be other than 0 for
- * it to override the current job scheduler value.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The value written to the sysfs file
- * @param count The number of bytes written to the sysfs file
- *
- * @return @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int items;
- long js_soft_stop_ms;
- long js_soft_stop_ms_cl;
- long js_hard_stop_ms_ss;
- long js_hard_stop_ms_cl;
- long js_hard_stop_ms_dumping;
- long js_reset_ms_ss;
- long js_reset_ms_cl;
- long js_reset_ms_dumping;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
- &js_soft_stop_ms, &js_soft_stop_ms_cl,
- &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
- &js_hard_stop_ms_dumping, &js_reset_ms_ss,
- &js_reset_ms_cl, &js_reset_ms_dumping);
- if (items == 8) {
- u64 ticks;
- if (js_soft_stop_ms >= 0) {
- ticks = js_soft_stop_ms * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_soft_stop_ticks = ticks;
- } else {
- kbdev->js_soft_stop_ticks = -1;
- }
- if (js_soft_stop_ms_cl >= 0) {
- ticks = js_soft_stop_ms_cl * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_soft_stop_ticks_cl = ticks;
- } else {
- kbdev->js_soft_stop_ticks_cl = -1;
- }
- if (js_hard_stop_ms_ss >= 0) {
- ticks = js_hard_stop_ms_ss * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_hard_stop_ticks_ss = ticks;
- } else {
- kbdev->js_hard_stop_ticks_ss = -1;
- }
- if (js_hard_stop_ms_cl >= 0) {
- ticks = js_hard_stop_ms_cl * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_hard_stop_ticks_cl = ticks;
- } else {
- kbdev->js_hard_stop_ticks_cl = -1;
- }
- if (js_hard_stop_ms_dumping >= 0) {
- ticks = js_hard_stop_ms_dumping * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_hard_stop_ticks_dumping = ticks;
- } else {
- kbdev->js_hard_stop_ticks_dumping = -1;
- }
- if (js_reset_ms_ss >= 0) {
- ticks = js_reset_ms_ss * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_reset_ticks_ss = ticks;
- } else {
- kbdev->js_reset_ticks_ss = -1;
- }
- if (js_reset_ms_cl >= 0) {
- ticks = js_reset_ms_cl * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_reset_ticks_cl = ticks;
- } else {
- kbdev->js_reset_ticks_cl = -1;
- }
- if (js_reset_ms_dumping >= 0) {
- ticks = js_reset_ms_dumping * 1000000ULL;
- do_div(ticks, kbdev->js_data.scheduling_period_ns);
- kbdev->js_reset_ticks_dumping = ticks;
- } else {
- kbdev->js_reset_ticks_dumping = -1;
- }
- kbdev->js_timeouts_updated = true;
- dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_soft_stop_ticks,
- js_soft_stop_ms);
- dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_soft_stop_ticks_cl,
- js_soft_stop_ms_cl);
- dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_hard_stop_ticks_ss,
- js_hard_stop_ms_ss);
- dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_hard_stop_ticks_cl,
- js_hard_stop_ms_cl);
- dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n",
- (unsigned long)
- kbdev->js_hard_stop_ticks_dumping,
- js_hard_stop_ms_dumping);
- dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_reset_ticks_ss,
- js_reset_ms_ss);
- dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_reset_ticks_cl,
- js_reset_ms_cl);
- dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n",
- (unsigned long)kbdev->js_reset_ticks_dumping,
- js_reset_ms_dumping);
- return count;
- }
- dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
- "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
- "Write 0 for no change, -1 to restore default timeout\n");
- return -EINVAL;
- }
- /** Show callback for the @c js_timeouts sysfs file.
- *
- * This function is called to get the contents of the @c js_timeouts sysfs
- * file. It returns the last values written to the js_timeouts sysfs file.
- * If the file has not been written yet, the values currently in use by the
- * job scheduler are returned.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The output buffer for the sysfs file contents
- *
- * @return The number of bytes output to @c buf.
- */
- static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- u64 ms;
- unsigned long js_soft_stop_ms;
- unsigned long js_soft_stop_ms_cl;
- unsigned long js_hard_stop_ms_ss;
- unsigned long js_hard_stop_ms_cl;
- unsigned long js_hard_stop_ms_dumping;
- unsigned long js_reset_ms_ss;
- unsigned long js_reset_ms_cl;
- unsigned long js_reset_ms_dumping;
- unsigned long ticks;
- u32 scheduling_period_ns;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- /* If no contexts have been scheduled since js_timeouts was last written
- * to, the new timeouts might not have been latched yet. So check if an
- * update is pending and use the new values if necessary. */
- if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
- scheduling_period_ns = kbdev->js_scheduling_period_ns;
- else
- scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
- if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
- ticks = kbdev->js_soft_stop_ticks;
- else
- ticks = kbdev->js_data.soft_stop_ticks;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_soft_stop_ms = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
- ticks = kbdev->js_soft_stop_ticks_cl;
- else
- ticks = kbdev->js_data.soft_stop_ticks_cl;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_soft_stop_ms_cl = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
- ticks = kbdev->js_hard_stop_ticks_ss;
- else
- ticks = kbdev->js_data.hard_stop_ticks_ss;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_hard_stop_ms_ss = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
- ticks = kbdev->js_hard_stop_ticks_cl;
- else
- ticks = kbdev->js_data.hard_stop_ticks_cl;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_hard_stop_ms_cl = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
- ticks = kbdev->js_hard_stop_ticks_dumping;
- else
- ticks = kbdev->js_data.hard_stop_ticks_dumping;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_hard_stop_ms_dumping = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
- ticks = kbdev->js_reset_ticks_ss;
- else
- ticks = kbdev->js_data.gpu_reset_ticks_ss;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_reset_ms_ss = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
- ticks = kbdev->js_reset_ticks_cl;
- else
- ticks = kbdev->js_data.gpu_reset_ticks_cl;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_reset_ms_cl = (unsigned long)ms;
- if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
- ticks = kbdev->js_reset_ticks_dumping;
- else
- ticks = kbdev->js_data.gpu_reset_ticks_dumping;
- ms = (u64)ticks * scheduling_period_ns;
- do_div(ms, 1000000UL);
- js_reset_ms_dumping = (unsigned long)ms;
- ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
- js_soft_stop_ms, js_soft_stop_ms_cl,
- js_hard_stop_ms_ss, js_hard_stop_ms_cl,
- js_hard_stop_ms_dumping, js_reset_ms_ss,
- js_reset_ms_cl, js_reset_ms_dumping);
- if (ret >= PAGE_SIZE) {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
- }
- /** The sysfs file @c js_timeouts.
- *
- * This is used to override the current job scheduler values for
- * JS_SOFT_STOP_TICKS
- * JS_SOFT_STOP_TICKS_CL
- * JS_HARD_STOP_TICKS_SS
- * JS_HARD_STOP_TICKS_CL
- * JS_HARD_STOP_TICKS_DUMPING
- * JS_RESET_TICKS_SS
- * JS_RESET_TICKS_CL
- * JS_RESET_TICKS_DUMPING.
- */
- static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
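- /*
- * Worked example of the ms <-> ticks conversion used by set_js_timeouts()
- * and show_js_timeouts() above (the 100 ms scheduling period is an assumed
- * value): with scheduling_period_ns = 100000000, writing a soft-stop timeout
- * of 500 ms stores
- *
- *   ticks = 500 * 1000000 / 100000000 = 5
- *
- * and reading the file converts back with 5 * 100000000 / 1000000 = 500 ms,
- * so the round trip preserves the value up to rounding when the period does
- * not divide the timeout exactly.
- */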
- /**
- * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
- * file
- * @dev: The device the sysfs file is for
- * @attr: The attributes of the sysfs file
- * @buf: The value written to the sysfs file
- * @count: The number of bytes written to the sysfs file
- *
- * This function is called when the js_scheduling_period sysfs file is written
- * to. It checks the data written and, if valid, updates the js_scheduling_period
- * value.
- *
- * Return: @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_js_scheduling_period(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int ret;
- unsigned int js_scheduling_period;
- u32 new_scheduling_period_ns;
- u32 old_period;
- u64 ticks;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = kstrtouint(buf, 0, &js_scheduling_period);
- if (ret || !js_scheduling_period) {
- dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
- "Use format <js_scheduling_period_ms>\n");
- return -EINVAL;
- }
- new_scheduling_period_ns = js_scheduling_period * 1000000;
- /* Update scheduling timeouts */
- mutex_lock(&kbdev->js_data.runpool_mutex);
- /* If no contexts have been scheduled since js_timeouts was last written
- * to, the new timeouts might not have been latched yet. So check if an
- * update is pending and use the new values if necessary. */
- /* Use previous 'new' scheduling period as a base if present. */
- if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns)
- old_period = kbdev->js_scheduling_period_ns;
- else
- old_period = kbdev->js_data.scheduling_period_ns;
- if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0)
- ticks = (u64)kbdev->js_soft_stop_ticks * old_period;
- else
- ticks = (u64)kbdev->js_data.soft_stop_ticks *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_soft_stop_ticks = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0)
- ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period;
- else
- ticks = (u64)kbdev->js_data.soft_stop_ticks_cl *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_soft_stop_ticks_cl = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0)
- ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period;
- else
- ticks = (u64)kbdev->js_data.hard_stop_ticks_ss *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0)
- ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period;
- else
- ticks = (u64)kbdev->js_data.hard_stop_ticks_cl *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0)
- ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period;
- else
- ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0)
- ticks = (u64)kbdev->js_reset_ticks_ss * old_period;
- else
- ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_reset_ticks_ss = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0)
- ticks = (u64)kbdev->js_reset_ticks_cl * old_period;
- else
- ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_reset_ticks_cl = ticks ? ticks : 1;
- if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0)
- ticks = (u64)kbdev->js_reset_ticks_dumping * old_period;
- else
- ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping *
- kbdev->js_data.scheduling_period_ns;
- do_div(ticks, new_scheduling_period_ns);
- kbdev->js_reset_ticks_dumping = ticks ? ticks : 1;
- kbdev->js_scheduling_period_ns = new_scheduling_period_ns;
- kbdev->js_timeouts_updated = true;
- mutex_unlock(&kbdev->js_data.runpool_mutex);
- dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
- js_scheduling_period);
- return count;
- }
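- /*
- * Worked example of the rescaling above (assumed numbers): if the soft-stop
- * timeout is currently 5 ticks under a 100 ms scheduling period (500 ms of
- * wall-clock time) and 50 is written to js_scheduling_period, the new tick
- * count becomes
- *
- *   ticks = 5 * 100000000 / 50000000 = 10
- *
- * so the timeout still expires after 500 ms under the shorter period.
- */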
- /**
- * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
- * entry.
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The output buffer for the sysfs file contents.
- *
- * This function is called to get the current period used for JS
- * scheduling.
- *
- * Return: The number of bytes output to buf.
- */
- static ssize_t show_js_scheduling_period(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- u32 period;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0)
- period = kbdev->js_scheduling_period_ns;
- else
- period = kbdev->js_data.scheduling_period_ns;
- ret = scnprintf(buf, PAGE_SIZE, "%d\n",
- period / 1000000);
- return ret;
- }
- static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
- show_js_scheduling_period, set_js_scheduling_period);
- #if !MALI_CUSTOMER_RELEASE
- /** Store callback for the @c force_replay sysfs file.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The value written to the sysfs file
- * @param count The number of bytes written to the sysfs file
- *
- * @return @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- if (!strncmp("limit=", buf, MIN(6, count))) {
- unsigned int force_replay_limit;
- int items = sscanf(buf, "limit=%u", &force_replay_limit);
- if (items == 1) {
- kbdev->force_replay_random = false;
- kbdev->force_replay_limit = force_replay_limit;
- kbdev->force_replay_count = 0;
- return count;
- }
- } else if (!strncmp("random_limit", buf, MIN(12, count))) {
- kbdev->force_replay_random = true;
- kbdev->force_replay_count = 0;
- return count;
- } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
- kbdev->force_replay_random = false;
- kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
- kbdev->force_replay_count = 0;
- return count;
- } else if (!strncmp("core_req=", buf, MIN(9, count))) {
- unsigned int core_req;
- int items = sscanf(buf, "core_req=%x", &core_req);
- if (items == 1) {
- kbdev->force_replay_core_req = (base_jd_core_req)core_req;
- return count;
- }
- }
- dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
- return -EINVAL;
- }
- /** Show callback for the @c force_replay sysfs file.
- *
- * This function is called to get the contents of the @c force_replay sysfs
- * file. It returns the last value written to the force_replay sysfs file.
- * If the file has not been written yet, the value will be 0.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The output buffer for the sysfs file contents
- *
- * @return The number of bytes output to @c buf.
- */
- static ssize_t show_force_replay(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- if (kbdev->force_replay_random)
- ret = scnprintf(buf, PAGE_SIZE,
- "limit=0\nrandom_limit\ncore_req=%x\n",
- kbdev->force_replay_core_req);
- else
- ret = scnprintf(buf, PAGE_SIZE,
- "limit=%u\nnorandom_limit\ncore_req=%x\n",
- kbdev->force_replay_limit,
- kbdev->force_replay_core_req);
- if (ret >= PAGE_SIZE) {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
- }
- /** The sysfs file @c force_replay.
- *
- */
- static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
- set_force_replay);
- #endif /* !MALI_CUSTOMER_RELEASE */
- #ifdef CONFIG_MALI_DEBUG
- static ssize_t set_js_softstop_always(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int ret;
- int softstop_always;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = kstrtoint(buf, 0, &softstop_always);
- if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
- dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
- "Use format <soft_stop_always>\n");
- return -EINVAL;
- }
- kbdev->js_data.softstop_always = (bool) softstop_always;
- dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
- (kbdev->js_data.softstop_always) ?
- "Enabled" : "Disabled");
- return count;
- }
- static ssize_t show_js_softstop_always(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
- if (ret >= PAGE_SIZE) {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
- }
- /*
- * By default, soft-stops are disabled when only a single context is present. The ability to
- * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes.
- * (see the CL t6xx_stress_1 unit test for an example where this feature is used.)
- */
- static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
- #endif /* CONFIG_MALI_DEBUG */
- #ifdef CONFIG_MALI_DEBUG
- typedef void (kbasep_debug_command_func) (struct kbase_device *);
- enum kbasep_debug_command_code {
- KBASEP_DEBUG_COMMAND_DUMPTRACE,
- /* This must be the last enum */
- KBASEP_DEBUG_COMMAND_COUNT
- };
- struct kbasep_debug_command {
- char *str;
- kbasep_debug_command_func *func;
- };
- /** Debug commands supported by the driver */
- static const struct kbasep_debug_command debug_commands[] = {
- {
- .str = "dumptrace",
- .func = &kbasep_trace_dump,
- }
- };
- /** Show callback for the @c debug_command sysfs file.
- *
- * This function is called to get the contents of the @c debug_command sysfs
- * file. This is a list of the available debug commands, separated by newlines.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The output buffer for the sysfs file contents
- *
- * @return The number of bytes output to @c buf.
- */
- static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- int i;
- ssize_t ret = 0;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
- if (ret >= PAGE_SIZE) {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
- }
- /** Store callback for the @c debug_command sysfs file.
- *
- * This function is called when the @c debug_command sysfs file is written to.
- * It matches the requested command against the available commands, and if
- * a matching command is found calls the associated function from
- * @ref debug_commands to issue the command.
- *
- * @param dev The device this sysfs file is for
- * @param attr The attributes of the sysfs file
- * @param buf The value written to the sysfs file
- * @param count The number of bytes written to the sysfs file
- *
- * @return @c count if the function succeeded. An error code on failure.
- */
- static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int i;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
- if (sysfs_streq(debug_commands[i].str, buf)) {
- debug_commands[i].func(kbdev);
- return count;
- }
- }
- /* Debug Command not found */
- dev_err(dev, "debug_command: command not known\n");
- return -EINVAL;
- }
- /** The sysfs file @c debug_command.
- *
- * This is used to issue general debug commands to the device driver.
- * Reading it will produce a list of debug commands, separated by newlines.
- * Writing to it with one of those commands will issue said command.
- */
- static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
- #endif /* CONFIG_MALI_DEBUG */
- /**
- * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The output buffer to receive the GPU information.
- *
- * This function is called to get a description of the present Mali
- * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
- * number of cores, the hardware version and the raw product id. For
- * example:
- *
- * Mali-T60x MP4 r0p0 0x6956
- *
- * Return: The number of bytes output to buf.
- */
- static ssize_t kbase_show_gpuinfo(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- static const struct gpu_product_id_name {
- unsigned id;
- char *name;
- } gpu_product_id_names[] = {
- { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
- { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
- { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
- { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
- { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
- { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
- { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
- { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
- { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G71" },
- };
- const char *product_name = "(Unknown Mali GPU)";
- struct kbase_device *kbdev;
- u32 gpu_id;
- unsigned product_id, product_id_mask;
- unsigned i;
- bool is_new_format;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
- product_id_mask =
- (is_new_format ?
- GPU_ID2_PRODUCT_MODEL :
- GPU_ID_VERSION_PRODUCT_ID) >>
- GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
- const struct gpu_product_id_name *p = &gpu_product_id_names[i];
- if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
- (p->id & product_id_mask) ==
- (product_id & product_id_mask)) {
- product_name = p->name;
- break;
- }
- }
- return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
- product_name, kbdev->gpu_props.num_cores,
- (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
- (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
- product_id);
- }
- static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
- /**
- * set_dvfs_period - Store callback for the dvfs_period sysfs file.
- * @dev: The device this sysfs file is for
- * @attr: The attributes of the sysfs file
- * @buf: The value written to the sysfs file
- * @count: The number of bytes written to the sysfs file
- *
- * This function is called when the dvfs_period sysfs file is written to. It
- * checks the data written and, if valid, updates the DVFS period variable.
- *
- * Return: @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_dvfs_period(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int ret;
- int dvfs_period;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = kstrtoint(buf, 0, &dvfs_period);
- if (ret || dvfs_period <= 0) {
- dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
- "Use format <dvfs_period_ms>\n");
- return -EINVAL;
- }
- kbdev->pm.dvfs_period = dvfs_period;
- dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
- return count;
- }
- /**
- * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The output buffer for the sysfs file contents.
- *
- * This function is called to get the current period used for the DVFS sample
- * timer.
- *
- * Return: The number of bytes output to buf.
- */
- static ssize_t show_dvfs_period(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
- return ret;
- }
- static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
- set_dvfs_period);
- /**
- * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
- * @dev: The device this sysfs file is for
- * @attr: The attributes of the sysfs file
- * @buf: The value written to the sysfs file
- * @count: The number of bytes written to the sysfs file
- *
- * This function is called when the pm_poweroff sysfs file is written to.
- *
- * This file contains three values separated by whitespace. The values
- * are gpu_poweroff_time (the period of the poweroff timer, in ns),
- * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
- * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
- * ticks before the GPU is powered off), in that order.
- *
- * Return: @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_pm_poweroff(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int items;
- u64 gpu_poweroff_time;
- unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
- &poweroff_shader_ticks,
- &poweroff_gpu_ticks);
- if (items != 3) {
- dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
- "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
- return -EINVAL;
- }
- kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
- kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
- kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
- return count;
- }
- /**
- * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The output buffer for the sysfs file contents.
- *
- * This function is called to get the current power-off timer settings.
- *
- * Return: The number of bytes output to buf.
- */
- static ssize_t show_pm_poweroff(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
- ktime_to_ns(kbdev->pm.gpu_poweroff_time),
- kbdev->pm.poweroff_shader_ticks,
- kbdev->pm.poweroff_gpu_ticks);
- return ret;
- }
- static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
- set_pm_poweroff);
- /**
- * set_reset_timeout - Store callback for the reset_timeout sysfs file.
- * @dev: The device this sysfs file is for
- * @attr: The attributes of the sysfs file
- * @buf: The value written to the sysfs file
- * @count: The number of bytes written to the sysfs file
- *
- * This function is called when the reset_timeout sysfs file is written to. It
- * checks the data written, and if valid updates the reset timeout.
- *
- * Return: @c count if the function succeeded. An error code on failure.
- */
- static ssize_t set_reset_timeout(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- int ret;
- int reset_timeout;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = kstrtoint(buf, 0, &reset_timeout);
- if (ret || reset_timeout <= 0) {
- dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
- "Use format <reset_timeout_ms>\n");
- return -EINVAL;
- }
- kbdev->reset_timeout_ms = reset_timeout;
- dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
- return count;
- }
- /**
- * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
- * @dev: The device this sysfs file is for.
- * @attr: The attributes of the sysfs file.
- * @buf: The output buffer for the sysfs file contents.
- *
- * This function is called to get the current reset timeout.
- *
- * Return: The number of bytes output to buf.
- */
- static ssize_t show_reset_timeout(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
- return ret;
- }
- static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
- set_reset_timeout);
- static ssize_t show_mem_pool_size(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
- kbase_mem_pool_size(&kbdev->mem_pool));
- return ret;
- }
- static ssize_t set_mem_pool_size(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- unsigned long new_size;
- int err;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- err = kstrtoul(buf, 0, &new_size);
- if (err)
- return err;
- kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
- return count;
- }
- static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
- set_mem_pool_size);
- static ssize_t show_mem_pool_max_size(struct device *dev,
- struct device_attribute *attr, char * const buf)
- {
- struct kbase_device *kbdev;
- ssize_t ret;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
- kbase_mem_pool_max_size(&kbdev->mem_pool));
- return ret;
- }
- static ssize_t set_mem_pool_max_size(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- struct kbase_device *kbdev;
- unsigned long new_max_size;
- int err;
- kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- err = kstrtoul(buf, 0, &new_max_size);
- if (err)
- return -EINVAL;
- kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
- return count;
- }
- static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
- set_mem_pool_max_size);
- static int kbasep_secure_mode_enable(struct kbase_device *kbdev)
- {
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_SET_PROTECTED_MODE, NULL);
- return 0;
- }
- static int kbasep_secure_mode_disable(struct kbase_device *kbdev)
- {
- if (!kbase_prepare_to_reset_gpu_locked(kbdev))
- return -EBUSY;
- kbase_reset_gpu_locked(kbdev);
- return 0;
- }
- static struct kbase_secure_ops kbasep_secure_ops = {
- .secure_mode_enable = kbasep_secure_mode_enable,
- .secure_mode_disable = kbasep_secure_mode_disable,
- };
- static void kbasep_secure_mode_init(struct kbase_device *kbdev)
- {
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
- /* Use native secure ops */
- kbdev->secure_ops = &kbasep_secure_ops;
- kbdev->secure_mode_support = true;
- }
- #ifdef SECURE_CALLBACKS
- else {
- kbdev->secure_ops = SECURE_CALLBACKS;
- kbdev->secure_mode_support = false;
- if (kbdev->secure_ops) {
- int err;
- /* Make sure secure mode is disabled on startup */
- err = kbdev->secure_ops->secure_mode_disable(kbdev);
- /* secure_mode_disable() returns -EINVAL if not
- * supported
- */
- kbdev->secure_mode_support = (err != -EINVAL);
- }
- }
- #endif
- }
- #ifdef CONFIG_MALI_NO_MALI
- static int kbase_common_reg_map(struct kbase_device *kbdev)
- {
- return 0;
- }
- static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
- {
- }
- #else /* CONFIG_MALI_NO_MALI */
- static int kbase_common_reg_map(struct kbase_device *kbdev)
- {
- int err = -ENOMEM;
- if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
- dev_err(kbdev->dev, "Register window unavailable\n");
- err = -EIO;
- goto out_region;
- }
- kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
- if (!kbdev->reg) {
- dev_err(kbdev->dev, "Can't remap register window\n");
- err = -EINVAL;
- goto out_ioremap;
- }
- return 0;
- out_ioremap:
- release_mem_region(kbdev->reg_start, kbdev->reg_size);
- out_region:
- return err;
- }
- static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
- {
- if (kbdev->reg) {
- iounmap(kbdev->reg);
- release_mem_region(kbdev->reg_start, kbdev->reg_size);
- kbdev->reg = NULL;
- kbdev->reg_start = 0;
- kbdev->reg_size = 0;
- }
- }
- #endif /* CONFIG_MALI_NO_MALI */
- static int registers_map(struct kbase_device * const kbdev)
- {
- /* the first memory resource is the physical address of the GPU
- * registers */
- struct platform_device *pdev = to_platform_device(kbdev->dev);
- struct resource *reg_res;
- int err;
- reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!reg_res) {
- dev_err(kbdev->dev, "Invalid register resource\n");
- return -ENOENT;
- }
- kbdev->reg_start = reg_res->start;
- kbdev->reg_size = resource_size(reg_res);
- err = kbase_common_reg_map(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Failed to map registers\n");
- return err;
- }
- return 0;
- }
- static void registers_unmap(struct kbase_device *kbdev)
- {
- kbase_common_reg_unmap(kbdev);
- }
- static int power_control_init(struct platform_device *pdev)
- {
- struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
- int err = 0;
- if (!kbdev)
- return -ENODEV;
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
- && defined(CONFIG_REGULATOR)
- kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
- if (IS_ERR_OR_NULL(kbdev->regulator)) {
- err = PTR_ERR(kbdev->regulator);
- kbdev->regulator = NULL;
- if (err == -EPROBE_DEFER) {
- dev_err(&pdev->dev, "Failed to get regulator\n");
- return err;
- }
- dev_info(kbdev->dev,
- "Continuing without Mali regulator control\n");
- /* Allow probe to continue without regulator */
- }
- #endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
- kbdev->clock = clk_get(kbdev->dev, "clk_mali");
- if (IS_ERR_OR_NULL(kbdev->clock)) {
- err = PTR_ERR(kbdev->clock);
- kbdev->clock = NULL;
- if (err == -EPROBE_DEFER) {
- dev_err(&pdev->dev, "Failed to get clock\n");
- goto fail;
- }
- dev_info(kbdev->dev, "Continuing without Mali clock control\n");
- /* Allow probe to continue without clock. */
- } else {
- err = clk_prepare_enable(kbdev->clock);
- if (err) {
- dev_err(kbdev->dev,
- "Failed to prepare and enable clock (%d)\n",
- err);
- goto fail;
- }
- }
- #if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
- /* Register the OPPs if they are available in device tree */
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
- err = dev_pm_opp_of_add_table(kbdev->dev);
- #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- err = of_init_opp_table(kbdev->dev);
- #else
- err = 0;
- #endif /* LINUX_VERSION_CODE */
- if (err)
- dev_dbg(kbdev->dev, "OPP table not found\n");
- #endif /* CONFIG_OF && CONFIG_PM_OPP */
- return 0;
- fail:
- if (kbdev->clock != NULL) {
- clk_put(kbdev->clock);
- kbdev->clock = NULL;
- }
- #ifdef CONFIG_REGULATOR
- if (kbdev->regulator != NULL) {
- regulator_put(kbdev->regulator);
- kbdev->regulator = NULL;
- }
- #endif
- return err;
- }
- static void power_control_term(struct kbase_device *kbdev)
- {
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
- dev_pm_opp_of_remove_table(kbdev->dev);
- #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
- of_free_opp_table(kbdev->dev);
- #endif
- if (kbdev->clock) {
- clk_disable_unprepare(kbdev->clock);
- clk_put(kbdev->clock);
- kbdev->clock = NULL;
- }
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
- && defined(CONFIG_REGULATOR)
- if (kbdev->regulator) {
- regulator_put(kbdev->regulator);
- kbdev->regulator = NULL;
- }
- #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
- }
- #ifdef CONFIG_DEBUG_FS
- #if KBASE_GPU_RESET_EN
- #include <mali_kbase_hwaccess_jm.h>
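- /* Force a GPU reset so that quirk values written through the debugfs files
- * below are re-applied to the hardware.
- */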
- static void trigger_quirks_reload(struct kbase_device *kbdev)
- {
- kbase_pm_context_active(kbdev);
- if (kbase_prepare_to_reset_gpu(kbdev))
- kbase_reset_gpu(kbdev);
- kbase_pm_context_idle(kbdev);
- }
- #define MAKE_QUIRK_ACCESSORS(type) \
- static int type##_quirks_set(void *data, u64 val) \
- { \
- struct kbase_device *kbdev; \
- kbdev = (struct kbase_device *)data; \
- kbdev->hw_quirks_##type = (u32)val; \
- trigger_quirks_reload(kbdev); \
- return 0;\
- } \
- \
- static int type##_quirks_get(void *data, u64 *val) \
- { \
- struct kbase_device *kbdev;\
- kbdev = (struct kbase_device *)data;\
- *val = kbdev->hw_quirks_##type;\
- return 0;\
- } \
- DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
- type##_quirks_set, "%llu\n")
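- /* For each register type this expands to <type>_quirks_set()/_get()
- * accessors over kbdev->hw_quirks_<type> plus a fops_<type>_quirks
- * attribute, e.g. MAKE_QUIRK_ACCESSORS(sc) yields fops_sc_quirks used by
- * the "quirks_sc" debugfs file created below.
- */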
- MAKE_QUIRK_ACCESSORS(sc);
- MAKE_QUIRK_ACCESSORS(tiler);
- MAKE_QUIRK_ACCESSORS(mmu);
- #endif /* KBASE_GPU_RESET_EN */
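- /* Create the per-device debugfs hierarchy: a <devname>/ directory at the
- * debugfs root, a "ctx" directory with per-context defaults, the quirk
- * override files and the optional trace/timeline entries. On any failure
- * the whole tree is removed again via debugfs_remove_recursive().
- */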
- static int kbase_device_debugfs_init(struct kbase_device *kbdev)
- {
- struct dentry *debugfs_ctx_defaults_directory;
- int err;
- kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
- NULL);
- if (!kbdev->mali_debugfs_directory) {
- dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
- err = -ENOMEM;
- goto out;
- }
- kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
- kbdev->mali_debugfs_directory);
- if (!kbdev->debugfs_ctx_directory) {
- dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
- err = -ENOMEM;
- goto out;
- }
- debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
- kbdev->debugfs_ctx_directory);
- if (!debugfs_ctx_defaults_directory) {
- dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
- err = -ENOMEM;
- goto out;
- }
- #if !MALI_CUSTOMER_RELEASE
- kbasep_regs_dump_debugfs_add(kbdev);
- #endif /* !MALI_CUSTOMER_RELEASE */
- kbase_debug_job_fault_debugfs_init(kbdev);
- kbasep_gpu_memory_debugfs_init(kbdev);
- #if KBASE_GPU_RESET_EN
- debugfs_create_file("quirks_sc", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_sc_quirks);
- debugfs_create_file("quirks_tiler", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_tiler_quirks);
- debugfs_create_file("quirks_mmu", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_mmu_quirks);
- #endif /* KBASE_GPU_RESET_EN */
- #ifndef CONFIG_MALI_COH_USER
- debugfs_create_bool("infinite_cache", 0644,
- debugfs_ctx_defaults_directory,
- &kbdev->infinite_cache_active_default);
- #endif /* CONFIG_MALI_COH_USER */
- debugfs_create_size_t("mem_pool_max_size", 0644,
- debugfs_ctx_defaults_directory,
- &kbdev->mem_pool_max_size_default);
- #if KBASE_TRACE_ENABLE
- kbasep_trace_debugfs_init(kbdev);
- #endif /* KBASE_TRACE_ENABLE */
- #ifdef CONFIG_MALI_TRACE_TIMELINE
- kbasep_trace_timeline_debugfs_init(kbdev);
- #endif /* CONFIG_MALI_TRACE_TIMELINE */
- return 0;
- out:
- debugfs_remove_recursive(kbdev->mali_debugfs_directory);
- return err;
- }
- static void kbase_device_debugfs_term(struct kbase_device *kbdev)
- {
- debugfs_remove_recursive(kbdev->mali_debugfs_directory);
- }
- #else /* CONFIG_DEBUG_FS */
- static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
- {
- return 0;
- }
- static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
- #endif /* CONFIG_DEBUG_FS */
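- /* Select the system coherency mode: default to COHERENCY_NONE, but allow
- * the device tree "system-coherency" property to override it as long as
- * the requested mode is advertised in the GPU's coherency_mode bitmap.
- */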
- static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
- {
- #ifdef CONFIG_OF
- u32 supported_coherency_bitmap =
- kbdev->gpu_props.props.raw_props.coherency_mode;
- const void *coherency_override_dts;
- u32 override_coherency;
- #endif /* CONFIG_OF */
- kbdev->system_coherency = COHERENCY_NONE;
- /* device tree may override the coherency */
- #ifdef CONFIG_OF
- coherency_override_dts = of_get_property(kbdev->dev->of_node,
- "system-coherency",
- NULL);
- if (coherency_override_dts) {
- override_coherency = be32_to_cpup(coherency_override_dts);
- if ((override_coherency <= COHERENCY_NONE) &&
- (supported_coherency_bitmap &
- COHERENCY_FEATURE_BIT(override_coherency))) {
- kbdev->system_coherency = override_coherency;
- dev_info(kbdev->dev,
- "Using coherency mode %u set from dtb",
- override_coherency);
- } else
- dev_warn(kbdev->dev,
- "Ignoring unsupported coherency mode %u set from dtb",
- override_coherency);
- }
- #endif /* CONFIG_OF */
- kbdev->gpu_props.props.raw_props.coherency_mode =
- kbdev->system_coherency;
- }
- #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
- /* Callback used by the kbase bus logger client to initiate a GPU reset
- * when the bus log is restarted. The GPU reset is used as a reference
- * point in HW bus log analyses.
- */
- static void kbase_logging_started_cb(void *data)
- {
- struct kbase_device *kbdev = (struct kbase_device *)data;
- if (kbase_prepare_to_reset_gpu(kbdev))
- kbase_reset_gpu(kbdev);
- dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
- }
- #endif
- static struct attribute *kbase_attrs[] = {
- #ifdef CONFIG_MALI_DEBUG
- &dev_attr_debug_command.attr,
- &dev_attr_js_softstop_always.attr,
- #endif
- #if !MALI_CUSTOMER_RELEASE
- &dev_attr_force_replay.attr,
- #endif
- &dev_attr_js_timeouts.attr,
- &dev_attr_soft_event_timeout.attr,
- &dev_attr_gpuinfo.attr,
- &dev_attr_dvfs_period.attr,
- &dev_attr_pm_poweroff.attr,
- &dev_attr_reset_timeout.attr,
- &dev_attr_js_scheduling_period.attr,
- &dev_attr_power_policy.attr,
- &dev_attr_core_availability_policy.attr,
- &dev_attr_core_mask.attr,
- &dev_attr_mem_pool_size.attr,
- &dev_attr_mem_pool_max_size.attr,
- NULL
- };
- static const struct attribute_group kbase_attr_group = {
- .attrs = kbase_attrs,
- };
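- /* Tear down in roughly the reverse order of probe; each step is guarded by
- * its inited_subsys bit, so the same function can also unwind a partially
- * completed probe.
- */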
- static int kbase_platform_device_remove(struct platform_device *pdev)
- {
- struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
- const struct list_head *dev_list;
- if (!kbdev)
- return -ENODEV;
- #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
- if (kbdev->inited_subsys & inited_buslogger) {
- bl_core_client_unregister(kbdev->buslogger);
- kbdev->inited_subsys &= ~inited_buslogger;
- }
- #endif
- if (kbdev->inited_subsys & inited_sysfs_group) {
- sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
- kbdev->inited_subsys &= ~inited_sysfs_group;
- }
- if (kbdev->inited_subsys & inited_dev_list) {
- dev_list = kbase_dev_list_get();
- list_del(&kbdev->entry);
- kbase_dev_list_put(dev_list);
- kbdev->inited_subsys &= ~inited_dev_list;
- }
- if (kbdev->inited_subsys & inited_misc_register) {
- misc_deregister(&kbdev->mdev);
- kbdev->inited_subsys &= ~inited_misc_register;
- }
- if (kbdev->inited_subsys & inited_get_device) {
- put_device(kbdev->dev);
- kbdev->inited_subsys &= ~inited_get_device;
- }
- if (kbdev->inited_subsys & inited_debugfs) {
- kbase_device_debugfs_term(kbdev);
- kbdev->inited_subsys &= ~inited_debugfs;
- }
- if (kbdev->inited_subsys & inited_job_fault) {
- kbase_debug_job_fault_dev_term(kbdev);
- kbdev->inited_subsys &= ~inited_job_fault;
- }
- #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
- if (kbdev->inited_subsys & inited_ipa) {
- kbase_ipa_term(kbdev->ipa_ctx);
- kbdev->inited_subsys &= ~inited_ipa;
- }
- #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
- if (kbdev->inited_subsys & inited_vinstr) {
- kbase_vinstr_term(kbdev->vinstr_ctx);
- kbdev->inited_subsys &= ~inited_vinstr;
- }
- #ifdef CONFIG_MALI_DEVFREQ
- if (kbdev->inited_subsys & inited_devfreq) {
- kbase_devfreq_term(kbdev);
- kbdev->inited_subsys &= ~inited_devfreq;
- }
- #endif
- if (kbdev->inited_subsys & inited_backend_late) {
- kbase_backend_late_term(kbdev);
- kbdev->inited_subsys &= ~inited_backend_late;
- }
- if (kbdev->inited_subsys & inited_tlstream) {
- kbase_tlstream_term();
- kbdev->inited_subsys &= ~inited_tlstream;
- }
- /* Bring job and mem sys to a halt before we continue termination */
- if (kbdev->inited_subsys & inited_js)
- kbasep_js_devdata_halt(kbdev);
- if (kbdev->inited_subsys & inited_mem)
- kbase_mem_halt(kbdev);
- if (kbdev->inited_subsys & inited_js) {
- kbasep_js_devdata_term(kbdev);
- kbdev->inited_subsys &= ~inited_js;
- }
- if (kbdev->inited_subsys & inited_mem) {
- kbase_mem_term(kbdev);
- kbdev->inited_subsys &= ~inited_mem;
- }
- if (kbdev->inited_subsys & inited_pm_runtime_init) {
- kbdev->pm.callback_power_runtime_term(kbdev);
- kbdev->inited_subsys &= ~inited_pm_runtime_init;
- }
- if (kbdev->inited_subsys & inited_device) {
- kbase_device_term(kbdev);
- kbdev->inited_subsys &= ~inited_device;
- }
- if (kbdev->inited_subsys & inited_backend_early) {
- kbase_backend_early_term(kbdev);
- kbdev->inited_subsys &= ~inited_backend_early;
- }
- if (kbdev->inited_subsys & inited_power_control) {
- power_control_term(kbdev);
- kbdev->inited_subsys &= ~inited_power_control;
- }
- if (kbdev->inited_subsys & inited_registers_map) {
- registers_unmap(kbdev);
- kbdev->inited_subsys &= ~inited_registers_map;
- }
- #ifdef CONFIG_MALI_NO_MALI
- if (kbdev->inited_subsys & inited_gpu_device) {
- gpu_device_destroy(kbdev);
- kbdev->inited_subsys &= ~inited_gpu_device;
- }
- #endif /* CONFIG_MALI_NO_MALI */
- if (kbdev->inited_subsys != 0)
- dev_err(kbdev->dev, "Missing sub system termination\n");
- kbase_device_free(kbdev);
- return 0;
- }
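- /* Probe brings the subsystems up one by one, setting the matching
- * inited_subsys bit after each successful step; on any error it calls
- * kbase_platform_device_remove() to undo whatever has been initialised so
- * far and returns the error.
- */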
- static int kbase_platform_device_probe(struct platform_device *pdev)
- {
- struct kbase_device *kbdev;
- struct mali_base_gpu_core_props *core_props;
- u32 gpu_id;
- const struct list_head *dev_list;
- int err = 0;
- #ifdef CONFIG_OF
- err = kbase_platform_early_init();
- if (err) {
- dev_err(&pdev->dev, "Early platform initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- #endif
- kbdev = kbase_device_alloc();
- if (!kbdev) {
- dev_err(&pdev->dev, "Allocate device failed\n");
- kbase_platform_device_remove(pdev);
- return -ENOMEM;
- }
- kbdev->dev = &pdev->dev;
- dev_set_drvdata(kbdev->dev, kbdev);
- #ifdef CONFIG_MALI_NO_MALI
- err = gpu_device_create(kbdev);
- if (err) {
- dev_err(&pdev->dev, "Dummy model initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_gpu_device;
- #endif /* CONFIG_MALI_NO_MALI */
- err = assign_irqs(pdev);
- if (err) {
- dev_err(&pdev->dev, "IRQ search failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- err = registers_map(kbdev);
- if (err) {
- dev_err(&pdev->dev, "Register map failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_registers_map;
- err = power_control_init(pdev);
- if (err) {
- dev_err(&pdev->dev, "Power control initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_power_control;
- err = kbase_backend_early_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Early backend initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_backend_early;
- scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
- kbase_dev_nr);
- kbase_disjoint_init(kbdev);
- /* obtain min/max configured gpu frequencies */
- core_props = &(kbdev->gpu_props.props.core_props);
- core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
- core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
- kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
- err = kbase_device_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_device;
- if (kbdev->pm.callback_power_runtime_init) {
- err = kbdev->pm.callback_power_runtime_init(kbdev);
- if (err) {
- dev_err(kbdev->dev,
- "Runtime PM initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_pm_runtime_init;
- }
- err = kbase_mem_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_mem;
- gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
- gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- kbase_device_coherency_init(kbdev, gpu_id);
- kbasep_secure_mode_init(kbdev);
- err = kbasep_js_devdata_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_js;
- err = kbase_tlstream_init();
- if (err) {
- dev_err(kbdev->dev, "Timeline stream initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_tlstream;
- err = kbase_backend_late_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Late backend initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_backend_late;
- #ifdef CONFIG_MALI_DEVFREQ
- err = kbase_devfreq_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Fevfreq initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_devfreq;
- #endif /* CONFIG_MALI_DEVFREQ */
- kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
- if (!kbdev->vinstr_ctx) {
- dev_err(kbdev->dev,
- "Virtual instrumentation initialization failed\n");
- kbase_platform_device_remove(pdev);
- return -EINVAL;
- }
- kbdev->inited_subsys |= inited_vinstr;
- #ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
- kbdev->ipa_ctx = kbase_ipa_init(kbdev);
- if (!kbdev->ipa_ctx) {
- dev_err(kbdev->dev, "IPA initialization failed\n");
- kbase_platform_device_remove(pdev);
- return -EINVAL;
- }
- kbdev->inited_subsys |= inited_ipa;
- #endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
- err = kbase_debug_job_fault_dev_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "Job fault debug initialization failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_job_fault;
- err = kbase_device_debugfs_init(kbdev);
- if (err) {
- dev_err(kbdev->dev, "DebugFS initialization failed");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_debugfs;
- /* initialize the kctx list */
- mutex_init(&kbdev->kctx_list_lock);
- INIT_LIST_HEAD(&kbdev->kctx_list);
- kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
- kbdev->mdev.name = kbdev->devname;
- kbdev->mdev.fops = &kbase_fops;
- kbdev->mdev.parent = get_device(kbdev->dev);
- kbdev->inited_subsys |= inited_get_device;
- err = misc_register(&kbdev->mdev);
- if (err) {
- dev_err(kbdev->dev, "Misc device registration failed for %s\n",
- kbdev->devname);
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_misc_register;
- dev_list = kbase_dev_list_get();
- list_add(&kbdev->entry, &kbase_dev_list);
- kbase_dev_list_put(dev_list);
- kbdev->inited_subsys |= inited_dev_list;
- err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
- if (err) {
- dev_err(&pdev->dev, "SysFS group creation failed\n");
- kbase_platform_device_remove(pdev);
- return err;
- }
- kbdev->inited_subsys |= inited_sysfs_group;
- #ifdef CONFIG_MALI_FPGA_BUS_LOGGER
- err = bl_core_client_register(kbdev->devname,
- kbase_logging_started_cb,
- kbdev, &kbdev->buslogger,
- THIS_MODULE, NULL);
- if (err == 0) {
- kbdev->inited_subsys |= inited_buslogger;
- bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
- } else {
- dev_warn(kbdev->dev, "Bus log client registration failed\n");
- err = 0;
- }
- #endif
- dev_info(kbdev->dev,
- "Probed as %s\n", dev_name(kbdev->mdev.this_device));
- kbase_dev_nr++;
- return err;
- }
- /**
- * kbase_device_suspend - Suspend callback from the OS.
- * @dev: The device to suspend
- *
- * This is called by Linux when the device should suspend.
- *
- * Return: A standard Linux error code
- */
- static int kbase_device_suspend(struct device *dev)
- {
- struct kbase_device *kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- #if defined(CONFIG_PM_DEVFREQ) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- devfreq_suspend_device(kbdev->devfreq);
- #endif
- kbase_pm_suspend(kbdev);
- return 0;
- }
- /**
- * kbase_device_resume - Resume callback from the OS.
- * @dev: The device to resume
- *
- * This is called by Linux when the device should resume from suspension.
- *
- * Return: A standard Linux error code
- */
- static int kbase_device_resume(struct device *dev)
- {
- struct kbase_device *kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- kbase_pm_resume(kbdev);
- #if defined(CONFIG_PM_DEVFREQ) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- devfreq_resume_device(kbdev->devfreq);
- #endif
- return 0;
- }
- /**
- * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
- * @dev: The device to suspend
- *
- * This is called by Linux when the device should prepare for a condition
- * in which it will not be able to communicate with the CPU(s) and RAM due
- * to power management.
- *
- * Return: A standard Linux error code
- */
- #ifdef KBASE_PM_RUNTIME
- static int kbase_device_runtime_suspend(struct device *dev)
- {
- struct kbase_device *kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- #if defined(CONFIG_PM_DEVFREQ) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- devfreq_suspend_device(kbdev->devfreq);
- #endif
- if (kbdev->pm.backend.callback_power_runtime_off) {
- kbdev->pm.backend.callback_power_runtime_off(kbdev);
- dev_dbg(dev, "runtime suspend\n");
- }
- return 0;
- }
- #endif /* KBASE_PM_RUNTIME */
- /**
- * kbase_device_runtime_resume - Runtime resume callback from the OS.
- * @dev: The device to resume
- *
- * This is called by Linux when the device should go into a fully active
- * state.
- *
- * Return: A standard Linux error code
- */
- #ifdef KBASE_PM_RUNTIME
- static int kbase_device_runtime_resume(struct device *dev)
- {
- int ret = 0;
- struct kbase_device *kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- if (kbdev->pm.backend.callback_power_runtime_on) {
- ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
- dev_dbg(dev, "runtime resume\n");
- }
- #if defined(CONFIG_PM_DEVFREQ) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- devfreq_resume_device(kbdev->devfreq);
- #endif
- return ret;
- }
- #endif /* KBASE_PM_RUNTIME */
- #ifdef KBASE_PM_RUNTIME
- /**
- * kbase_device_runtime_idle - Runtime idle callback from the OS.
- * @dev: The device to suspend
- *
- * This is called by Linux when the device appears to be inactive and it might
- * be placed into a low power state.
- *
- * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
- * otherwise a standard Linux error code
- */
- static int kbase_device_runtime_idle(struct device *dev)
- {
- struct kbase_device *kbdev = to_kbase_device(dev);
- if (!kbdev)
- return -ENODEV;
- /* Use platform specific implementation if it exists. */
- if (kbdev->pm.backend.callback_power_runtime_idle)
- return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
- return 0;
- }
- #endif /* KBASE_PM_RUNTIME */
- /** The power management operations for the platform driver.
- */
- static const struct dev_pm_ops kbase_pm_ops = {
- .suspend = kbase_device_suspend,
- .resume = kbase_device_resume,
- #ifdef KBASE_PM_RUNTIME
- .runtime_suspend = kbase_device_runtime_suspend,
- .runtime_resume = kbase_device_runtime_resume,
- .runtime_idle = kbase_device_runtime_idle,
- #endif /* KBASE_PM_RUNTIME */
- };
- #ifdef CONFIG_OF
- static const struct of_device_id kbase_dt_ids[] = {
- { .compatible = "arm,malit6xx" },
- { .compatible = "arm,mali-midgard" },
- { /* sentinel */ }
- };
- MODULE_DEVICE_TABLE(of, kbase_dt_ids);
- #endif
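- /* Illustrative device-tree node only (node name and values are assumptions,
- * not taken from a real platform); it shows the properties this file
- * actually looks up: the compatible string, the "clk_mali" clock, the
- * optional "mali" regulator supply and the optional "system-coherency"
- * override:
- *
- *	gpu@6e000000 {
- *		compatible = "arm,mali-midgard";
- *		reg = <0x6e000000 0x4000>;
- *		clocks = <&clk_gpu>;
- *		clock-names = "clk_mali";
- *		mali-supply = <&vdd_gpu>;
- *		system-coherency = <31>;
- *	};
- */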
- static struct platform_driver kbase_platform_driver = {
- .probe = kbase_platform_device_probe,
- .remove = kbase_platform_device_remove,
- .driver = {
- .name = kbase_drv_name,
- .owner = THIS_MODULE,
- .pm = &kbase_pm_ops,
- .of_match_table = of_match_ptr(kbase_dt_ids),
- },
- };
- /*
- * When using Device Tree, the driver no longer provides a shortcut for
- * creating the Mali platform device.
- */
- #ifdef CONFIG_OF
- module_platform_driver(kbase_platform_driver);
- #else
- static int __init kbase_driver_init(void)
- {
- int ret;
- ret = kbase_platform_early_init();
- if (ret)
- return ret;
- #ifdef CONFIG_MALI_PLATFORM_FAKE
- ret = kbase_platform_fake_register();
- if (ret)
- return ret;
- #endif
- ret = platform_driver_register(&kbase_platform_driver);
- #ifdef CONFIG_MALI_PLATFORM_FAKE
- if (ret)
- kbase_platform_fake_unregister();
- #endif
- return ret;
- }
- static void __exit kbase_driver_exit(void)
- {
- platform_driver_unregister(&kbase_platform_driver);
- #ifdef CONFIG_MALI_PLATFORM_FAKE
- kbase_platform_fake_unregister();
- #endif
- }
- module_init(kbase_driver_init);
- module_exit(kbase_driver_exit);
- #endif /* CONFIG_OF */
- MODULE_LICENSE("GPL");
- MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
- __stringify(BASE_UK_VERSION_MAJOR) "." \
- __stringify(BASE_UK_VERSION_MINOR) ")");
- #if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
- #define CREATE_TRACE_POINTS
- #endif
- #ifdef CONFIG_MALI_GATOR_SUPPORT
- /* Create the trace points (otherwise we just get code to call a tracepoint) */
- #include "mali_linux_trace.h"
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
- EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter);
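- /* Thin wrappers around the gator tracepoints; presumably kept so that other
- * compilation units can emit these events without including the tracepoint
- * definitions themselves.
- */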
- void kbase_trace_mali_pm_status(u32 event, u64 value)
- {
- trace_mali_pm_status(event, value);
- }
- void kbase_trace_mali_pm_power_off(u32 event, u64 value)
- {
- trace_mali_pm_power_off(event, value);
- }
- void kbase_trace_mali_pm_power_on(u32 event, u64 value)
- {
- trace_mali_pm_power_on(event, value);
- }
- void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
- {
- trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
- }
- void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
- {
- trace_mali_page_fault_insert_pages(event, value);
- }
- void kbase_trace_mali_mmu_as_in_use(int event)
- {
- trace_mali_mmu_as_in_use(event);
- }
- void kbase_trace_mali_mmu_as_released(int event)
- {
- trace_mali_mmu_as_released(event);
- }
- void kbase_trace_mali_total_alloc_pages_change(long long int event)
- {
- trace_mali_total_alloc_pages_change(event);
- }
- #endif /* CONFIG_MALI_GATOR_SUPPORT */
- #ifdef CONFIG_MALI_SYSTEM_TRACE
- #include "mali_linux_kbase_trace.h"
- #endif
|