ufshcd.c

/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
                                 UFSHCD_ERROR_MASK)

/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
/*
 * Query request timeout for fDeviceInit flag
 * fDeviceInit query response time for some devices is so long that the
 * default QUERY_REQ_TIMEOUT may not be enough for such devices.
 */
#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)                    \
        ({                                                      \
                int _ret;                                       \
                if (_on)                                        \
                        _ret = ufshcd_enable_vreg(_dev, _vreg); \
                else                                            \
                        _ret = ufshcd_disable_vreg(_dev, _vreg);\
                _ret;                                           \
        })
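
/*
 * Illustrative use of ufshcd_toggle_vreg() (editor's sketch, not part of
 * the original source): because the macro is a statement expression, it
 * evaluates to the return value of the enable/disable helper, e.g.:
 *
 *      ret = ufshcd_toggle_vreg(hba->dev, vreg, true);
 *      if (ret)
 *              goto out;
 *
 * "vreg" above is just a placeholder regulator-info pointer.
 */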
enum {
        UFSHCD_MAX_CHANNEL = 0,
        UFSHCD_MAX_ID = 1,
        UFSHCD_CMD_PER_LUN = 32,
        UFSHCD_CAN_QUEUE = 32,
};

/* UFSHCD states */
enum {
        UFSHCD_STATE_RESET,
        UFSHCD_STATE_ERROR,
        UFSHCD_STATE_OPERATIONAL,
        UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
        UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
        UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
        UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
        UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
        UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
        UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
        UFSHCD_INT_DISABLE,
        UFSHCD_INT_ENABLE,
        UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
        (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
        (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
        (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
        ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
        ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
        ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
        ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].link_state;
}

static struct ufs_dev_fix ufs_fixups[] = {
        /* UFS cards deviations table */
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_NO_FASTAUTO),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

        END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                                 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
                struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
                struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
        return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
        int ret = 0;

        if (!hba->is_irq_enabled) {
                ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
                                hba);
                if (ret)
                        dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
                                __func__, ret);
                hba->is_irq_enabled = true;
        }

        return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
        if (hba->is_irq_enabled) {
                free_irq(hba->irq, hba);
                hba->is_irq_enabled = false;
        }
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
        if (!val)
                return;

        if (*val < 0x20 || *val > 0x7e)
                *val = ' ';
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                                u32 val, unsigned long interval_us,
                                unsigned long timeout_ms, bool can_sleep)
{
        int err = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        /* ignore bits that we don't intend to wait on */
        val = val & mask;

        while ((ufshcd_readl(hba, reg) & mask) != val) {
                if (can_sleep)
                        usleep_range(interval_us, interval_us + 50);
                else
                        udelay(interval_us);
                if (time_after(jiffies, timeout)) {
                        if ((ufshcd_readl(hba, reg) & mask) != val)
                                err = -ETIMEDOUT;
                        break;
                }
        }

        return err;
}
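
/*
 * Illustrative call (editor's sketch, not part of the original source):
 * poll the transfer request doorbell until bit "tag" clears, sleeping
 * between reads, with a hypothetical 1000 ms budget:
 *
 *      err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                     1 << tag, 0, 100, 1000, true);
 *      if (err)
 *              dev_err(hba->dev, "tag %d did not clear\n", tag);
 *
 * The interval (100 us) and timeout (1000 ms) values above are arbitrary.
 */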
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
        if (hba->ufs_version == UFSHCI_VERSION_10)
                return INTERRUPT_MASK_ALL_VER_10;
        else
                return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
                return ufshcd_vops_get_ufs_hci_version(hba);

        return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
 * ufshcd_is_device_present - Check if any device is connected to
 *                            the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if a device is present, 0 if no device is detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
                                DEVICE_PRESENT) ? 1 : 0;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
        return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, otherwise returns true
 * with the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
        int tag;
        bool ret = false;

        if (!free_slot)
                goto out;

        do {
                tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
                if (tag >= hba->nutmrs)
                        goto out;
        } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

        *free_slot = tag;
        ret = true;
out:
        return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
        clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
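
/*
 * Illustrative pairing (editor's sketch, not part of the original source):
 * a task-management path would typically reserve a slot, use it, then
 * release it:
 *
 *      int free_slot;
 *
 *      if (!ufshcd_get_tm_free_slot(hba, &free_slot))
 *              return -EBUSY;
 *      ... build and issue the TM request in slot "free_slot" ...
 *      ufshcd_put_tm_slot(hba, free_slot);
 *
 * The -EBUSY return is only a placeholder; in this driver the caller
 * normally waits on a wait queue instead of failing immediately.
 */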
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
        ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
        __clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
        /*
         * The mask 0xFF is for the following HCS register bits
         * Bit          Description
         *  0           Device Present
         *  1           UTRLRDY
         *  2           UTMRLRDY
         *  3           UCRDY
         * 4-7          reserved
         */
        return ((reg & 0xFF) >> 1) ^ 0x07;
}
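
/*
 * Worked example (editor's note, not part of the original source): with
 * Device Present, UTRLRDY, UTMRLRDY and UCRDY all set, reg & 0xFF is 0x0F;
 * shifting right by one gives 0x07, and 0x07 ^ 0x07 == 0, i.e. "ready".
 * Any ready bit that is still 0 leaves a non-zero result.
 */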
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
               MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *                              from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                        MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE |
                           INT_AGGR_COUNTER_AND_TIMER_RESET,
                           REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
                           INT_AGGR_COUNTER_THLD_VAL(cnt) |
                           INT_AGGR_TIMEOUT_VAL(tmout),
                           REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
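
/*
 * Illustrative configuration (editor's sketch, not part of the original
 * source): enable aggregation so that an interrupt fires after "cnt"
 * completed requests or after the default 2 x 40 us timeout, whichever
 * comes first:
 *
 *      ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 *
 * "hba->nutrs - 1" is shown only as a plausible counter threshold; any
 * value that fits the counter threshold field behaves the same way.
 */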
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, the host controller
 * is told that it can start processing requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
        ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TASK_REQ_LIST_RUN_STOP);
        ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
        ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
        /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
        if ((hba->ufs_version == UFSHCI_VERSION_10) ||
            (hba->ufs_version == UFSHCI_VERSION_11))
                return UFS_UNIPRO_VER_1_41;
        else
                return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
        /*
         * If both host and device support UniPro ver1.6 or later, PA layer
         * parameters tuning happens during link startup itself.
         *
         * We can manually tune PA layer parameters if either host or device
         * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
         * logic simple, we will only do manual tuning if local unipro version
         * doesn't support ver1.6 or later.
         */
        if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
                return true;
        else
                return false;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
        int ret;
        unsigned long flags;
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                        clk_gating.ungate_work);

        cancel_delayed_work_sync(&hba->clk_gating.gate_work);

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_gating.state == CLKS_ON) {
                spin_unlock_irqrestore(hba->host->host_lock, flags);
                goto unblock_reqs;
        }

        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_setup_clocks(hba, true);

        /* Exit from hibern8 */
        if (ufshcd_can_hibern8_during_gating(hba)) {
                /* Prevent gating in this path */
                hba->clk_gating.is_suspended = true;
                if (ufshcd_is_link_hibern8(hba)) {
                        ret = ufshcd_uic_hibern8_exit(hba);
                        if (ret)
                                dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                        __func__, ret);
                        else
                                ufshcd_set_link_active(hba);
                }
                hba->clk_gating.is_suspended = false;
        }
unblock_reqs:
        if (ufshcd_is_clkscaling_enabled(hba))
                devfreq_resume_device(hba->devfreq);
        scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
        int rc = 0;
        unsigned long flags;

        if (!ufshcd_is_clkgating_allowed(hba))
                goto out;
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_gating.active_reqs++;

        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, flags);
                return 0;
        }

start:
        switch (hba->clk_gating.state) {
        case CLKS_ON:
                break;
        case REQ_CLKS_OFF:
                if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
                        hba->clk_gating.state = CLKS_ON;
                        break;
                }
                /*
                 * If we are here, it means gating work is either done or
                 * currently running. Hence, fall through to cancel gating
                 * work and to enable clocks.
                 */
        case CLKS_OFF:
                scsi_block_requests(hba->host);
                hba->clk_gating.state = REQ_CLKS_ON;
                schedule_work(&hba->clk_gating.ungate_work);
                /*
                 * fall through to check if we should wait for this
                 * work to be done or not.
                 */
        case REQ_CLKS_ON:
                if (async) {
                        rc = -EAGAIN;
                        hba->clk_gating.active_reqs--;
                        break;
                }

                spin_unlock_irqrestore(hba->host->host_lock, flags);
                flush_work(&hba->clk_gating.ungate_work);
                /* Make sure state is CLKS_ON before returning */
                spin_lock_irqsave(hba->host->host_lock, flags);
                goto start;
        default:
                dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
                        __func__, hba->clk_gating.state);
                break;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
        return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
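
/*
 * Illustrative hold/release pairing (editor's sketch, not part of the
 * original source): a code path that touches the device while clock
 * gating is enabled brackets its work roughly like this:
 *
 *      ufshcd_hold(hba, false);        // sync: wait until clocks are on
 *      ... issue commands / access registers ...
 *      ufshcd_release(hba);            // may schedule gate_work later
 *
 * Passing async=true instead returns -EAGAIN while the ungate work is
 * still pending, letting hot paths retry rather than sleep.
 */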
static void ufshcd_gate_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                        clk_gating.gate_work.work);
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_gating.is_suspended) {
                hba->clk_gating.state = CLKS_ON;
                goto rel_lock;
        }

        if (hba->clk_gating.active_reqs
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
                || hba->lrb_in_use || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done)
                goto rel_lock;

        spin_unlock_irqrestore(hba->host->host_lock, flags);

        /* put the link into hibern8 mode before turning off clocks */
        if (ufshcd_can_hibern8_during_gating(hba)) {
                if (ufshcd_uic_hibern8_enter(hba)) {
                        hba->clk_gating.state = CLKS_ON;
                        goto out;
                }
                ufshcd_set_link_hibern8(hba);
        }

        if (ufshcd_is_clkscaling_enabled(hba)) {
                devfreq_suspend_device(hba->devfreq);
                hba->clk_scaling.window_start_t = 0;
        }

        if (!ufshcd_is_link_active(hba))
                ufshcd_setup_clocks(hba, false);
        else
                /* If link is active, device ref_clk can't be switched off */
                __ufshcd_setup_clocks(hba, false, true);
        /*
         * In case you are here to cancel this work the gating state
         * would be marked as REQ_CLKS_ON. In this case keep the state
         * as REQ_CLKS_ON, which anyway implies that clocks are off
         * and a request to turn them on is pending. Done this way,
         * the state machine stays intact and we avoid running the
         * cancel work multiple times when new requests arrive before
         * the current cancel work is done.
         */
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_gating.state == REQ_CLKS_OFF)
                hba->clk_gating.state = CLKS_OFF;

rel_lock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
        return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
        if (!ufshcd_is_clkgating_allowed(hba))
                return;

        hba->clk_gating.active_reqs--;

        if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
                || hba->lrb_in_use || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done
                || ufshcd_eh_in_progress(hba))
                return;

        hba->clk_gating.state = REQ_CLKS_OFF;
        schedule_delayed_work(&hba->clk_gating.gate_work,
                        msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        __ufshcd_release(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long flags, value;

        if (kstrtoul(buf, 0, &value))
                return -EINVAL;

        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_gating.delay_ms = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return count;
}
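
/*
 * Illustrative sysfs usage (editor's note, not part of the original source):
 * the show/store pair above backs the "clkgate_delay_ms" attribute created
 * in ufshcd_init_clk_gating(), so from user space the idle delay before
 * gating can be read or tuned roughly like this (device path will vary):
 *
 *      cat /sys/devices/.../clkgate_delay_ms
 *      echo 200 > /sys/devices/.../clkgate_delay_ms
 */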
  735. static void ufshcd_init_clk_gating(struct ufs_hba *hba)
  736. {
  737. if (!ufshcd_is_clkgating_allowed(hba))
  738. return;
  739. hba->clk_gating.delay_ms = 150;
  740. INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
  741. INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
  742. hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
  743. hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
  744. sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
  745. hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
  746. hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
  747. if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
  748. dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
  749. }
  750. static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
  751. {
  752. if (!ufshcd_is_clkgating_allowed(hba))
  753. return;
  754. device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
  755. cancel_work_sync(&hba->clk_gating.ungate_work);
  756. cancel_delayed_work_sync(&hba->clk_gating.gate_work);
  757. }
  758. /* Must be called with host lock acquired */
  759. static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
  760. {
  761. if (!ufshcd_is_clkscaling_enabled(hba))
  762. return;
  763. if (!hba->clk_scaling.is_busy_started) {
  764. hba->clk_scaling.busy_start_t = ktime_get();
  765. hba->clk_scaling.is_busy_started = true;
  766. }
  767. }
  768. static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
  769. {
  770. struct ufs_clk_scaling *scaling = &hba->clk_scaling;
  771. if (!ufshcd_is_clkscaling_enabled(hba))
  772. return;
  773. if (!hba->outstanding_reqs && scaling->is_busy_started) {
  774. scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
  775. scaling->busy_start_t));
  776. scaling->busy_start_t = ktime_set(0, 0);
  777. scaling->is_busy_started = false;
  778. }
  779. }
  780. /**
  781. * ufshcd_send_command - Send SCSI or device management commands
  782. * @hba: per adapter instance
  783. * @task_tag: Task tag of the command
  784. */
  785. static inline
  786. void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  787. {
  788. ufshcd_clk_scaling_start_busy(hba);
  789. __set_bit(task_tag, &hba->outstanding_reqs);
  790. ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  791. }
  792. /**
  793. * ufshcd_copy_sense_data - Copy sense data in case of check condition
  794. * @lrb - pointer to local reference block
  795. */
  796. static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
  797. {
  798. int len;
  799. if (lrbp->sense_buffer &&
  800. ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
  801. len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
  802. memcpy(lrbp->sense_buffer,
  803. lrbp->ucd_rsp_ptr->sr.sense_data,
  804. min_t(int, len, SCSI_SENSE_BUFFERSIZE));
  805. }
  806. }
  807. /**
  808. * ufshcd_copy_query_response() - Copy the Query Response and the data
  809. * descriptor
  810. * @hba: per adapter instance
811. * @lrbp: pointer to local reference block
  812. */
  813. static
  814. int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  815. {
  816. struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
  817. memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
  818. /* Get the descriptor */
  819. if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
  820. u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
  821. GENERAL_UPIU_REQUEST_SIZE;
  822. u16 resp_len;
  823. u16 buf_len;
  824. /* data segment length */
  825. resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
  826. MASK_QUERY_DATA_SEG_LEN;
  827. buf_len = be16_to_cpu(
  828. hba->dev_cmd.query.request.upiu_req.length);
  829. if (likely(buf_len >= resp_len)) {
  830. memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
  831. } else {
  832. dev_warn(hba->dev,
  833. "%s: Response size is bigger than buffer",
  834. __func__);
  835. return -EINVAL;
  836. }
  837. }
  838. return 0;
  839. }
  840. /**
  841. * ufshcd_hba_capabilities - Read controller capabilities
  842. * @hba: per adapter instance
  843. */
  844. static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
  845. {
  846. hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
  847. /* nutrs and nutmrs are 0 based values */
  848. hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
  849. hba->nutmrs =
  850. ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
  851. }
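/*
 * Worked example (illustrative only, assuming the usual mask definitions in
 * ufshci.h of MASK_TRANSFER_REQUESTS_SLOTS == 0x1F and
 * MASK_TASK_MANAGEMENT_REQUEST_SLOTS == 0x70000): a capabilities value of
 * 0x0007001F decodes to
 *	nutrs  = (0x0007001F & 0x1F) + 1            = 32 transfer request slots
 *	nutmrs = ((0x0007001F & 0x70000) >> 16) + 1 =  8 task management slots
 */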
  852. /**
  853. * ufshcd_ready_for_uic_cmd - Check if controller is ready
  854. * to accept UIC commands
  855. * @hba: per adapter instance
856. * Returns true if the controller is ready, else false
  857. */
  858. static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
  859. {
  860. if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
  861. return true;
  862. else
  863. return false;
  864. }
  865. /**
  866. * ufshcd_get_upmcrs - Get the power mode change request status
  867. * @hba: Pointer to adapter instance
  868. *
  869. * This function gets the UPMCRS field of HCS register
  870. * Returns value of UPMCRS field
  871. */
  872. static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
  873. {
  874. return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
  875. }
  876. /**
  877. * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
  878. * @hba: per adapter instance
  879. * @uic_cmd: UIC command
  880. *
  881. * Mutex must be held.
  882. */
  883. static inline void
  884. ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  885. {
  886. WARN_ON(hba->active_uic_cmd);
  887. hba->active_uic_cmd = uic_cmd;
  888. /* Write Args */
  889. ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
  890. ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
  891. ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
  892. /* Write UIC Cmd */
  893. ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
  894. REG_UIC_COMMAND);
  895. }
  896. /**
897. * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
898. * @hba: per adapter instance
899. * @uic_cmd: UIC command
  900. *
  901. * Must be called with mutex held.
  902. * Returns 0 only if success.
  903. */
  904. static int
  905. ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  906. {
  907. int ret;
  908. unsigned long flags;
  909. if (wait_for_completion_timeout(&uic_cmd->done,
  910. msecs_to_jiffies(UIC_CMD_TIMEOUT)))
  911. ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
  912. else
  913. ret = -ETIMEDOUT;
  914. spin_lock_irqsave(hba->host->host_lock, flags);
  915. hba->active_uic_cmd = NULL;
  916. spin_unlock_irqrestore(hba->host->host_lock, flags);
  917. return ret;
  918. }
  919. /**
  920. * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  921. * @hba: per adapter instance
  922. * @uic_cmd: UIC command
  923. * @completion: initialize the completion only if this is set to true
  924. *
925. * Identical to ufshcd_send_uic_cmd() except that locking is the caller's
926. * responsibility. Must be called with the mutex held and host_lock locked.
  927. * Returns 0 only if success.
  928. */
  929. static int
  930. __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
  931. bool completion)
  932. {
  933. if (!ufshcd_ready_for_uic_cmd(hba)) {
  934. dev_err(hba->dev,
  935. "Controller not ready to accept UIC commands\n");
  936. return -EIO;
  937. }
  938. if (completion)
  939. init_completion(&uic_cmd->done);
  940. ufshcd_dispatch_uic_cmd(hba, uic_cmd);
  941. return 0;
  942. }
  943. /**
  944. * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  945. * @hba: per adapter instance
  946. * @uic_cmd: UIC command
  947. *
  948. * Returns 0 only if success.
  949. */
  950. static int
  951. ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  952. {
  953. int ret;
  954. unsigned long flags;
  955. ufshcd_hold(hba, false);
  956. mutex_lock(&hba->uic_cmd_mutex);
  957. ufshcd_add_delay_before_dme_cmd(hba);
  958. spin_lock_irqsave(hba->host->host_lock, flags);
  959. ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
  960. spin_unlock_irqrestore(hba->host->host_lock, flags);
  961. if (!ret)
  962. ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
  963. mutex_unlock(&hba->uic_cmd_mutex);
  964. ufshcd_release(hba);
  965. return ret;
  966. }
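/*
 * Usage sketch (illustrative only): a caller fills a struct uic_command and
 * hands it to ufshcd_send_uic_cmd(), which serializes it via uic_cmd_mutex
 * and waits for its completion. This mirrors ufshcd_dme_link_startup() below.
 *
 *	struct uic_command uic_cmd = {0};
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (ret)
 *		dev_err(hba->dev, "link startup failed: %d\n", ret);
 */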
  967. /**
  968. * ufshcd_map_sg - Map scatter-gather list to prdt
969. * @lrbp: pointer to local reference block
  970. *
  971. * Returns 0 in case of success, non-zero value in case of failure
  972. */
  973. static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  974. {
  975. struct ufshcd_sg_entry *prd_table;
  976. struct scatterlist *sg;
  977. struct scsi_cmnd *cmd;
  978. int sg_segments;
  979. int i;
  980. cmd = lrbp->cmd;
  981. sg_segments = scsi_dma_map(cmd);
  982. if (sg_segments < 0)
  983. return sg_segments;
  984. if (sg_segments) {
  985. if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
  986. lrbp->utr_descriptor_ptr->prd_table_length =
  987. cpu_to_le16((u16)(sg_segments *
  988. sizeof(struct ufshcd_sg_entry)));
  989. else
  990. lrbp->utr_descriptor_ptr->prd_table_length =
  991. cpu_to_le16((u16) (sg_segments));
  992. prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
  993. scsi_for_each_sg(cmd, sg, sg_segments, i) {
  994. prd_table[i].size =
  995. cpu_to_le32(((u32) sg_dma_len(sg))-1);
  996. prd_table[i].base_addr =
  997. cpu_to_le32(lower_32_bits(sg->dma_address));
  998. prd_table[i].upper_addr =
  999. cpu_to_le32(upper_32_bits(sg->dma_address));
  1000. prd_table[i].reserved = 0;
  1001. }
  1002. } else {
  1003. lrbp->utr_descriptor_ptr->prd_table_length = 0;
  1004. }
  1005. return 0;
  1006. }
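/*
 * PRDT encoding example (illustrative): a 4 KiB scatterlist segment at DMA
 * address 0x123456000 is described with a zero-based length and a split
 * 64-bit address:
 *	size       = cpu_to_le32(4096 - 1)      (length minus one)
 *	base_addr  = cpu_to_le32(0x23456000)    (lower 32 bits)
 *	upper_addr = cpu_to_le32(0x1)           (upper 32 bits)
 * matching the sg_dma_len(sg) - 1 encoding used above.
 */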
  1007. /**
  1008. * ufshcd_enable_intr - enable interrupts
  1009. * @hba: per adapter instance
  1010. * @intrs: interrupt bits
  1011. */
  1012. static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
  1013. {
  1014. u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
  1015. if (hba->ufs_version == UFSHCI_VERSION_10) {
  1016. u32 rw;
  1017. rw = set & INTERRUPT_MASK_RW_VER_10;
  1018. set = rw | ((set ^ intrs) & intrs);
  1019. } else {
  1020. set |= intrs;
  1021. }
  1022. ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  1023. }
  1024. /**
  1025. * ufshcd_disable_intr - disable interrupts
  1026. * @hba: per adapter instance
  1027. * @intrs: interrupt bits
  1028. */
  1029. static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
  1030. {
  1031. u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
  1032. if (hba->ufs_version == UFSHCI_VERSION_10) {
  1033. u32 rw;
  1034. rw = (set & INTERRUPT_MASK_RW_VER_10) &
  1035. ~(intrs & INTERRUPT_MASK_RW_VER_10);
  1036. set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
  1037. } else {
  1038. set &= ~intrs;
  1039. }
  1040. ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  1041. }
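/*
 * Usage sketch (illustrative): interrupt bits are ORed into (or masked out
 * of) REG_INTERRUPT_ENABLE. For example, ufshcd_uic_pwr_ctrl() below
 * temporarily masks UIC command completion around a power mode change:
 *
 *	ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 *	... issue the UIC command and wait on uic_async_done ...
 *	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 */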
  1042. /**
1043. * ufshcd_prepare_req_desc_hdr() - Fills the request's header
1044. * descriptor according to the request
1045. * @lrbp: pointer to local reference block
1046. * @upiu_flags: flags required in the header
1047. * @cmd_dir: request's data direction
  1048. */
  1049. static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
  1050. u32 *upiu_flags, enum dma_data_direction cmd_dir)
  1051. {
  1052. struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
  1053. u32 data_direction;
  1054. u32 dword_0;
  1055. if (cmd_dir == DMA_FROM_DEVICE) {
  1056. data_direction = UTP_DEVICE_TO_HOST;
  1057. *upiu_flags = UPIU_CMD_FLAGS_READ;
  1058. } else if (cmd_dir == DMA_TO_DEVICE) {
  1059. data_direction = UTP_HOST_TO_DEVICE;
  1060. *upiu_flags = UPIU_CMD_FLAGS_WRITE;
  1061. } else {
  1062. data_direction = UTP_NO_DATA_TRANSFER;
  1063. *upiu_flags = UPIU_CMD_FLAGS_NONE;
  1064. }
  1065. dword_0 = data_direction | (lrbp->command_type
  1066. << UPIU_COMMAND_TYPE_OFFSET);
  1067. if (lrbp->intr_cmd)
  1068. dword_0 |= UTP_REQ_DESC_INT_CMD;
  1069. /* Transfer request descriptor header fields */
  1070. req_desc->header.dword_0 = cpu_to_le32(dword_0);
  1071. /* dword_1 is reserved, hence it is set to 0 */
  1072. req_desc->header.dword_1 = 0;
  1073. /*
  1074. * assigning invalid value for command status. Controller
  1075. * updates OCS on command completion, with the command
  1076. * status
  1077. */
  1078. req_desc->header.dword_2 =
  1079. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  1080. /* dword_3 is reserved, hence it is set to 0 */
  1081. req_desc->header.dword_3 = 0;
  1082. req_desc->prd_table_length = 0;
  1083. }
  1084. /**
1085. * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
1086. * for SCSI commands
1087. * @lrbp: local reference block pointer
1088. * @upiu_flags: flags
  1089. */
  1090. static
  1091. void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
  1092. {
  1093. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1094. unsigned short cdb_len;
  1095. /* command descriptor fields */
  1096. ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
  1097. UPIU_TRANSACTION_COMMAND, upiu_flags,
  1098. lrbp->lun, lrbp->task_tag);
  1099. ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
  1100. UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
  1101. /* Total EHS length and Data segment length will be zero */
  1102. ucd_req_ptr->header.dword_2 = 0;
  1103. ucd_req_ptr->sc.exp_data_transfer_len =
  1104. cpu_to_be32(lrbp->cmd->sdb.length);
  1105. cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
  1106. memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
  1107. memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
  1108. memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  1109. }
  1110. /**
1111. * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
1112. * for query requests
  1113. * @hba: UFS hba
  1114. * @lrbp: local reference block pointer
  1115. * @upiu_flags: flags
  1116. */
  1117. static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
  1118. struct ufshcd_lrb *lrbp, u32 upiu_flags)
  1119. {
  1120. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1121. struct ufs_query *query = &hba->dev_cmd.query;
  1122. u16 len = be16_to_cpu(query->request.upiu_req.length);
  1123. u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
  1124. /* Query request header */
  1125. ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
  1126. UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
  1127. lrbp->lun, lrbp->task_tag);
  1128. ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
  1129. 0, query->request.query_func, 0, 0);
1130. /* Data segment length is only needed for WRITE_DESC */
  1131. if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
  1132. ucd_req_ptr->header.dword_2 =
  1133. UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
  1134. else
  1135. ucd_req_ptr->header.dword_2 = 0;
  1136. /* Copy the Query Request buffer as is */
  1137. memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
  1138. QUERY_OSF_SIZE);
  1139. /* Copy the Descriptor */
  1140. if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
  1141. memcpy(descp, query->descriptor, len);
  1142. memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  1143. }
  1144. static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
  1145. {
  1146. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1147. memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
  1148. /* command descriptor fields */
  1149. ucd_req_ptr->header.dword_0 =
  1150. UPIU_HEADER_DWORD(
  1151. UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
  1152. /* clear rest of the fields of basic header */
  1153. ucd_req_ptr->header.dword_1 = 0;
  1154. ucd_req_ptr->header.dword_2 = 0;
  1155. memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
  1156. }
  1157. /**
1158. * ufshcd_comp_devman_upiu - prepare a UFS Protocol Information Unit (UPIU)
1159. * for device management purposes
1160. * @hba: per adapter instance
1161. * @lrbp: pointer to local reference block
  1162. */
  1163. static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1164. {
  1165. u32 upiu_flags;
  1166. int ret = 0;
  1167. if (hba->ufs_version == UFSHCI_VERSION_20)
  1168. lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
  1169. else
  1170. lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
  1171. ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
  1172. if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
  1173. ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
  1174. else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
  1175. ufshcd_prepare_utp_nop_upiu(lrbp);
  1176. else
  1177. ret = -EINVAL;
  1178. return ret;
  1179. }
  1180. /**
1181. * ufshcd_comp_scsi_upiu - prepare a UFS Protocol Information Unit (UPIU)
1182. * for SCSI purposes
1183. * @hba: per adapter instance
1184. * @lrbp: pointer to local reference block
  1185. */
  1186. static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1187. {
  1188. u32 upiu_flags;
  1189. int ret = 0;
  1190. if (hba->ufs_version == UFSHCI_VERSION_20)
  1191. lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
  1192. else
  1193. lrbp->command_type = UTP_CMD_TYPE_SCSI;
  1194. if (likely(lrbp->cmd)) {
  1195. ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
  1196. lrbp->cmd->sc_data_direction);
  1197. ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
  1198. } else {
  1199. ret = -EINVAL;
  1200. }
  1201. return ret;
  1202. }
  1203. /*
  1204. * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
  1205. * @scsi_lun: scsi LUN id
  1206. *
  1207. * Returns UPIU LUN id
  1208. */
  1209. static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
  1210. {
  1211. if (scsi_is_wlun(scsi_lun))
  1212. return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
  1213. | UFS_UPIU_WLUN_ID;
  1214. else
  1215. return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
  1216. }
  1217. /**
  1218. * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1219. * @upiu_wlun_id: UPIU W-LUN id
  1220. *
  1221. * Returns SCSI W-LUN id
  1222. */
  1223. static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
  1224. {
  1225. return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
  1226. }
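/*
 * Worked example (illustrative, assuming UFS_UPIU_RPMB_WLUN == 0xC4,
 * UFS_UPIU_WLUN_ID == 0x80 and SCSI_W_LUN_BASE == 0xc100): the RPMB W-LUN
 * maps to SCSI W-LUN (0xC4 & ~0x80) | 0xc100 = 0xc144, and
 * ufshcd_scsi_to_upiu_lun(0xc144) maps it back to (0xc144 & 0x7F) | 0x80
 * = 0xC4, so the two helpers are inverses for well known LUs.
 */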
  1227. /**
  1228. * ufshcd_queuecommand - main entry point for SCSI requests
1229. * @host: SCSI host pointer
1230. * @cmd: command from SCSI Midlayer
  1231. *
  1232. * Returns 0 for success, non-zero in case of failure
  1233. */
  1234. static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
  1235. {
  1236. struct ufshcd_lrb *lrbp;
  1237. struct ufs_hba *hba;
  1238. unsigned long flags;
  1239. int tag;
  1240. int err = 0;
  1241. hba = shost_priv(host);
  1242. tag = cmd->request->tag;
  1243. if (!ufshcd_valid_tag(hba, tag)) {
  1244. dev_err(hba->dev,
  1245. "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
  1246. __func__, tag, cmd, cmd->request);
  1247. BUG();
  1248. }
  1249. spin_lock_irqsave(hba->host->host_lock, flags);
  1250. switch (hba->ufshcd_state) {
  1251. case UFSHCD_STATE_OPERATIONAL:
  1252. break;
  1253. case UFSHCD_STATE_EH_SCHEDULED:
  1254. case UFSHCD_STATE_RESET:
  1255. err = SCSI_MLQUEUE_HOST_BUSY;
  1256. goto out_unlock;
  1257. case UFSHCD_STATE_ERROR:
  1258. set_host_byte(cmd, DID_ERROR);
  1259. cmd->scsi_done(cmd);
  1260. goto out_unlock;
  1261. default:
  1262. dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
  1263. __func__, hba->ufshcd_state);
  1264. set_host_byte(cmd, DID_BAD_TARGET);
  1265. cmd->scsi_done(cmd);
  1266. goto out_unlock;
  1267. }
  1268. /* if error handling is in progress, don't issue commands */
  1269. if (ufshcd_eh_in_progress(hba)) {
  1270. set_host_byte(cmd, DID_ERROR);
  1271. cmd->scsi_done(cmd);
  1272. goto out_unlock;
  1273. }
  1274. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1275. /* acquire the tag to make sure device cmds don't use it */
  1276. if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
  1277. /*
  1278. * Dev manage command in progress, requeue the command.
  1279. * Requeuing the command helps in cases where the request *may*
  1280. * find different tag instead of waiting for dev manage command
  1281. * completion.
  1282. */
  1283. err = SCSI_MLQUEUE_HOST_BUSY;
  1284. goto out;
  1285. }
  1286. err = ufshcd_hold(hba, true);
  1287. if (err) {
  1288. err = SCSI_MLQUEUE_HOST_BUSY;
  1289. clear_bit_unlock(tag, &hba->lrb_in_use);
  1290. goto out;
  1291. }
  1292. WARN_ON(hba->clk_gating.state != CLKS_ON);
  1293. lrbp = &hba->lrb[tag];
  1294. WARN_ON(lrbp->cmd);
  1295. lrbp->cmd = cmd;
  1296. lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
  1297. lrbp->sense_buffer = cmd->sense_buffer;
  1298. lrbp->task_tag = tag;
  1299. lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
  1300. lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
  1301. ufshcd_comp_scsi_upiu(hba, lrbp);
  1302. err = ufshcd_map_sg(hba, lrbp);
  1303. if (err) {
  1304. lrbp->cmd = NULL;
  1305. clear_bit_unlock(tag, &hba->lrb_in_use);
  1306. goto out;
  1307. }
  1308. /* issue command to the controller */
  1309. spin_lock_irqsave(hba->host->host_lock, flags);
  1310. ufshcd_send_command(hba, tag);
  1311. out_unlock:
  1312. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1313. out:
  1314. return err;
  1315. }
  1316. static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
  1317. struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
  1318. {
  1319. lrbp->cmd = NULL;
  1320. lrbp->sense_bufflen = 0;
  1321. lrbp->sense_buffer = NULL;
  1322. lrbp->task_tag = tag;
  1323. lrbp->lun = 0; /* device management cmd is not specific to any LUN */
  1324. lrbp->intr_cmd = true; /* No interrupt aggregation */
  1325. hba->dev_cmd.type = cmd_type;
  1326. return ufshcd_comp_devman_upiu(hba, lrbp);
  1327. }
  1328. static int
  1329. ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
  1330. {
  1331. int err = 0;
  1332. unsigned long flags;
  1333. u32 mask = 1 << tag;
  1334. /* clear outstanding transaction before retry */
  1335. spin_lock_irqsave(hba->host->host_lock, flags);
  1336. ufshcd_utrl_clear(hba, tag);
  1337. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1338. /*
1339. * wait for h/w to clear the corresponding bit in the door-bell.
  1340. * max. wait is 1 sec.
  1341. */
  1342. err = ufshcd_wait_for_register(hba,
  1343. REG_UTP_TRANSFER_REQ_DOOR_BELL,
  1344. mask, ~mask, 1000, 1000, true);
  1345. return err;
  1346. }
  1347. static int
  1348. ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1349. {
  1350. struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
  1351. /* Get the UPIU response */
  1352. query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
  1353. UPIU_RSP_CODE_OFFSET;
  1354. return query_res->response;
  1355. }
  1356. /**
  1357. * ufshcd_dev_cmd_completion() - handles device management command responses
  1358. * @hba: per adapter instance
  1359. * @lrbp: pointer to local reference block
  1360. */
  1361. static int
  1362. ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1363. {
  1364. int resp;
  1365. int err = 0;
  1366. resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  1367. switch (resp) {
  1368. case UPIU_TRANSACTION_NOP_IN:
  1369. if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
  1370. err = -EINVAL;
  1371. dev_err(hba->dev, "%s: unexpected response %x\n",
  1372. __func__, resp);
  1373. }
  1374. break;
  1375. case UPIU_TRANSACTION_QUERY_RSP:
  1376. err = ufshcd_check_query_response(hba, lrbp);
  1377. if (!err)
  1378. err = ufshcd_copy_query_response(hba, lrbp);
  1379. break;
  1380. case UPIU_TRANSACTION_REJECT_UPIU:
  1381. /* TODO: handle Reject UPIU Response */
  1382. err = -EPERM;
  1383. dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
  1384. __func__);
  1385. break;
  1386. default:
  1387. err = -EINVAL;
  1388. dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
  1389. __func__, resp);
  1390. break;
  1391. }
  1392. return err;
  1393. }
  1394. static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
  1395. struct ufshcd_lrb *lrbp, int max_timeout)
  1396. {
  1397. int err = 0;
  1398. unsigned long time_left;
  1399. unsigned long flags;
  1400. time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
  1401. msecs_to_jiffies(max_timeout));
  1402. spin_lock_irqsave(hba->host->host_lock, flags);
  1403. hba->dev_cmd.complete = NULL;
  1404. if (likely(time_left)) {
  1405. err = ufshcd_get_tr_ocs(lrbp);
  1406. if (!err)
  1407. err = ufshcd_dev_cmd_completion(hba, lrbp);
  1408. }
  1409. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1410. if (!time_left) {
  1411. err = -ETIMEDOUT;
  1412. dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
  1413. __func__, lrbp->task_tag);
  1414. if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
  1415. /* successfully cleared the command, retry if needed */
  1416. err = -EAGAIN;
  1417. /*
  1418. * in case of an error, after clearing the doorbell,
  1419. * we also need to clear the outstanding_request
  1420. * field in hba
  1421. */
  1422. ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
  1423. }
  1424. return err;
  1425. }
  1426. /**
  1427. * ufshcd_get_dev_cmd_tag - Get device management command tag
  1428. * @hba: per-adapter instance
1429. * @tag_out: pointer to variable with available slot value
1430. *
1431. * Get a free slot and lock it until device management command
1432. * completes.
1433. *
1434. * Returns false if a free slot is unavailable for locking, else
1435. * returns true with the tag value in @tag_out.
  1436. */
  1437. static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
  1438. {
  1439. int tag;
  1440. bool ret = false;
  1441. unsigned long tmp;
  1442. if (!tag_out)
  1443. goto out;
  1444. do {
  1445. tmp = ~hba->lrb_in_use;
  1446. tag = find_last_bit(&tmp, hba->nutrs);
  1447. if (tag >= hba->nutrs)
  1448. goto out;
  1449. } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
  1450. *tag_out = tag;
  1451. ret = true;
  1452. out:
  1453. return ret;
  1454. }
  1455. static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
  1456. {
  1457. clear_bit_unlock(tag, &hba->lrb_in_use);
  1458. }
  1459. /**
  1460. * ufshcd_exec_dev_cmd - API for sending device management requests
1461. * @hba: UFS hba
1462. * @cmd_type: specifies the type (NOP, Query...)
1463. * @timeout: timeout in milliseconds
  1464. *
  1465. * NOTE: Since there is only one available tag for device management commands,
  1466. * it is expected you hold the hba->dev_cmd.lock mutex.
  1467. */
  1468. static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
  1469. enum dev_cmd_type cmd_type, int timeout)
  1470. {
  1471. struct ufshcd_lrb *lrbp;
  1472. int err;
  1473. int tag;
  1474. struct completion wait;
  1475. unsigned long flags;
  1476. /*
  1477. * Get free slot, sleep if slots are unavailable.
  1478. * Even though we use wait_event() which sleeps indefinitely,
  1479. * the maximum wait time is bounded by SCSI request timeout.
  1480. */
  1481. wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
  1482. init_completion(&wait);
  1483. lrbp = &hba->lrb[tag];
  1484. WARN_ON(lrbp->cmd);
  1485. err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
  1486. if (unlikely(err))
  1487. goto out_put_tag;
  1488. hba->dev_cmd.complete = &wait;
  1489. /* Make sure descriptors are ready before ringing the doorbell */
  1490. wmb();
  1491. spin_lock_irqsave(hba->host->host_lock, flags);
  1492. ufshcd_send_command(hba, tag);
  1493. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1494. err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
  1495. out_put_tag:
  1496. ufshcd_put_dev_cmd_tag(hba, tag);
  1497. wake_up(&hba->dev_cmd.tag_wq);
  1498. return err;
  1499. }
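/*
 * Caller pattern sketch (illustrative only): device management commands are
 * serialized with hba->dev_cmd.lock and bracketed by clock gating
 * references. A NOP OUT verification might look like the following, where
 * NOP_OUT_TIMEOUT is the driver's NOP timeout in milliseconds:
 *
 *	int err;
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */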
  1500. /**
  1501. * ufshcd_init_query() - init the query response and request parameters
  1502. * @hba: per-adapter instance
  1503. * @request: address of the request pointer to be initialized
  1504. * @response: address of the response pointer to be initialized
  1505. * @opcode: operation to perform
  1506. * @idn: flag idn to access
  1507. * @index: LU number to access
  1508. * @selector: query/flag/descriptor further identification
  1509. */
  1510. static inline void ufshcd_init_query(struct ufs_hba *hba,
  1511. struct ufs_query_req **request, struct ufs_query_res **response,
  1512. enum query_opcode opcode, u8 idn, u8 index, u8 selector)
  1513. {
  1514. *request = &hba->dev_cmd.query.request;
  1515. *response = &hba->dev_cmd.query.response;
  1516. memset(*request, 0, sizeof(struct ufs_query_req));
  1517. memset(*response, 0, sizeof(struct ufs_query_res));
  1518. (*request)->upiu_req.opcode = opcode;
  1519. (*request)->upiu_req.idn = idn;
  1520. (*request)->upiu_req.index = index;
  1521. (*request)->upiu_req.selector = selector;
  1522. }
  1523. static int ufshcd_query_flag_retry(struct ufs_hba *hba,
  1524. enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
  1525. {
  1526. int ret;
  1527. int retries;
  1528. for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
  1529. ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
  1530. if (ret)
  1531. dev_dbg(hba->dev,
  1532. "%s: failed with error %d, retries %d\n",
  1533. __func__, ret, retries);
  1534. else
  1535. break;
  1536. }
  1537. if (ret)
  1538. dev_err(hba->dev,
  1539. "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
  1540. __func__, opcode, idn, ret, retries);
  1541. return ret;
  1542. }
  1543. /**
  1544. * ufshcd_query_flag() - API function for sending flag query requests
1545. * @hba: per-adapter instance
1546. * @opcode: flag query to perform
1547. * @idn: flag idn to access
1548. * @flag_res: the flag value after the query request completes
  1549. *
  1550. * Returns 0 for success, non-zero in case of failure
  1551. */
  1552. int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
  1553. enum flag_idn idn, bool *flag_res)
  1554. {
  1555. struct ufs_query_req *request = NULL;
  1556. struct ufs_query_res *response = NULL;
  1557. int err, index = 0, selector = 0;
  1558. int timeout = QUERY_REQ_TIMEOUT;
  1559. BUG_ON(!hba);
  1560. ufshcd_hold(hba, false);
  1561. mutex_lock(&hba->dev_cmd.lock);
  1562. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1563. selector);
  1564. switch (opcode) {
  1565. case UPIU_QUERY_OPCODE_SET_FLAG:
  1566. case UPIU_QUERY_OPCODE_CLEAR_FLAG:
  1567. case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
  1568. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1569. break;
  1570. case UPIU_QUERY_OPCODE_READ_FLAG:
  1571. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1572. if (!flag_res) {
  1573. /* No dummy reads */
  1574. dev_err(hba->dev, "%s: Invalid argument for read request\n",
  1575. __func__);
  1576. err = -EINVAL;
  1577. goto out_unlock;
  1578. }
  1579. break;
  1580. default:
  1581. dev_err(hba->dev,
  1582. "%s: Expected query flag opcode but got = %d\n",
  1583. __func__, opcode);
  1584. err = -EINVAL;
  1585. goto out_unlock;
  1586. }
  1587. if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
  1588. timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
  1589. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
  1590. if (err) {
  1591. dev_err(hba->dev,
  1592. "%s: Sending flag query for idn %d failed, err = %d\n",
  1593. __func__, idn, err);
  1594. goto out_unlock;
  1595. }
  1596. if (flag_res)
  1597. *flag_res = (be32_to_cpu(response->upiu_res.value) &
  1598. MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
  1599. out_unlock:
  1600. mutex_unlock(&hba->dev_cmd.lock);
  1601. ufshcd_release(hba);
  1602. return err;
  1603. }
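/*
 * Usage sketch (illustrative only): setting and then polling the fDeviceInit
 * flag during device initialization can go through the retry wrapper:
 *
 *	bool flag_res = false;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *			QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	if (!err)
 *		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */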
  1604. /**
  1605. * ufshcd_query_attr - API function for sending attribute requests
1606. * @hba: per-adapter instance
1607. * @opcode: attribute opcode
1608. * @idn: attribute idn to access
1609. * @index: index field
1610. * @selector: selector field
1611. * @attr_val: the attribute value after the query request completes
  1612. *
  1613. * Returns 0 for success, non-zero in case of failure
  1614. */
  1615. static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
  1616. enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  1617. {
  1618. struct ufs_query_req *request = NULL;
  1619. struct ufs_query_res *response = NULL;
  1620. int err;
  1621. BUG_ON(!hba);
  1622. ufshcd_hold(hba, false);
  1623. if (!attr_val) {
  1624. dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
  1625. __func__, opcode);
  1626. err = -EINVAL;
  1627. goto out;
  1628. }
  1629. mutex_lock(&hba->dev_cmd.lock);
  1630. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1631. selector);
  1632. switch (opcode) {
  1633. case UPIU_QUERY_OPCODE_WRITE_ATTR:
  1634. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1635. request->upiu_req.value = cpu_to_be32(*attr_val);
  1636. break;
  1637. case UPIU_QUERY_OPCODE_READ_ATTR:
  1638. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1639. break;
  1640. default:
  1641. dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
  1642. __func__, opcode);
  1643. err = -EINVAL;
  1644. goto out_unlock;
  1645. }
  1646. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1647. if (err) {
  1648. dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
  1649. __func__, opcode, idn, err);
  1650. goto out_unlock;
  1651. }
  1652. *attr_val = be32_to_cpu(response->upiu_res.value);
  1653. out_unlock:
  1654. mutex_unlock(&hba->dev_cmd.lock);
  1655. out:
  1656. ufshcd_release(hba);
  1657. return err;
  1658. }
  1659. /**
  1660. * ufshcd_query_attr_retry() - API function for sending query
  1661. * attribute with retries
  1662. * @hba: per-adapter instance
  1663. * @opcode: attribute opcode
  1664. * @idn: attribute idn to access
  1665. * @index: index field
  1666. * @selector: selector field
  1667. * @attr_val: the attribute value after the query request
  1668. * completes
  1669. *
  1670. * Returns 0 for success, non-zero in case of failure
  1671. */
  1672. static int ufshcd_query_attr_retry(struct ufs_hba *hba,
  1673. enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
  1674. u32 *attr_val)
  1675. {
  1676. int ret = 0;
  1677. u32 retries;
  1678. for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
  1679. ret = ufshcd_query_attr(hba, opcode, idn, index,
  1680. selector, attr_val);
  1681. if (ret)
  1682. dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
  1683. __func__, ret, retries);
  1684. else
  1685. break;
  1686. }
  1687. if (ret)
  1688. dev_err(hba->dev,
  1689. "%s: query attribute, idn %d, failed with error %d after %d retires\n",
  1690. __func__, idn, ret, QUERY_REQ_RETRIES);
  1691. return ret;
  1692. }
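/*
 * Usage sketch (illustrative only, assuming the attribute IDN definitions
 * from ufs.h such as QUERY_ATTR_IDN_BKOPS_STATUS): reading a device
 * attribute goes through the retry wrapper with index and selector zero:
 *
 *	u32 bkops_status = 0;
 *	int err;
 *
 *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, &bkops_status);
 */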
  1693. static int __ufshcd_query_descriptor(struct ufs_hba *hba,
  1694. enum query_opcode opcode, enum desc_idn idn, u8 index,
  1695. u8 selector, u8 *desc_buf, int *buf_len)
  1696. {
  1697. struct ufs_query_req *request = NULL;
  1698. struct ufs_query_res *response = NULL;
  1699. int err;
  1700. BUG_ON(!hba);
  1701. ufshcd_hold(hba, false);
  1702. if (!desc_buf) {
  1703. dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
  1704. __func__, opcode);
  1705. err = -EINVAL;
  1706. goto out;
  1707. }
  1708. if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
  1709. dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
  1710. __func__, *buf_len);
  1711. err = -EINVAL;
  1712. goto out;
  1713. }
  1714. mutex_lock(&hba->dev_cmd.lock);
  1715. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1716. selector);
  1717. hba->dev_cmd.query.descriptor = desc_buf;
  1718. request->upiu_req.length = cpu_to_be16(*buf_len);
  1719. switch (opcode) {
  1720. case UPIU_QUERY_OPCODE_WRITE_DESC:
  1721. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1722. break;
  1723. case UPIU_QUERY_OPCODE_READ_DESC:
  1724. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1725. break;
  1726. default:
  1727. dev_err(hba->dev,
  1728. "%s: Expected query descriptor opcode but got = 0x%.2x\n",
  1729. __func__, opcode);
  1730. err = -EINVAL;
  1731. goto out_unlock;
  1732. }
  1733. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1734. if (err) {
  1735. dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
  1736. __func__, opcode, idn, err);
  1737. goto out_unlock;
  1738. }
  1739. hba->dev_cmd.query.descriptor = NULL;
  1740. *buf_len = be16_to_cpu(response->upiu_res.length);
  1741. out_unlock:
  1742. mutex_unlock(&hba->dev_cmd.lock);
  1743. out:
  1744. ufshcd_release(hba);
  1745. return err;
  1746. }
  1747. /**
  1748. * ufshcd_query_descriptor_retry - API function for sending descriptor
  1749. * requests
1750. * @hba: per-adapter instance
1751. * @opcode: descriptor opcode
1752. * @idn: descriptor idn to access
1753. * @index: index field
1754. * @selector: selector field
1755. * @desc_buf: the buffer that contains the descriptor
1756. * @buf_len: length parameter passed to the device
  1757. *
  1758. * Returns 0 for success, non-zero in case of failure.
  1759. * The buf_len parameter will contain, on return, the length parameter
  1760. * received on the response.
  1761. */
  1762. int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
  1763. enum query_opcode opcode, enum desc_idn idn, u8 index,
  1764. u8 selector, u8 *desc_buf, int *buf_len)
  1765. {
  1766. int err;
  1767. int retries;
  1768. for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
  1769. err = __ufshcd_query_descriptor(hba, opcode, idn, index,
  1770. selector, desc_buf, buf_len);
  1771. if (!err || err == -EINVAL)
  1772. break;
  1773. }
  1774. return err;
  1775. }
  1776. EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
  1777. /**
  1778. * ufshcd_read_desc_length - read the specified descriptor length from header
  1779. * @hba: Pointer to adapter instance
  1780. * @desc_id: descriptor idn value
  1781. * @desc_index: descriptor index
  1782. * @desc_length: pointer to variable to read the length of descriptor
  1783. *
  1784. * Return 0 in case of success, non-zero otherwise
  1785. */
  1786. static int ufshcd_read_desc_length(struct ufs_hba *hba,
  1787. enum desc_idn desc_id,
  1788. int desc_index,
  1789. int *desc_length)
  1790. {
  1791. int ret;
  1792. u8 header[QUERY_DESC_HDR_SIZE];
  1793. int header_len = QUERY_DESC_HDR_SIZE;
  1794. if (desc_id >= QUERY_DESC_IDN_MAX)
  1795. return -EINVAL;
  1796. ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
  1797. desc_id, desc_index, 0, header,
  1798. &header_len);
  1799. if (ret) {
  1800. dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
  1801. __func__, desc_id);
  1802. return ret;
  1803. } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
  1804. dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
  1805. __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
  1806. desc_id);
  1807. ret = -EINVAL;
  1808. }
  1809. *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
  1810. return ret;
  1811. }
  1812. /**
  1813. * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
  1814. * @hba: Pointer to adapter instance
  1815. * @desc_id: descriptor idn value
  1816. * @desc_len: mapped desc length (out)
  1817. *
  1818. * Return 0 in case of success, non-zero otherwise
  1819. */
  1820. int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
  1821. enum desc_idn desc_id, int *desc_len)
  1822. {
  1823. switch (desc_id) {
  1824. case QUERY_DESC_IDN_DEVICE:
  1825. *desc_len = hba->desc_size.dev_desc;
  1826. break;
  1827. case QUERY_DESC_IDN_POWER:
  1828. *desc_len = hba->desc_size.pwr_desc;
  1829. break;
  1830. case QUERY_DESC_IDN_GEOMETRY:
  1831. *desc_len = hba->desc_size.geom_desc;
  1832. break;
  1833. case QUERY_DESC_IDN_CONFIGURATION:
  1834. *desc_len = hba->desc_size.conf_desc;
  1835. break;
  1836. case QUERY_DESC_IDN_UNIT:
  1837. *desc_len = hba->desc_size.unit_desc;
  1838. break;
  1839. case QUERY_DESC_IDN_INTERCONNECT:
  1840. *desc_len = hba->desc_size.interc_desc;
  1841. break;
  1842. case QUERY_DESC_IDN_STRING:
  1843. *desc_len = QUERY_DESC_MAX_SIZE;
  1844. break;
  1845. case QUERY_DESC_IDN_RFU_0:
  1846. case QUERY_DESC_IDN_RFU_1:
  1847. *desc_len = 0;
  1848. break;
  1849. default:
  1850. *desc_len = 0;
  1851. return -EINVAL;
  1852. }
  1853. return 0;
  1854. }
  1855. EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
  1856. /**
  1857. * ufshcd_read_desc_param - read the specified descriptor parameter
  1858. * @hba: Pointer to adapter instance
  1859. * @desc_id: descriptor idn value
  1860. * @desc_index: descriptor index
  1861. * @param_offset: offset of the parameter to read
  1862. * @param_read_buf: pointer to buffer where parameter would be read
  1863. * @param_size: sizeof(param_read_buf)
  1864. *
  1865. * Return 0 in case of success, non-zero otherwise
  1866. */
  1867. static int ufshcd_read_desc_param(struct ufs_hba *hba,
  1868. enum desc_idn desc_id,
  1869. int desc_index,
  1870. u8 param_offset,
  1871. u8 *param_read_buf,
  1872. u8 param_size)
  1873. {
  1874. int ret;
  1875. u8 *desc_buf;
  1876. int buff_len;
  1877. bool is_kmalloc = true;
  1878. /* Safety check */
  1879. if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
  1880. return -EINVAL;
  1881. /* Get the max length of descriptor from structure filled up at probe
  1882. * time.
  1883. */
  1884. ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
  1885. /* Sanity checks */
  1886. if (ret || !buff_len) {
  1887. dev_err(hba->dev, "%s: Failed to get full descriptor length",
  1888. __func__);
  1889. return ret;
  1890. }
  1891. /* Check whether we need temp memory */
  1892. if (param_offset != 0 || param_size < buff_len) {
  1893. desc_buf = kmalloc(buff_len, GFP_KERNEL);
  1894. if (!desc_buf)
  1895. return -ENOMEM;
  1896. } else {
  1897. desc_buf = param_read_buf;
  1898. is_kmalloc = false;
  1899. }
  1900. /* Request for full descriptor */
  1901. ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
  1902. desc_id, desc_index, 0,
  1903. desc_buf, &buff_len);
  1904. if (ret) {
  1905. dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
  1906. __func__, desc_id, desc_index, param_offset, ret);
  1907. goto out;
  1908. }
  1909. /* Sanity check */
  1910. if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
  1911. dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
  1912. __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
  1913. ret = -EINVAL;
  1914. goto out;
  1915. }
1916. /* Check that we will not copy more data than is available */
  1917. if (is_kmalloc && param_size > buff_len)
  1918. param_size = buff_len;
  1919. if (is_kmalloc)
  1920. memcpy(param_read_buf, &desc_buf[param_offset], param_size);
  1921. out:
  1922. if (is_kmalloc)
  1923. kfree(desc_buf);
  1924. return ret;
  1925. }
  1926. static inline int ufshcd_read_desc(struct ufs_hba *hba,
  1927. enum desc_idn desc_id,
  1928. int desc_index,
  1929. u8 *buf,
  1930. u32 size)
  1931. {
  1932. return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
  1933. }
  1934. static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
  1935. u8 *buf,
  1936. u32 size)
  1937. {
  1938. return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
  1939. }
  1940. int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
  1941. {
  1942. return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
  1943. }
  1944. EXPORT_SYMBOL(ufshcd_read_device_desc);
  1945. /**
  1946. * ufshcd_read_string_desc - read string descriptor
  1947. * @hba: pointer to adapter instance
  1948. * @desc_index: descriptor index
  1949. * @buf: pointer to buffer where descriptor would be read
  1950. * @size: size of buf
  1951. * @ascii: if true convert from unicode to ascii characters
  1952. *
  1953. * Return 0 in case of success, non-zero otherwise
  1954. */
  1955. int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
  1956. u32 size, bool ascii)
  1957. {
  1958. int err = 0;
  1959. err = ufshcd_read_desc(hba,
  1960. QUERY_DESC_IDN_STRING, desc_index, buf, size);
  1961. if (err) {
  1962. dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
  1963. __func__, QUERY_REQ_RETRIES, err);
  1964. goto out;
  1965. }
  1966. if (ascii) {
  1967. int desc_len;
  1968. int ascii_len;
  1969. int i;
  1970. char *buff_ascii;
  1971. desc_len = buf[0];
  1972. /* remove header and divide by 2 to move from UTF16 to UTF8 */
  1973. ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
  1974. if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
  1975. dev_err(hba->dev, "%s: buffer allocated size is too small\n",
  1976. __func__);
  1977. err = -ENOMEM;
  1978. goto out;
  1979. }
  1980. buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
  1981. if (!buff_ascii) {
  1982. err = -ENOMEM;
  1983. goto out;
  1984. }
  1985. /*
  1986. * the descriptor contains string in UTF16 format
  1987. * we need to convert to utf-8 so it can be displayed
  1988. */
  1989. utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
  1990. desc_len - QUERY_DESC_HDR_SIZE,
  1991. UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
  1992. /* replace non-printable or non-ASCII characters with spaces */
  1993. for (i = 0; i < ascii_len; i++)
  1994. ufshcd_remove_non_printable(&buff_ascii[i]);
  1995. memset(buf + QUERY_DESC_HDR_SIZE, 0,
  1996. size - QUERY_DESC_HDR_SIZE);
  1997. memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
  1998. buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
  1999. kfree(buff_ascii);
  2000. }
  2001. out:
  2002. return err;
  2003. }
  2004. EXPORT_SYMBOL(ufshcd_read_string_desc);
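/*
 * Usage sketch (illustrative only, assuming the device descriptor parameter
 * offsets from ufs.h such as DEVICE_DESC_PARAM_PRDCT_NAME): reading a
 * device's product name as ASCII typically means fetching the string
 * descriptor index from the device descriptor first (here the hypothetical
 * "name_index"), then:
 *
 *	u8 str_desc[QUERY_DESC_MAX_SIZE] = {0};
 *	int err;
 *
 *	err = ufshcd_read_string_desc(hba, name_index, str_desc,
 *			QUERY_DESC_MAX_SIZE, true);
 */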
  2005. /**
  2006. * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  2007. * @hba: Pointer to adapter instance
  2008. * @lun: lun id
  2009. * @param_offset: offset of the parameter to read
  2010. * @param_read_buf: pointer to buffer where parameter would be read
  2011. * @param_size: sizeof(param_read_buf)
  2012. *
  2013. * Return 0 in case of success, non-zero otherwise
  2014. */
  2015. static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
  2016. int lun,
  2017. enum unit_desc_param param_offset,
  2018. u8 *param_read_buf,
  2019. u32 param_size)
  2020. {
  2021. /*
  2022. * Unit descriptors are only available for general purpose LUs (LUN id
  2023. * from 0 to 7) and RPMB Well known LU.
  2024. */
  2025. if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
  2026. return -EOPNOTSUPP;
  2027. return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
  2028. param_offset, param_read_buf, param_size);
  2029. }
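/*
 * Usage sketch (illustrative only, assuming the unit descriptor parameter
 * offsets from ufs.h such as UNIT_DESC_PARAM_LU_WR_PROTECT): reading a
 * single byte of a LU's unit descriptor:
 *
 *	u8 lun_wp = 0;
 *	int ret;
 *
 *	ret = ufshcd_read_unit_desc_param(hba, lun,
 *			UNIT_DESC_PARAM_LU_WR_PROTECT,
 *			&lun_wp, sizeof(lun_wp));
 */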
  2030. /**
  2031. * ufshcd_memory_alloc - allocate memory for host memory space data structures
  2032. * @hba: per adapter instance
  2033. *
  2034. * 1. Allocate DMA memory for Command Descriptor array
  2035. * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
  2036. * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
  2037. * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
  2038. * (UTMRDL)
  2039. * 4. Allocate memory for local reference block(lrb).
  2040. *
  2041. * Returns 0 for success, non-zero in case of failure
  2042. */
  2043. static int ufshcd_memory_alloc(struct ufs_hba *hba)
  2044. {
  2045. size_t utmrdl_size, utrdl_size, ucdl_size;
  2046. /* Allocate memory for UTP command descriptors */
  2047. ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
  2048. hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
  2049. ucdl_size,
  2050. &hba->ucdl_dma_addr,
  2051. GFP_KERNEL);
  2052. /*
2053. * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
2054. * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
2055. * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
2056. * be aligned to 128 bytes as well.
  2057. */
  2058. if (!hba->ucdl_base_addr ||
  2059. WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
  2060. dev_err(hba->dev,
  2061. "Command Descriptor Memory allocation failed\n");
  2062. goto out;
  2063. }
  2064. /*
  2065. * Allocate memory for UTP Transfer descriptors
  2066. * UFSHCI requires 1024 byte alignment of UTRD
  2067. */
  2068. utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
  2069. hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
  2070. utrdl_size,
  2071. &hba->utrdl_dma_addr,
  2072. GFP_KERNEL);
  2073. if (!hba->utrdl_base_addr ||
  2074. WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
  2075. dev_err(hba->dev,
  2076. "Transfer Descriptor Memory allocation failed\n");
  2077. goto out;
  2078. }
  2079. /*
  2080. * Allocate memory for UTP Task Management descriptors
  2081. * UFSHCI requires 1024 byte alignment of UTMRD
  2082. */
  2083. utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
  2084. hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
  2085. utmrdl_size,
  2086. &hba->utmrdl_dma_addr,
  2087. GFP_KERNEL);
  2088. if (!hba->utmrdl_base_addr ||
  2089. WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
  2090. dev_err(hba->dev,
  2091. "Task Management Descriptor Memory allocation failed\n");
  2092. goto out;
  2093. }
  2094. /* Allocate memory for local reference block */
  2095. hba->lrb = devm_kzalloc(hba->dev,
  2096. hba->nutrs * sizeof(struct ufshcd_lrb),
  2097. GFP_KERNEL);
  2098. if (!hba->lrb) {
  2099. dev_err(hba->dev, "LRB Memory allocation failed\n");
  2100. goto out;
  2101. }
  2102. return 0;
  2103. out:
  2104. return -ENOMEM;
  2105. }
  2106. /**
  2107. * ufshcd_host_memory_configure - configure local reference block with
  2108. * memory offsets
  2109. * @hba: per adapter instance
  2110. *
  2111. * Configure Host memory space
  2112. * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
  2113. * address.
  2114. * 2. Update each UTRD with Response UPIU offset, Response UPIU length
  2115. * and PRDT offset.
  2116. * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
  2117. * into local reference block.
  2118. */
  2119. static void ufshcd_host_memory_configure(struct ufs_hba *hba)
  2120. {
  2121. struct utp_transfer_cmd_desc *cmd_descp;
  2122. struct utp_transfer_req_desc *utrdlp;
  2123. dma_addr_t cmd_desc_dma_addr;
  2124. dma_addr_t cmd_desc_element_addr;
  2125. u16 response_offset;
  2126. u16 prdt_offset;
  2127. int cmd_desc_size;
  2128. int i;
  2129. utrdlp = hba->utrdl_base_addr;
  2130. cmd_descp = hba->ucdl_base_addr;
  2131. response_offset =
  2132. offsetof(struct utp_transfer_cmd_desc, response_upiu);
  2133. prdt_offset =
  2134. offsetof(struct utp_transfer_cmd_desc, prd_table);
  2135. cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
  2136. cmd_desc_dma_addr = hba->ucdl_dma_addr;
  2137. for (i = 0; i < hba->nutrs; i++) {
  2138. /* Configure UTRD with command descriptor base address */
  2139. cmd_desc_element_addr =
  2140. (cmd_desc_dma_addr + (cmd_desc_size * i));
  2141. utrdlp[i].command_desc_base_addr_lo =
  2142. cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
  2143. utrdlp[i].command_desc_base_addr_hi =
  2144. cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
  2145. /* Response upiu and prdt offset should be in double words */
  2146. if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
  2147. utrdlp[i].response_upiu_offset =
  2148. cpu_to_le16(response_offset);
  2149. utrdlp[i].prd_table_offset =
  2150. cpu_to_le16(prdt_offset);
  2151. utrdlp[i].response_upiu_length =
  2152. cpu_to_le16(ALIGNED_UPIU_SIZE);
  2153. } else {
  2154. utrdlp[i].response_upiu_offset =
  2155. cpu_to_le16((response_offset >> 2));
  2156. utrdlp[i].prd_table_offset =
  2157. cpu_to_le16((prdt_offset >> 2));
  2158. utrdlp[i].response_upiu_length =
  2159. cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
  2160. }
  2161. hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
  2162. hba->lrb[i].ucd_req_ptr =
  2163. (struct utp_upiu_req *)(cmd_descp + i);
  2164. hba->lrb[i].ucd_rsp_ptr =
  2165. (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
  2166. hba->lrb[i].ucd_prdt_ptr =
  2167. (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
  2168. }
  2169. }
  2170. /**
  2171. * ufshcd_dme_link_startup - Notify Unipro to perform link startup
  2172. * @hba: per adapter instance
  2173. *
  2174. * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
  2175. * in order to initialize the Unipro link startup procedure.
  2176. * Once the Unipro links are up, the device connected to the controller
  2177. * is detected.
  2178. *
  2179. * Returns 0 on success, non-zero value on failure
  2180. */
  2181. static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  2182. {
  2183. struct uic_command uic_cmd = {0};
  2184. int ret;
  2185. uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
  2186. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  2187. if (ret)
  2188. dev_err(hba->dev,
  2189. "dme-link-startup: error code %d\n", ret);
  2190. return ret;
  2191. }
  2192. static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
  2193. {
  2194. #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
  2195. unsigned long min_sleep_time_us;
  2196. if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
  2197. return;
  2198. /*
  2199. * last_dme_cmd_tstamp will be 0 only for 1st call to
  2200. * this function
  2201. */
  2202. if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
  2203. min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
  2204. } else {
  2205. unsigned long delta =
  2206. (unsigned long) ktime_to_us(
  2207. ktime_sub(ktime_get(),
  2208. hba->last_dme_cmd_tstamp));
  2209. if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
  2210. min_sleep_time_us =
  2211. MIN_DELAY_BEFORE_DME_CMDS_US - delta;
  2212. else
  2213. return; /* no more delay required */
  2214. }
  2215. /* allow sleep for extra 50us if needed */
  2216. usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
  2217. }
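/*
 * Worked example (illustrative): with MIN_DELAY_BEFORE_DME_CMDS_US == 1000,
 * if the previous DME command finished 400 us ago, delta == 400, so this
 * function sleeps in the range [600, 650] us; if it finished more than
 * 1000 us ago, no sleep is performed at all.
 */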
  2218. /**
  2219. * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  2220. * @hba: per adapter instance
  2221. * @attr_sel: uic command argument1
  2222. * @attr_set: attribute set type as uic command argument2
  2223. * @mib_val: setting value as uic command argument3
  2224. * @peer: indicate whether peer or local
  2225. *
  2226. * Returns 0 on success, non-zero value on failure
  2227. */
  2228. int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
  2229. u8 attr_set, u32 mib_val, u8 peer)
  2230. {
  2231. struct uic_command uic_cmd = {0};
  2232. static const char *const action[] = {
  2233. "dme-set",
  2234. "dme-peer-set"
  2235. };
  2236. const char *set = action[!!peer];
  2237. int ret;
  2238. int retries = UFS_UIC_COMMAND_RETRIES;
  2239. uic_cmd.command = peer ?
  2240. UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
  2241. uic_cmd.argument1 = attr_sel;
  2242. uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
  2243. uic_cmd.argument3 = mib_val;
  2244. do {
  2245. /* for peer attributes we retry upon failure */
  2246. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  2247. if (ret)
  2248. dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
  2249. set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
  2250. } while (ret && peer && --retries);
  2251. if (!retries)
  2252. dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
  2253. set, UIC_GET_ATTR_ID(attr_sel), mib_val,
  2254. retries);
  2255. return ret;
  2256. }
  2257. EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
  2258. /**
  2259. * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
  2260. * @hba: per adapter instance
  2261. * @attr_sel: uic command argument1
  2262. * @mib_val: the value of the attribute as returned by the UIC command
  2263. * @peer: indicate whether peer or local
  2264. *
  2265. * Returns 0 on success, non-zero value on failure
  2266. */
  2267. int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
  2268. u32 *mib_val, u8 peer)
  2269. {
  2270. struct uic_command uic_cmd = {0};
  2271. static const char *const action[] = {
  2272. "dme-get",
  2273. "dme-peer-get"
  2274. };
  2275. const char *get = action[!!peer];
  2276. int ret;
  2277. int retries = UFS_UIC_COMMAND_RETRIES;
  2278. struct ufs_pa_layer_attr orig_pwr_info;
  2279. struct ufs_pa_layer_attr temp_pwr_info;
  2280. bool pwr_mode_change = false;
  2281. if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
  2282. orig_pwr_info = hba->pwr_info;
  2283. temp_pwr_info = orig_pwr_info;
  2284. if (orig_pwr_info.pwr_tx == FAST_MODE ||
  2285. orig_pwr_info.pwr_rx == FAST_MODE) {
  2286. temp_pwr_info.pwr_tx = FASTAUTO_MODE;
  2287. temp_pwr_info.pwr_rx = FASTAUTO_MODE;
  2288. pwr_mode_change = true;
  2289. } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
  2290. orig_pwr_info.pwr_rx == SLOW_MODE) {
  2291. temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
  2292. temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
  2293. pwr_mode_change = true;
  2294. }
  2295. if (pwr_mode_change) {
  2296. ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
  2297. if (ret)
  2298. goto out;
  2299. }
  2300. }
  2301. uic_cmd.command = peer ?
  2302. UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
  2303. uic_cmd.argument1 = attr_sel;
  2304. do {
  2305. /* for peer attributes we retry upon failure */
  2306. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  2307. if (ret)
  2308. dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
  2309. get, UIC_GET_ATTR_ID(attr_sel), ret);
  2310. } while (ret && peer && --retries);
  2311. if (!retries)
  2312. dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
  2313. get, UIC_GET_ATTR_ID(attr_sel), retries);
  2314. if (mib_val && !ret)
  2315. *mib_val = uic_cmd.argument3;
  2316. if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
  2317. && pwr_mode_change)
  2318. ufshcd_change_power_mode(hba, &orig_pwr_info);
  2319. out:
  2320. return ret;
  2321. }
  2322. EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
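/*
 * Usage sketch (illustrative only, assuming the ufshcd_dme_set()/
 * ufshcd_dme_get() convenience wrappers declared in ufshcd.h): a vendor
 * driver can tweak a local UniPro attribute and read it back with:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 *	if (!ret)
 *		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &val);
 */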
  2323. /**
2324. * ufshcd_uic_pwr_ctrl - execute UIC commands that affect the link power
2325. * state and wait for them to take effect.
  2326. *
  2327. * @hba: per adapter instance
  2328. * @cmd: UIC command to execute
  2329. *
  2330. * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2331. * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2332. * and device UniPro link and hence their final completion would be indicated by
  2333. * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
  2334. * addition to normal UIC command completion Status (UCCS). This function only
  2335. * returns after the relevant status bits indicate the completion.
  2336. *
  2337. * Returns 0 on success, non-zero value on failure
  2338. */
  2339. static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
  2340. {
  2341. struct completion uic_async_done;
  2342. unsigned long flags;
  2343. u8 status;
  2344. int ret;
  2345. bool reenable_intr = false;
  2346. mutex_lock(&hba->uic_cmd_mutex);
  2347. init_completion(&uic_async_done);
  2348. ufshcd_add_delay_before_dme_cmd(hba);
  2349. spin_lock_irqsave(hba->host->host_lock, flags);
  2350. hba->uic_async_done = &uic_async_done;
  2351. if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
  2352. ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
  2353. /*
  2354. * Make sure UIC command completion interrupt is disabled before
  2355. * issuing UIC command.
  2356. */
  2357. wmb();
  2358. reenable_intr = true;
  2359. }
  2360. ret = __ufshcd_send_uic_cmd(hba, cmd, false);
  2361. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2362. if (ret) {
  2363. dev_err(hba->dev,
  2364. "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
  2365. cmd->command, cmd->argument3, ret);
  2366. goto out;
  2367. }
  2368. if (!wait_for_completion_timeout(hba->uic_async_done,
  2369. msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
  2370. dev_err(hba->dev,
  2371. "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
  2372. cmd->command, cmd->argument3);
  2373. ret = -ETIMEDOUT;
  2374. goto out;
  2375. }
  2376. status = ufshcd_get_upmcrs(hba);
  2377. if (status != PWR_LOCAL) {
  2378. dev_err(hba->dev,
  2379. "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
  2380. cmd->command, status);
  2381. ret = (status != PWR_OK) ? status : -1;
  2382. }
  2383. out:
  2384. spin_lock_irqsave(hba->host->host_lock, flags);
  2385. hba->active_uic_cmd = NULL;
  2386. hba->uic_async_done = NULL;
  2387. if (reenable_intr)
  2388. ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
  2389. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2390. mutex_unlock(&hba->uic_cmd_mutex);
  2391. return ret;
  2392. }
/**
 * ufshcd_uic_change_pwr_mode - perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
  2401. static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
  2402. {
  2403. struct uic_command uic_cmd = {0};
  2404. int ret;
  2405. if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
  2406. ret = ufshcd_dme_set(hba,
  2407. UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
  2408. if (ret) {
  2409. dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
  2410. __func__, ret);
  2411. goto out;
  2412. }
  2413. }
  2414. uic_cmd.command = UIC_CMD_DME_SET;
  2415. uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
  2416. uic_cmd.argument3 = mode;
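/* hold the host to keep the clocks ungated while the power mode changes */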
  2417. ufshcd_hold(hba, false);
  2418. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  2419. ufshcd_release(hba);
  2420. out:
  2421. return ret;
  2422. }
  2423. static int ufshcd_link_recovery(struct ufs_hba *hba)
  2424. {
  2425. int ret;
  2426. unsigned long flags;
  2427. spin_lock_irqsave(hba->host->host_lock, flags);
  2428. hba->ufshcd_state = UFSHCD_STATE_RESET;
  2429. ufshcd_set_eh_in_progress(hba);
  2430. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2431. ret = ufshcd_host_reset_and_restore(hba);
  2432. spin_lock_irqsave(hba->host->host_lock, flags);
  2433. if (ret)
  2434. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  2435. ufshcd_clear_eh_in_progress(hba);
  2436. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2437. if (ret)
  2438. dev_err(hba->dev, "%s: link recovery failed, err %d",
  2439. __func__, ret);
  2440. return ret;
  2441. }
  2442. static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  2443. {
  2444. int ret;
  2445. struct uic_command uic_cmd = {0};
  2446. uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
  2447. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  2448. if (ret) {
  2449. dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
  2450. __func__, ret);
/*
 * If link recovery fails, return an error so that the caller
 * does not retry the hibern8 enter again.
 */
  2455. if (ufshcd_link_recovery(hba))
  2456. ret = -ENOLINK;
  2457. }
  2458. return ret;
  2459. }
  2460. static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  2461. {
  2462. int ret = 0, retries;
  2463. for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
  2464. ret = __ufshcd_uic_hibern8_enter(hba);
  2465. if (!ret || ret == -ENOLINK)
  2466. goto out;
  2467. }
  2468. out:
  2469. return ret;
  2470. }
  2471. static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
  2472. {
  2473. struct uic_command uic_cmd = {0};
  2474. int ret;
  2475. uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
  2476. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  2477. if (ret) {
  2478. dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
  2479. __func__, ret);
  2480. ret = ufshcd_link_recovery(hba);
  2481. }
  2482. return ret;
  2483. }
  2484. /**
  2485. * ufshcd_init_pwr_info - setting the POR (power on reset)
  2486. * values in hba power info
  2487. * @hba: per-adapter instance
  2488. */
  2489. static void ufshcd_init_pwr_info(struct ufs_hba *hba)
  2490. {
  2491. hba->pwr_info.gear_rx = UFS_PWM_G1;
  2492. hba->pwr_info.gear_tx = UFS_PWM_G1;
  2493. hba->pwr_info.lane_rx = 1;
  2494. hba->pwr_info.lane_tx = 1;
  2495. hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
  2496. hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
  2497. hba->pwr_info.hs_rate = 0;
  2498. }
  2499. /**
  2500. * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  2501. * @hba: per-adapter instance
  2502. */
  2503. static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
  2504. {
  2505. struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
  2506. if (hba->max_pwr_info.is_valid)
  2507. return 0;
  2508. pwr_info->pwr_tx = FASTAUTO_MODE;
  2509. pwr_info->pwr_rx = FASTAUTO_MODE;
  2510. pwr_info->hs_rate = PA_HS_MODE_B;
  2511. /* Get the connected lane count */
  2512. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
  2513. &pwr_info->lane_rx);
  2514. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  2515. &pwr_info->lane_tx);
  2516. if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
  2517. dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
  2518. __func__,
  2519. pwr_info->lane_rx,
  2520. pwr_info->lane_tx);
  2521. return -EINVAL;
  2522. }
/*
 * First, get the maximum gear of HS speed.
 * If it is zero, the link has no HS gear capability, so fall back
 * to the maximum gear of PWM speed.
 */
  2528. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
  2529. if (!pwr_info->gear_rx) {
  2530. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
  2531. &pwr_info->gear_rx);
  2532. if (!pwr_info->gear_rx) {
  2533. dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
  2534. __func__, pwr_info->gear_rx);
  2535. return -EINVAL;
  2536. }
  2537. pwr_info->pwr_rx = SLOWAUTO_MODE;
  2538. }
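/*
 * The TX gear is bounded by what the peer (device) can receive, so read the
 * peer's RX gear capability here.
 */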
  2539. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
  2540. &pwr_info->gear_tx);
  2541. if (!pwr_info->gear_tx) {
  2542. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
  2543. &pwr_info->gear_tx);
  2544. if (!pwr_info->gear_tx) {
  2545. dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
  2546. __func__, pwr_info->gear_tx);
  2547. return -EINVAL;
  2548. }
  2549. pwr_info->pwr_tx = SLOWAUTO_MODE;
  2550. }
  2551. hba->max_pwr_info.is_valid = true;
  2552. return 0;
  2553. }
  2554. static int ufshcd_change_power_mode(struct ufs_hba *hba,
  2555. struct ufs_pa_layer_attr *pwr_mode)
  2556. {
  2557. int ret;
  2558. /* if already configured to the requested pwr_mode */
  2559. if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
  2560. pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
  2561. pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
  2562. pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
  2563. pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
  2564. pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
  2565. pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
  2566. dev_dbg(hba->dev, "%s: power already configured\n", __func__);
  2567. return 0;
  2568. }
  2569. /*
  2570. * Configure attributes for power mode change with below.
  2571. * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
  2572. * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
  2573. * - PA_HSSERIES
  2574. */
  2575. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
  2576. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
  2577. pwr_mode->lane_rx);
  2578. if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
  2579. pwr_mode->pwr_rx == FAST_MODE)
  2580. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
  2581. else
  2582. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
  2583. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
  2584. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
  2585. pwr_mode->lane_tx);
  2586. if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
  2587. pwr_mode->pwr_tx == FAST_MODE)
  2588. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
  2589. else
  2590. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
  2591. if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
  2592. pwr_mode->pwr_tx == FASTAUTO_MODE ||
  2593. pwr_mode->pwr_rx == FAST_MODE ||
  2594. pwr_mode->pwr_tx == FAST_MODE)
  2595. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
  2596. pwr_mode->hs_rate);
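/*
 * The value passed below packs the RX power mode into the upper nibble and
 * the TX power mode into the lower nibble of PA_PWRMODE.
 */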
  2597. ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
  2598. | pwr_mode->pwr_tx);
  2599. if (ret) {
  2600. dev_err(hba->dev,
  2601. "%s: power mode change failed %d\n", __func__, ret);
  2602. } else {
  2603. ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
  2604. pwr_mode);
  2605. memcpy(&hba->pwr_info, pwr_mode,
  2606. sizeof(struct ufs_pa_layer_attr));
  2607. }
  2608. return ret;
  2609. }
  2610. /**
  2611. * ufshcd_config_pwr_mode - configure a new power mode
  2612. * @hba: per-adapter instance
  2613. * @desired_pwr_mode: desired power configuration
  2614. */
  2615. static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
  2616. struct ufs_pa_layer_attr *desired_pwr_mode)
  2617. {
  2618. struct ufs_pa_layer_attr final_params = { 0 };
  2619. int ret;
  2620. ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
  2621. desired_pwr_mode, &final_params);
  2622. if (ret)
  2623. memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
  2624. ret = ufshcd_change_power_mode(hba, &final_params);
  2625. return ret;
  2626. }
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set the fDeviceInit flag and poll until the device clears it.
 */
  2633. static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  2634. {
  2635. int i;
  2636. int err;
bool flag_res = true;
  2638. err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
  2639. QUERY_FLAG_IDN_FDEVICEINIT, NULL);
  2640. if (err) {
  2641. dev_err(hba->dev,
  2642. "%s setting fDeviceInit flag failed with error %d\n",
  2643. __func__, err);
  2644. goto out;
  2645. }
  2646. /* poll for max. 1000 iterations for fDeviceInit flag to clear */
  2647. for (i = 0; i < 1000 && !err && flag_res; i++)
  2648. err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
  2649. QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
  2650. if (err)
  2651. dev_err(hba->dev,
  2652. "%s reading fDeviceInit flag failed with error %d\n",
  2653. __func__, err);
  2654. else if (flag_res)
  2655. dev_err(hba->dev,
  2656. "%s fDeviceInit was not cleared by the device\n",
  2657. __func__);
  2658. out:
  2659. return err;
  2660. }
  2661. /**
  2662. * ufshcd_make_hba_operational - Make UFS controller operational
  2663. * @hba: per adapter instance
  2664. *
  2665. * To bring UFS host controller to operational state,
  2666. * 1. Enable required interrupts
  2667. * 2. Configure interrupt aggregation
  2668. * 3. Program UTRL and UTMRL base address
  2669. * 4. Configure run-stop-registers
  2670. *
  2671. * Returns 0 on success, non-zero value on failure
  2672. */
  2673. static int ufshcd_make_hba_operational(struct ufs_hba *hba)
  2674. {
  2675. int err = 0;
  2676. u32 reg;
  2677. /* Enable required interrupts */
  2678. ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
  2679. /* Configure interrupt aggregation */
  2680. if (ufshcd_is_intr_aggr_allowed(hba))
  2681. ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
  2682. else
  2683. ufshcd_disable_intr_aggr(hba);
  2684. /* Configure UTRL and UTMRL base address registers */
  2685. ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
  2686. REG_UTP_TRANSFER_REQ_LIST_BASE_L);
  2687. ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
  2688. REG_UTP_TRANSFER_REQ_LIST_BASE_H);
  2689. ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
  2690. REG_UTP_TASK_REQ_LIST_BASE_L);
  2691. ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
  2692. REG_UTP_TASK_REQ_LIST_BASE_H);
  2693. /*
  2694. * Make sure base address and interrupt setup are updated before
  2695. * enabling the run/stop registers below.
  2696. */
  2697. wmb();
  2698. /*
  2699. * UCRDY, UTMRLDY and UTRLRDY bits must be 1
  2700. */
  2701. reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
  2702. if (!(ufshcd_get_lists_status(reg))) {
  2703. ufshcd_enable_run_stop_reg(hba);
  2704. } else {
  2705. dev_err(hba->dev,
  2706. "Host controller not ready to process requests");
  2707. err = -EIO;
  2708. goto out;
  2709. }
  2710. out:
  2711. return err;
  2712. }
  2713. /**
  2714. * ufshcd_hba_stop - Send controller to reset state
  2715. * @hba: per adapter instance
  2716. * @can_sleep: perform sleep or just spin
  2717. */
  2718. static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
  2719. {
  2720. int err;
  2721. ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
  2722. err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
  2723. CONTROLLER_ENABLE, CONTROLLER_DISABLE,
  2724. 10, 1, can_sleep);
  2725. if (err)
  2726. dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
  2727. }
  2728. /**
  2729. * ufshcd_hba_enable - initialize the controller
  2730. * @hba: per adapter instance
  2731. *
  2732. * The controller resets itself and controller firmware initialization
  2733. * sequence kicks off. When controller is ready it will set
  2734. * the Host Controller Enable bit to 1.
  2735. *
  2736. * Returns 0 on success, non-zero value on failure
  2737. */
  2738. static int ufshcd_hba_enable(struct ufs_hba *hba)
  2739. {
  2740. int retry;
/*
 * The msleep of 1 and 5 ms used in this function might result in a delay
 * of up to msleep(20), but that was necessary to put the UFS FPGA into
 * reset mode during development and testing of this driver. msleep can be
 * changed to mdelay and the retry count reduced based on the controller.
 */
  2747. if (!ufshcd_is_hba_active(hba))
  2748. /* change controller state to "reset state" */
  2749. ufshcd_hba_stop(hba, true);
  2750. /* UniPro link is disabled at this point */
  2751. ufshcd_set_link_off(hba);
  2752. ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
  2753. /* start controller initialization sequence */
  2754. ufshcd_hba_start(hba);
  2755. /*
  2756. * To initialize a UFS host controller HCE bit must be set to 1.
  2757. * During initialization the HCE bit value changes from 1->0->1.
  2758. * When the host controller completes initialization sequence
  2759. * it sets the value of HCE bit to 1. The same HCE bit is read back
  2760. * to check if the controller has completed initialization sequence.
  2761. * So without this delay the value HCE = 1, set in the previous
  2762. * instruction might be read back.
  2763. * This delay can be changed based on the controller.
  2764. */
  2765. msleep(1);
  2766. /* wait for the host controller to complete initialization */
  2767. retry = 10;
  2768. while (ufshcd_is_hba_active(hba)) {
  2769. if (retry) {
  2770. retry--;
  2771. } else {
  2772. dev_err(hba->dev,
  2773. "Controller enable failed\n");
  2774. return -EIO;
  2775. }
  2776. msleep(5);
  2777. }
  2778. /* enable UIC related interrupts */
  2779. ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
  2780. ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
  2781. return 0;
  2782. }
  2783. static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
  2784. {
  2785. int tx_lanes, i, err = 0;
  2786. if (!peer)
  2787. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  2788. &tx_lanes);
  2789. else
  2790. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  2791. &tx_lanes);
  2792. for (i = 0; i < tx_lanes; i++) {
  2793. if (!peer)
  2794. err = ufshcd_dme_set(hba,
  2795. UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
  2796. UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
  2797. 0);
  2798. else
  2799. err = ufshcd_dme_peer_set(hba,
  2800. UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
  2801. UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
  2802. 0);
  2803. if (err) {
  2804. dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
  2805. __func__, peer, i, err);
  2806. break;
  2807. }
  2808. }
  2809. return err;
  2810. }
  2811. static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
  2812. {
  2813. return ufshcd_disable_tx_lcc(hba, true);
  2814. }
  2815. /**
  2816. * ufshcd_link_startup - Initialize unipro link startup
  2817. * @hba: per adapter instance
  2818. *
  2819. * Returns 0 for success, non-zero in case of failure
  2820. */
  2821. static int ufshcd_link_startup(struct ufs_hba *hba)
  2822. {
  2823. int ret;
  2824. int retries = DME_LINKSTARTUP_RETRIES;
  2825. bool link_startup_again = false;
  2826. /*
  2827. * If UFS device isn't active then we will have to issue link startup
  2828. * 2 times to make sure the device state move to active.
  2829. */
  2830. if (!ufshcd_is_ufs_dev_active(hba))
  2831. link_startup_again = true;
  2832. link_startup:
  2833. do {
  2834. ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
  2835. ret = ufshcd_dme_link_startup(hba);
  2836. /* check if device is detected by inter-connect layer */
  2837. if (!ret && !ufshcd_is_device_present(hba)) {
  2838. dev_err(hba->dev, "%s: Device not present\n", __func__);
  2839. ret = -ENXIO;
  2840. goto out;
  2841. }
  2842. /*
  2843. * DME link lost indication is only received when link is up,
  2844. * but we can't be sure if the link is up until link startup
  2845. * succeeds. So reset the local Uni-Pro and try again.
  2846. */
  2847. if (ret && ufshcd_hba_enable(hba))
  2848. goto out;
  2849. } while (ret && retries--);
  2850. if (ret)
/* failed to get the link up... give up */
  2852. goto out;
  2853. if (link_startup_again) {
  2854. link_startup_again = false;
  2855. retries = DME_LINKSTARTUP_RETRIES;
  2856. goto link_startup;
  2857. }
  2858. if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
  2859. ret = ufshcd_disable_device_tx_lcc(hba);
  2860. if (ret)
  2861. goto out;
  2862. }
  2863. /* Include any host controller configuration via UIC commands */
  2864. ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
  2865. if (ret)
  2866. goto out;
  2867. ret = ufshcd_make_hba_operational(hba);
  2868. out:
  2869. if (ret)
  2870. dev_err(hba->dev, "link startup failed %d\n", ret);
  2871. return ret;
  2872. }
  2873. /**
  2874. * ufshcd_verify_dev_init() - Verify device initialization
  2875. * @hba: per-adapter instance
  2876. *
  2877. * Send NOP OUT UPIU and wait for NOP IN response to check whether the
* device's UFS Transport Protocol (UTP) layer is ready after a reset.
  2879. * If the UTP layer at the device side is not initialized, it may
  2880. * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
  2881. * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
  2882. */
  2883. static int ufshcd_verify_dev_init(struct ufs_hba *hba)
  2884. {
  2885. int err = 0;
  2886. int retries;
  2887. ufshcd_hold(hba, false);
  2888. mutex_lock(&hba->dev_cmd.lock);
  2889. for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
  2890. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
  2891. NOP_OUT_TIMEOUT);
  2892. if (!err || err == -ETIMEDOUT)
  2893. break;
  2894. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
  2895. }
  2896. mutex_unlock(&hba->dev_cmd.lock);
  2897. ufshcd_release(hba);
  2898. if (err)
  2899. dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
  2900. return err;
  2901. }
/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read the bLUQueueDepth value and activate SCSI tagged command
 * queueing. For a WLUN, the queue depth is set to 1. For best-effort
 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
 * number of requests the host can queue.
 */
  2911. static void ufshcd_set_queue_depth(struct scsi_device *sdev)
  2912. {
  2913. int ret = 0;
  2914. u8 lun_qdepth;
  2915. struct ufs_hba *hba;
  2916. hba = shost_priv(sdev->host);
  2917. lun_qdepth = hba->nutrs;
  2918. ret = ufshcd_read_unit_desc_param(hba,
  2919. ufshcd_scsi_to_upiu_lun(sdev->lun),
  2920. UNIT_DESC_PARAM_LU_Q_DEPTH,
  2921. &lun_qdepth,
  2922. sizeof(lun_qdepth));
/* Some WLUNs don't support the unit descriptor */
  2924. if (ret == -EOPNOTSUPP)
  2925. lun_qdepth = 1;
  2926. else if (!lun_qdepth)
  2927. /* eventually, we can figure out the real queue depth */
  2928. lun_qdepth = hba->nutrs;
  2929. else
  2930. lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
  2931. dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
  2932. __func__, lun_qdepth);
  2933. scsi_change_queue_depth(sdev, lun_qdepth);
  2934. }
/**
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success and the write protect status is returned in
 * the @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
  2946. static int ufshcd_get_lu_wp(struct ufs_hba *hba,
  2947. u8 lun,
  2948. u8 *b_lu_write_protect)
  2949. {
  2950. int ret;
  2951. if (!b_lu_write_protect)
  2952. ret = -EINVAL;
  2953. /*
  2954. * According to UFS device spec, RPMB LU can't be write
  2955. * protected so skip reading bLUWriteProtect parameter for
  2956. * it. For other W-LUs, UNIT DESCRIPTOR is not available.
  2957. */
  2958. else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
  2959. ret = -ENOTSUPP;
  2960. else
  2961. ret = ufshcd_read_unit_desc_param(hba,
  2962. lun,
  2963. UNIT_DESC_PARAM_LU_WR_PROTECT,
  2964. b_lu_write_protect,
  2965. sizeof(*b_lu_write_protect));
  2966. return ret;
  2967. }
  2968. /**
  2969. * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
  2970. * status
  2971. * @hba: per-adapter instance
  2972. * @sdev: pointer to SCSI device
  2973. *
  2974. */
  2975. static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
  2976. struct scsi_device *sdev)
  2977. {
  2978. if (hba->dev_info.f_power_on_wp_en &&
  2979. !hba->dev_info.is_lu_power_on_wp) {
  2980. u8 b_lu_write_protect;
  2981. if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
  2982. &b_lu_write_protect) &&
  2983. (b_lu_write_protect == UFS_LU_POWER_ON_WP))
  2984. hba->dev_info.is_lu_power_on_wp = true;
  2985. }
  2986. }
  2987. /**
  2988. * ufshcd_slave_alloc - handle initial SCSI device configurations
  2989. * @sdev: pointer to SCSI device
  2990. *
  2991. * Returns success
  2992. */
  2993. static int ufshcd_slave_alloc(struct scsi_device *sdev)
  2994. {
  2995. struct ufs_hba *hba;
  2996. hba = shost_priv(sdev->host);
  2997. /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
  2998. sdev->use_10_for_ms = 1;
  2999. /* allow SCSI layer to restart the device in case of errors */
  3000. sdev->allow_restart = 1;
  3001. /* REPORT SUPPORTED OPERATION CODES is not supported */
  3002. sdev->no_report_opcodes = 1;
  3003. /* WRITE_SAME command is not supported */
  3004. sdev->no_write_same = 1;
  3005. ufshcd_set_queue_depth(sdev);
  3006. ufshcd_get_lu_power_on_wp_status(hba, sdev);
  3007. return 0;
  3008. }
  3009. /**
  3010. * ufshcd_change_queue_depth - change queue depth
  3011. * @sdev: pointer to SCSI device
  3012. * @depth: required depth to set
  3013. *
  3014. * Change queue depth and make sure the max. limits are not crossed.
  3015. */
  3016. static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
  3017. {
  3018. struct ufs_hba *hba = shost_priv(sdev->host);
  3019. if (depth > hba->nutrs)
  3020. depth = hba->nutrs;
  3021. return scsi_change_queue_depth(sdev, depth);
  3022. }
  3023. /**
  3024. * ufshcd_slave_configure - adjust SCSI device configurations
  3025. * @sdev: pointer to SCSI device
  3026. */
  3027. static int ufshcd_slave_configure(struct scsi_device *sdev)
  3028. {
  3029. struct request_queue *q = sdev->request_queue;
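/*
 * Pad DMA lengths to the PRDT byte-count granularity and cap each segment
 * at the maximum byte count a single PRDT entry can describe.
 */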
  3030. blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
  3031. blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
  3032. return 0;
  3033. }
  3034. /**
  3035. * ufshcd_slave_destroy - remove SCSI device configurations
  3036. * @sdev: pointer to SCSI device
  3037. */
  3038. static void ufshcd_slave_destroy(struct scsi_device *sdev)
  3039. {
  3040. struct ufs_hba *hba;
  3041. hba = shost_priv(sdev->host);
  3042. /* Drop the reference as it won't be needed anymore */
  3043. if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
  3044. unsigned long flags;
  3045. spin_lock_irqsave(hba->host->host_lock, flags);
  3046. hba->sdev_ufs_device = NULL;
  3047. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3048. }
  3049. }
  3050. /**
  3051. * ufshcd_task_req_compl - handle task management request completion
  3052. * @hba: per adapter instance
  3053. * @index: index of the completed request
  3054. * @resp: task management service response
  3055. *
  3056. * Returns non-zero value on error, zero on success
  3057. */
  3058. static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
  3059. {
  3060. struct utp_task_req_desc *task_req_descp;
  3061. struct utp_upiu_task_rsp *task_rsp_upiup;
  3062. unsigned long flags;
  3063. int ocs_value;
  3064. int task_result;
  3065. spin_lock_irqsave(hba->host->host_lock, flags);
  3066. /* Clear completed tasks from outstanding_tasks */
  3067. __clear_bit(index, &hba->outstanding_tasks);
  3068. task_req_descp = hba->utmrdl_base_addr;
  3069. ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
  3070. if (ocs_value == OCS_SUCCESS) {
  3071. task_rsp_upiup = (struct utp_upiu_task_rsp *)
  3072. task_req_descp[index].task_rsp_upiu;
  3073. task_result = be32_to_cpu(task_rsp_upiup->output_param1);
  3074. task_result = task_result & MASK_TM_SERVICE_RESP;
  3075. if (resp)
  3076. *resp = (u8)task_result;
  3077. } else {
  3078. dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
  3079. __func__, ocs_value);
  3080. }
  3081. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3082. return ocs_value;
  3083. }
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
  3091. static inline int
  3092. ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
  3093. {
  3094. int result = 0;
  3095. switch (scsi_status) {
  3096. case SAM_STAT_CHECK_CONDITION:
  3097. ufshcd_copy_sense_data(lrbp);
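/* fallthrough: report DID_OK and let the midlayer examine the sense data */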
  3098. case SAM_STAT_GOOD:
  3099. result |= DID_OK << 16 |
  3100. COMMAND_COMPLETE << 8 |
  3101. scsi_status;
  3102. break;
  3103. case SAM_STAT_TASK_SET_FULL:
  3104. case SAM_STAT_BUSY:
  3105. case SAM_STAT_TASK_ABORTED:
  3106. ufshcd_copy_sense_data(lrbp);
  3107. result |= scsi_status;
  3108. break;
  3109. default:
  3110. result |= DID_ERROR << 16;
  3111. break;
  3112. } /* end of switch */
  3113. return result;
  3114. }
  3115. /**
  3116. * ufshcd_transfer_rsp_status - Get overall status of the response
  3117. * @hba: per adapter instance
* @lrbp: pointer to local reference block of completed command
  3119. *
  3120. * Returns result of the command to notify SCSI midlayer
  3121. */
  3122. static inline int
  3123. ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  3124. {
  3125. int result = 0;
  3126. int scsi_status;
  3127. int ocs;
  3128. /* overall command status of utrd */
  3129. ocs = ufshcd_get_tr_ocs(lrbp);
  3130. switch (ocs) {
  3131. case OCS_SUCCESS:
  3132. result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  3133. switch (result) {
  3134. case UPIU_TRANSACTION_RESPONSE:
  3135. /*
  3136. * get the response UPIU result to extract
  3137. * the SCSI command status
  3138. */
  3139. result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
  3140. /*
  3141. * get the result based on SCSI status response
  3142. * to notify the SCSI midlayer of the command status
  3143. */
  3144. scsi_status = result & MASK_SCSI_STATUS;
  3145. result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  3146. /*
  3147. * Currently we are only supporting BKOPs exception
  3148. * events hence we can ignore BKOPs exception event
  3149. * during power management callbacks. BKOPs exception
  3150. * event is not expected to be raised in runtime suspend
  3151. * callback as it allows the urgent bkops.
  3152. * During system suspend, we are anyway forcefully
  3153. * disabling the bkops and if urgent bkops is needed
  3154. * it will be enabled on system resume. Long term
  3155. * solution could be to abort the system suspend if
  3156. * UFS device needs urgent BKOPs.
  3157. */
  3158. if (!hba->pm_op_in_progress &&
  3159. ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
  3160. schedule_work(&hba->eeh_work);
  3161. break;
  3162. case UPIU_TRANSACTION_REJECT_UPIU:
  3163. /* TODO: handle Reject UPIU Response */
  3164. result = DID_ERROR << 16;
  3165. dev_err(hba->dev,
  3166. "Reject UPIU not fully implemented\n");
  3167. break;
  3168. default:
  3169. result = DID_ERROR << 16;
  3170. dev_err(hba->dev,
  3171. "Unexpected request response code = %x\n",
  3172. result);
  3173. break;
  3174. }
  3175. break;
  3176. case OCS_ABORTED:
  3177. result |= DID_ABORT << 16;
  3178. break;
  3179. case OCS_INVALID_COMMAND_STATUS:
  3180. result |= DID_REQUEUE << 16;
  3181. break;
  3182. case OCS_INVALID_CMD_TABLE_ATTR:
  3183. case OCS_INVALID_PRDT_ATTR:
  3184. case OCS_MISMATCH_DATA_BUF_SIZE:
  3185. case OCS_MISMATCH_RESP_UPIU_SIZE:
  3186. case OCS_PEER_COMM_FAILURE:
  3187. case OCS_FATAL_ERROR:
  3188. default:
  3189. result |= DID_ERROR << 16;
  3190. dev_err(hba->dev,
  3191. "OCS error from controller = %x\n", ocs);
  3192. break;
  3193. } /* end of switch */
  3194. return result;
  3195. }
  3196. /**
  3197. * ufshcd_uic_cmd_compl - handle completion of uic command
  3198. * @hba: per adapter instance
  3199. * @intr_status: interrupt status generated by the controller
  3200. */
  3201. static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  3202. {
  3203. if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
  3204. hba->active_uic_cmd->argument2 |=
  3205. ufshcd_get_uic_cmd_result(hba);
  3206. hba->active_uic_cmd->argument3 =
  3207. ufshcd_get_dme_attr_val(hba);
  3208. complete(&hba->active_uic_cmd->done);
  3209. }
  3210. if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
  3211. complete(hba->uic_async_done);
  3212. }
  3213. /**
  3214. * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  3215. * @hba: per adapter instance
  3216. * @completed_reqs: requests to complete
  3217. */
  3218. static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
  3219. unsigned long completed_reqs)
  3220. {
  3221. struct ufshcd_lrb *lrbp;
  3222. struct scsi_cmnd *cmd;
  3223. int result;
  3224. int index;
  3225. for_each_set_bit(index, &completed_reqs, hba->nutrs) {
  3226. lrbp = &hba->lrb[index];
  3227. cmd = lrbp->cmd;
  3228. if (cmd) {
  3229. result = ufshcd_transfer_rsp_status(hba, lrbp);
  3230. scsi_dma_unmap(cmd);
  3231. cmd->result = result;
  3232. /* Mark completed command as NULL in LRB */
  3233. lrbp->cmd = NULL;
  3234. clear_bit_unlock(index, &hba->lrb_in_use);
  3235. /* Do not touch lrbp after scsi done */
  3236. cmd->scsi_done(cmd);
  3237. __ufshcd_release(hba);
  3238. } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
  3239. lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
  3240. if (hba->dev_cmd.complete)
  3241. complete(hba->dev_cmd.complete);
  3242. }
  3243. }
  3244. /* clear corresponding bits of completed commands */
  3245. hba->outstanding_reqs ^= completed_reqs;
  3246. ufshcd_clk_scaling_update_busy(hba);
  3247. /* we might have free'd some tags above */
  3248. wake_up(&hba->dev_cmd.tag_wq);
  3249. }
  3250. /**
  3251. * ufshcd_transfer_req_compl - handle SCSI and query command completion
  3252. * @hba: per adapter instance
  3253. */
  3254. static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
  3255. {
  3256. unsigned long completed_reqs;
  3257. u32 tr_doorbell;
  3258. /* Resetting interrupt aggregation counters first and reading the
  3259. * DOOR_BELL afterward allows us to handle all the completed requests.
  3260. * In order to prevent other interrupts starvation the DB is read once
  3261. * after reset. The down side of this solution is the possibility of
  3262. * false interrupt if device completes another request after resetting
  3263. * aggregation and before reading the DB.
  3264. */
  3265. if (ufshcd_is_intr_aggr_allowed(hba))
  3266. ufshcd_reset_intr_aggr(hba);
  3267. tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
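/* bits still set in outstanding_reqs but cleared in the doorbell are done */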
  3268. completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
  3269. __ufshcd_transfer_req_compl(hba, completed_reqs);
  3270. }
  3271. /**
  3272. * ufshcd_disable_ee - disable exception event
  3273. * @hba: per-adapter instance
  3274. * @mask: exception event to disable
  3275. *
  3276. * Disables exception event in the device so that the EVENT_ALERT
  3277. * bit is not set.
  3278. *
  3279. * Returns zero on success, non-zero error value on failure.
  3280. */
  3281. static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
  3282. {
  3283. int err = 0;
  3284. u32 val;
  3285. if (!(hba->ee_ctrl_mask & mask))
  3286. goto out;
  3287. val = hba->ee_ctrl_mask & ~mask;
  3288. val &= 0xFFFF; /* 2 bytes */
  3289. err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  3290. QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
  3291. if (!err)
  3292. hba->ee_ctrl_mask &= ~mask;
  3293. out:
  3294. return err;
  3295. }
  3296. /**
  3297. * ufshcd_enable_ee - enable exception event
  3298. * @hba: per-adapter instance
  3299. * @mask: exception event to enable
  3300. *
  3301. * Enable corresponding exception event in the device to allow
  3302. * device to alert host in critical scenarios.
  3303. *
  3304. * Returns zero on success, non-zero error value on failure.
  3305. */
  3306. static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
  3307. {
  3308. int err = 0;
  3309. u32 val;
  3310. if (hba->ee_ctrl_mask & mask)
  3311. goto out;
  3312. val = hba->ee_ctrl_mask | mask;
  3313. val &= 0xFFFF; /* 2 bytes */
  3314. err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  3315. QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
  3316. if (!err)
  3317. hba->ee_ctrl_mask |= mask;
  3318. out:
  3319. return err;
  3320. }
  3321. /**
  3322. * ufshcd_enable_auto_bkops - Allow device managed BKOPS
  3323. * @hba: per-adapter instance
  3324. *
  3325. * Allow device to manage background operations on its own. Enabling
  3326. * this might lead to inconsistent latencies during normal data transfers
  3327. * as the device is allowed to manage its own way of handling background
  3328. * operations.
  3329. *
  3330. * Returns zero on success, non-zero on failure.
  3331. */
  3332. static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
  3333. {
  3334. int err = 0;
  3335. if (hba->auto_bkops_enabled)
  3336. goto out;
  3337. err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
  3338. QUERY_FLAG_IDN_BKOPS_EN, NULL);
  3339. if (err) {
  3340. dev_err(hba->dev, "%s: failed to enable bkops %d\n",
  3341. __func__, err);
  3342. goto out;
  3343. }
  3344. hba->auto_bkops_enabled = true;
  3345. /* No need of URGENT_BKOPS exception from the device */
  3346. err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
  3347. if (err)
  3348. dev_err(hba->dev, "%s: failed to disable exception event %d\n",
  3349. __func__, err);
  3350. out:
  3351. return err;
  3352. }
  3353. /**
  3354. * ufshcd_disable_auto_bkops - block device in doing background operations
  3355. * @hba: per-adapter instance
  3356. *
  3357. * Disabling background operations improves command response latency but
  3358. * has drawback of device moving into critical state where the device is
  3359. * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
  3360. * host is idle so that BKOPS are managed effectively without any negative
  3361. * impacts.
  3362. *
  3363. * Returns zero on success, non-zero on failure.
  3364. */
  3365. static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
  3366. {
  3367. int err = 0;
  3368. if (!hba->auto_bkops_enabled)
  3369. goto out;
  3370. /*
  3371. * If host assisted BKOPs is to be enabled, make sure
  3372. * urgent bkops exception is allowed.
  3373. */
  3374. err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
  3375. if (err) {
  3376. dev_err(hba->dev, "%s: failed to enable exception event %d\n",
  3377. __func__, err);
  3378. goto out;
  3379. }
  3380. err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
  3381. QUERY_FLAG_IDN_BKOPS_EN, NULL);
  3382. if (err) {
  3383. dev_err(hba->dev, "%s: failed to disable bkops %d\n",
  3384. __func__, err);
  3385. ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
  3386. goto out;
  3387. }
  3388. hba->auto_bkops_enabled = false;
  3389. out:
  3390. return err;
  3391. }
  3392. /**
  3393. * ufshcd_force_reset_auto_bkops - force reset auto bkops state
  3394. * @hba: per adapter instance
  3395. *
  3396. * After a device reset the device may toggle the BKOPS_EN flag
  3397. * to default value. The s/w tracking variables should be updated
  3398. * as well. This function would change the auto-bkops state based on
  3399. * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
  3400. */
  3401. static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
  3402. {
  3403. if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
  3404. hba->auto_bkops_enabled = false;
  3405. hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
  3406. ufshcd_enable_auto_bkops(hba);
  3407. } else {
  3408. hba->auto_bkops_enabled = true;
  3409. hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
  3410. ufshcd_disable_auto_bkops(hba);
  3411. }
  3412. }
  3413. static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
  3414. {
  3415. return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
  3416. QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
  3417. }
  3418. /**
  3419. * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
  3420. * @hba: per-adapter instance
  3421. * @status: bkops_status value
  3422. *
* Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
* flag in the device to permit background operations if the device
* bkops_status is greater than or equal to the "status" argument passed to
* this function; disable it otherwise.
  3427. *
  3428. * Returns 0 for success, non-zero in case of failure.
  3429. *
  3430. * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
  3431. * to know whether auto bkops is enabled or disabled after this function
  3432. * returns control to it.
  3433. */
  3434. static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
  3435. enum bkops_status status)
  3436. {
  3437. int err;
  3438. u32 curr_status = 0;
  3439. err = ufshcd_get_bkops_status(hba, &curr_status);
  3440. if (err) {
  3441. dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
  3442. __func__, err);
  3443. goto out;
  3444. } else if (curr_status > BKOPS_STATUS_MAX) {
  3445. dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
  3446. __func__, curr_status);
  3447. err = -EINVAL;
  3448. goto out;
  3449. }
  3450. if (curr_status >= status)
  3451. err = ufshcd_enable_auto_bkops(hba);
  3452. else
  3453. err = ufshcd_disable_auto_bkops(hba);
  3454. out:
  3455. return err;
  3456. }
  3457. /**
  3458. * ufshcd_urgent_bkops - handle urgent bkops exception event
  3459. * @hba: per-adapter instance
  3460. *
  3461. * Enable fBackgroundOpsEn flag in the device to permit background
  3462. * operations.
  3463. *
* Returns 0 if BKOPS is enabled, 1 if BKOPS is not enabled, and a negative
* error value for any other failure.
  3466. */
  3467. static int ufshcd_urgent_bkops(struct ufs_hba *hba)
  3468. {
  3469. return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
  3470. }
  3471. static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
  3472. {
  3473. return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
  3474. QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
  3475. }
  3476. static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
  3477. {
  3478. int err;
  3479. u32 curr_status = 0;
  3480. if (hba->is_urgent_bkops_lvl_checked)
  3481. goto enable_auto_bkops;
  3482. err = ufshcd_get_bkops_status(hba, &curr_status);
  3483. if (err) {
  3484. dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
  3485. __func__, err);
  3486. goto out;
  3487. }
/*
 * Some devices raise the urgent bkops exception event even when the BKOPS
 * status does not indicate a performance-impacted or critical level.
 * Handle such devices by determining their urgent bkops status at runtime.
 */
  3494. if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
  3495. dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
  3496. __func__, curr_status);
  3497. /* update the current status as the urgent bkops level */
  3498. hba->urgent_bkops_lvl = curr_status;
  3499. hba->is_urgent_bkops_lvl_checked = true;
  3500. }
  3501. enable_auto_bkops:
  3502. err = ufshcd_enable_auto_bkops(hba);
  3503. out:
  3504. if (err < 0)
  3505. dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
  3506. __func__, err);
  3507. }
  3508. /**
  3509. * ufshcd_exception_event_handler - handle exceptions raised by device
  3510. * @work: pointer to work data
  3511. *
  3512. * Read bExceptionEventStatus attribute from the device and handle the
  3513. * exception event accordingly.
  3514. */
  3515. static void ufshcd_exception_event_handler(struct work_struct *work)
  3516. {
  3517. struct ufs_hba *hba;
  3518. int err;
  3519. u32 status = 0;
  3520. hba = container_of(work, struct ufs_hba, eeh_work);
  3521. pm_runtime_get_sync(hba->dev);
  3522. scsi_block_requests(hba->host);
  3523. err = ufshcd_get_ee_status(hba, &status);
  3524. if (err) {
  3525. dev_err(hba->dev, "%s: failed to get exception status %d\n",
  3526. __func__, err);
  3527. goto out;
  3528. }
  3529. status &= hba->ee_ctrl_mask;
  3530. if (status & MASK_EE_URGENT_BKOPS)
  3531. ufshcd_bkops_exception_event_handler(hba);
  3532. out:
  3533. scsi_unblock_requests(hba->host);
  3534. pm_runtime_put_sync(hba->dev);
  3535. return;
  3536. }
  3537. /* Complete requests that have door-bell cleared */
  3538. static void ufshcd_complete_requests(struct ufs_hba *hba)
  3539. {
  3540. ufshcd_transfer_req_compl(hba);
  3541. ufshcd_tmc_handler(hba);
  3542. }
/**
 * ufshcd_quirk_dl_nac_errors - check whether error handling is required to
 * recover from the DL NAC errors.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
  3550. static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
  3551. {
  3552. unsigned long flags;
  3553. bool err_handling = true;
  3554. spin_lock_irqsave(hba->host->host_lock, flags);
/*
 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
 * device fatal errors and/or DL NAC & REPLAY timeout errors.
 */
  3559. if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
  3560. goto out;
  3561. if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
  3562. ((hba->saved_err & UIC_ERROR) &&
  3563. (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
  3564. goto out;
  3565. if ((hba->saved_err & UIC_ERROR) &&
  3566. (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
  3567. int err;
  3568. /*
  3569. * wait for 50ms to see if we can get any other errors or not.
  3570. */
  3571. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3572. msleep(50);
  3573. spin_lock_irqsave(hba->host->host_lock, flags);
/*
 * Now check whether we have received any severe errors other than
 * the DL NAC error.
 */
  3578. if ((hba->saved_err & INT_FATAL_ERRORS) ||
  3579. ((hba->saved_err & UIC_ERROR) &&
  3580. (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
  3581. goto out;
  3582. /*
  3583. * As DL NAC is the only error received so far, send out NOP
  3584. * command to confirm if link is still active or not.
  3585. * - If we don't get any response then do error recovery.
  3586. * - If we get response then clear the DL NAC error bit.
  3587. */
  3588. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3589. err = ufshcd_verify_dev_init(hba);
  3590. spin_lock_irqsave(hba->host->host_lock, flags);
  3591. if (err)
  3592. goto out;
  3593. /* Link seems to be alive hence ignore the DL NAC errors */
  3594. if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
  3595. hba->saved_err &= ~UIC_ERROR;
  3596. /* clear NAC error */
  3597. hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
  3598. if (!hba->saved_uic_err) {
  3599. err_handling = false;
  3600. goto out;
  3601. }
  3602. }
  3603. out:
  3604. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3605. return err_handling;
  3606. }
  3607. /**
  3608. * ufshcd_err_handler - handle UFS errors that require s/w attention
  3609. * @work: pointer to work structure
  3610. */
  3611. static void ufshcd_err_handler(struct work_struct *work)
  3612. {
  3613. struct ufs_hba *hba;
  3614. unsigned long flags;
  3615. u32 err_xfer = 0;
  3616. u32 err_tm = 0;
  3617. int err = 0;
  3618. int tag;
  3619. bool needs_reset = false;
  3620. hba = container_of(work, struct ufs_hba, eh_work);
  3621. pm_runtime_get_sync(hba->dev);
  3622. ufshcd_hold(hba, false);
  3623. spin_lock_irqsave(hba->host->host_lock, flags);
  3624. if (hba->ufshcd_state == UFSHCD_STATE_RESET)
  3625. goto out;
  3626. hba->ufshcd_state = UFSHCD_STATE_RESET;
  3627. ufshcd_set_eh_in_progress(hba);
  3628. /* Complete requests that have door-bell cleared by h/w */
  3629. ufshcd_complete_requests(hba);
  3630. if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
  3631. bool ret;
  3632. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3633. /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
  3634. ret = ufshcd_quirk_dl_nac_errors(hba);
  3635. spin_lock_irqsave(hba->host->host_lock, flags);
  3636. if (!ret)
  3637. goto skip_err_handling;
  3638. }
  3639. if ((hba->saved_err & INT_FATAL_ERRORS) ||
  3640. ((hba->saved_err & UIC_ERROR) &&
  3641. (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
  3642. UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
  3643. UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
  3644. needs_reset = true;
  3645. /*
  3646. * if host reset is required then skip clearing the pending
  3647. * transfers forcefully because they will automatically get
  3648. * cleared after link startup.
  3649. */
  3650. if (needs_reset)
  3651. goto skip_pending_xfer_clear;
  3652. /* release lock as clear command might sleep */
  3653. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3654. /* Clear pending transfer requests */
  3655. for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
  3656. if (ufshcd_clear_cmd(hba, tag)) {
  3657. err_xfer = true;
  3658. goto lock_skip_pending_xfer_clear;
  3659. }
  3660. }
  3661. /* Clear pending task management requests */
  3662. for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
  3663. if (ufshcd_clear_tm_cmd(hba, tag)) {
  3664. err_tm = true;
  3665. goto lock_skip_pending_xfer_clear;
  3666. }
  3667. }
  3668. lock_skip_pending_xfer_clear:
  3669. spin_lock_irqsave(hba->host->host_lock, flags);
  3670. /* Complete the requests that are cleared by s/w */
  3671. ufshcd_complete_requests(hba);
  3672. if (err_xfer || err_tm)
  3673. needs_reset = true;
  3674. skip_pending_xfer_clear:
  3675. /* Fatal errors need reset */
  3676. if (needs_reset) {
  3677. unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
/*
 * ufshcd_reset_and_restore() does the link reinitialization
 * which needs at least one empty doorbell slot to send the
 * device management commands (NOP and query commands).
 * If no slot is empty at this moment then free up the last
 * slot forcefully.
 */
  3685. if (hba->outstanding_reqs == max_doorbells)
  3686. __ufshcd_transfer_req_compl(hba,
  3687. (1UL << (hba->nutrs - 1)));
  3688. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3689. err = ufshcd_reset_and_restore(hba);
  3690. spin_lock_irqsave(hba->host->host_lock, flags);
  3691. if (err) {
  3692. dev_err(hba->dev, "%s: reset and restore failed\n",
  3693. __func__);
  3694. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  3695. }
  3696. /*
  3697. * Inform scsi mid-layer that we did reset and allow to handle
  3698. * Unit Attention properly.
  3699. */
  3700. scsi_report_bus_reset(hba->host, 0);
  3701. hba->saved_err = 0;
  3702. hba->saved_uic_err = 0;
  3703. }
  3704. skip_err_handling:
  3705. if (!needs_reset) {
  3706. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  3707. if (hba->saved_err || hba->saved_uic_err)
  3708. dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
  3709. __func__, hba->saved_err, hba->saved_uic_err);
  3710. }
  3711. ufshcd_clear_eh_in_progress(hba);
  3712. out:
  3713. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3714. scsi_unblock_requests(hba->host);
  3715. ufshcd_release(hba);
  3716. pm_runtime_put_sync(hba->dev);
  3717. }
  3718. /**
  3719. * ufshcd_update_uic_error - check and set fatal UIC error flags.
  3720. * @hba: per-adapter instance
  3721. */
  3722. static void ufshcd_update_uic_error(struct ufs_hba *hba)
  3723. {
  3724. u32 reg;
  3725. /* PA_INIT_ERROR is fatal and needs UIC reset */
  3726. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
  3727. if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
  3728. hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
  3729. else if (hba->dev_quirks &
  3730. UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
  3731. if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
  3732. hba->uic_error |=
  3733. UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
  3734. else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
  3735. hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
  3736. }
/* UIC NL/TL/DME errors need software retry */
  3738. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
  3739. if (reg)
  3740. hba->uic_error |= UFSHCD_UIC_NL_ERROR;
  3741. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
  3742. if (reg)
  3743. hba->uic_error |= UFSHCD_UIC_TL_ERROR;
  3744. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
  3745. if (reg)
  3746. hba->uic_error |= UFSHCD_UIC_DME_ERROR;
  3747. dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
  3748. __func__, hba->uic_error);
  3749. }
  3750. /**
  3751. * ufshcd_check_errors - Check for errors that need s/w attention
  3752. * @hba: per-adapter instance
  3753. */
  3754. static void ufshcd_check_errors(struct ufs_hba *hba)
  3755. {
  3756. bool queue_eh_work = false;
  3757. if (hba->errors & INT_FATAL_ERRORS)
  3758. queue_eh_work = true;
  3759. if (hba->errors & UIC_ERROR) {
  3760. hba->uic_error = 0;
  3761. ufshcd_update_uic_error(hba);
  3762. if (hba->uic_error)
  3763. queue_eh_work = true;
  3764. }
  3765. if (queue_eh_work) {
  3766. /*
  3767. * update the transfer error masks to sticky bits, let's do this
  3768. * irrespective of current ufshcd_state.
  3769. */
  3770. hba->saved_err |= hba->errors;
  3771. hba->saved_uic_err |= hba->uic_error;
  3772. /* handle fatal errors only when link is functional */
  3773. if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
  3774. /* block commands from scsi mid-layer */
  3775. scsi_block_requests(hba->host);
  3776. hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
  3777. schedule_work(&hba->eh_work);
  3778. }
  3779. }
  3780. /*
  3781. * if (!queue_eh_work) -
  3782. * Other errors are either non-fatal where host recovers
  3783. * itself without s/w intervention or errors that will be
  3784. * handled by the SCSI core layer.
  3785. */
  3786. }
  3787. /**
  3788. * ufshcd_tmc_handler - handle task management function completion
  3789. * @hba: per adapter instance
  3790. */
  3791. static void ufshcd_tmc_handler(struct ufs_hba *hba)
  3792. {
  3793. u32 tm_doorbell;
  3794. tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
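/* task slots still outstanding but cleared in the doorbell have completed */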
  3795. hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
  3796. wake_up(&hba->tm_wq);
  3797. }
  3798. /**
  3799. * ufshcd_sl_intr - Interrupt service routine
  3800. * @hba: per adapter instance
  3801. * @intr_status: contains interrupts generated by the controller
  3802. */
  3803. static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  3804. {
  3805. hba->errors = UFSHCD_ERROR_MASK & intr_status;
  3806. if (hba->errors)
  3807. ufshcd_check_errors(hba);
  3808. if (intr_status & UFSHCD_UIC_MASK)
  3809. ufshcd_uic_cmd_compl(hba, intr_status);
  3810. if (intr_status & UTP_TASK_REQ_COMPL)
  3811. ufshcd_tmc_handler(hba);
  3812. if (intr_status & UTP_TRANSFER_REQ_COMPL)
  3813. ufshcd_transfer_req_compl(hba);
  3814. }
  3815. /**
  3816. * ufshcd_intr - Main interrupt service routine
  3817. * @irq: irq number
  3818. * @__hba: pointer to adapter instance
  3819. *
  3820. * Returns IRQ_HANDLED - If interrupt is valid
  3821. * IRQ_NONE - If invalid interrupt
  3822. */
  3823. static irqreturn_t ufshcd_intr(int irq, void *__hba)
  3824. {
  3825. u32 intr_status, enabled_intr_status;
  3826. irqreturn_t retval = IRQ_NONE;
  3827. struct ufs_hba *hba = __hba;
  3828. spin_lock(hba->host->host_lock);
  3829. intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
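/* service only the enabled interrupt sources, but acknowledge everything */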
  3830. enabled_intr_status =
  3831. intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
  3832. if (intr_status)
  3833. ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
  3834. if (enabled_intr_status) {
  3835. ufshcd_sl_intr(hba, enabled_intr_status);
  3836. retval = IRQ_HANDLED;
  3837. }
  3838. spin_unlock(hba->host->host_lock);
  3839. return retval;
  3840. }
  3841. static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
  3842. {
  3843. int err = 0;
  3844. u32 mask = 1 << tag;
  3845. unsigned long flags;
  3846. if (!test_bit(tag, &hba->outstanding_tasks))
  3847. goto out;
  3848. spin_lock_irqsave(hba->host->host_lock, flags);
  3849. ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
  3850. spin_unlock_irqrestore(hba->host->host_lock, flags);
3851. /* poll for max. 1 sec for the door bell register to be cleared by h/w */
  3852. err = ufshcd_wait_for_register(hba,
  3853. REG_UTP_TASK_REQ_DOOR_BELL,
  3854. mask, 0, 1000, 1000, true);
  3855. out:
  3856. return err;
  3857. }
  3858. /**
  3859. * ufshcd_issue_tm_cmd - issues task management commands to controller
  3860. * @hba: per adapter instance
  3861. * @lun_id: LUN ID to which TM command is sent
  3862. * @task_id: task ID to which the TM command is applicable
  3863. * @tm_function: task management function opcode
  3864. * @tm_response: task management service response return value
  3865. *
  3866. * Returns non-zero value on error, zero on success.
  3867. */
  3868. static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
  3869. u8 tm_function, u8 *tm_response)
  3870. {
  3871. struct utp_task_req_desc *task_req_descp;
  3872. struct utp_upiu_task_req *task_req_upiup;
  3873. struct Scsi_Host *host;
  3874. unsigned long flags;
  3875. int free_slot;
  3876. int err;
  3877. int task_tag;
  3878. host = hba->host;
  3879. /*
3880. * Get a free slot; sleep if none are available. Even though we use
3881. * wait_event(), which sleeps indefinitely, the wait is effectively bounded
3882. * by %TM_CMD_TIMEOUT since each outstanding TM command frees its slot within that time.
  3883. */
  3884. wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
  3885. ufshcd_hold(hba, false);
  3886. spin_lock_irqsave(host->host_lock, flags);
  3887. task_req_descp = hba->utmrdl_base_addr;
  3888. task_req_descp += free_slot;
  3889. /* Configure task request descriptor */
  3890. task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
  3891. task_req_descp->header.dword_2 =
  3892. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  3893. /* Configure task request UPIU */
  3894. task_req_upiup =
  3895. (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
  3896. task_tag = hba->nutrs + free_slot;
  3897. task_req_upiup->header.dword_0 =
  3898. UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
  3899. lun_id, task_tag);
  3900. task_req_upiup->header.dword_1 =
  3901. UPIU_HEADER_DWORD(0, tm_function, 0, 0);
  3902. /*
  3903. * The host shall provide the same value for LUN field in the basic
  3904. * header and for Input Parameter.
  3905. */
  3906. task_req_upiup->input_param1 = cpu_to_be32(lun_id);
  3907. task_req_upiup->input_param2 = cpu_to_be32(task_id);
  3908. /* send command to the controller */
  3909. __set_bit(free_slot, &hba->outstanding_tasks);
  3910. /* Make sure descriptors are ready before ringing the task doorbell */
  3911. wmb();
  3912. ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
  3913. spin_unlock_irqrestore(host->host_lock, flags);
  3914. /* wait until the task management command is completed */
  3915. err = wait_event_timeout(hba->tm_wq,
  3916. test_bit(free_slot, &hba->tm_condition),
  3917. msecs_to_jiffies(TM_CMD_TIMEOUT));
  3918. if (!err) {
  3919. dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
  3920. __func__, tm_function);
  3921. if (ufshcd_clear_tm_cmd(hba, free_slot))
3922. dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
  3923. __func__, free_slot);
  3924. err = -ETIMEDOUT;
  3925. } else {
  3926. err = ufshcd_task_req_compl(hba, free_slot, tm_response);
  3927. }
  3928. clear_bit(free_slot, &hba->tm_condition);
  3929. ufshcd_put_tm_slot(hba, free_slot);
  3930. wake_up(&hba->tm_tag_wq);
  3931. ufshcd_release(hba);
  3932. return err;
  3933. }
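/*
 * [Editorial sketch] Typical use of ufshcd_issue_tm_cmd(), mirroring the
 * device reset handler below: issue a TM function and treat anything other
 * than a "function complete" service response as the error code. Purely
 * illustrative; the constants are the ones used by the callers in this file.
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0,
 *				      UFS_LOGICAL_RESET, &resp);
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		err = err ? err : resp;
 */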
  3934. /**
  3935. * ufshcd_eh_device_reset_handler - device reset handler registered to
  3936. * scsi layer.
  3937. * @cmd: SCSI command pointer
  3938. *
  3939. * Returns SUCCESS/FAILED
  3940. */
  3941. static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
  3942. {
  3943. struct Scsi_Host *host;
  3944. struct ufs_hba *hba;
  3945. unsigned int tag;
  3946. u32 pos;
  3947. int err;
  3948. u8 resp = 0xF;
  3949. struct ufshcd_lrb *lrbp;
  3950. unsigned long flags;
  3951. host = cmd->device->host;
  3952. hba = shost_priv(host);
  3953. tag = cmd->request->tag;
  3954. lrbp = &hba->lrb[tag];
  3955. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
  3956. if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3957. if (!err)
  3958. err = resp;
  3959. goto out;
  3960. }
  3961. /* clear the commands that were pending for corresponding LUN */
  3962. for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
  3963. if (hba->lrb[pos].lun == lrbp->lun) {
  3964. err = ufshcd_clear_cmd(hba, pos);
  3965. if (err)
  3966. break;
  3967. }
  3968. }
  3969. spin_lock_irqsave(host->host_lock, flags);
  3970. ufshcd_transfer_req_compl(hba);
  3971. spin_unlock_irqrestore(host->host_lock, flags);
  3972. out:
  3973. if (!err) {
  3974. err = SUCCESS;
  3975. } else {
  3976. dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
  3977. err = FAILED;
  3978. }
  3979. return err;
  3980. }
  3981. /**
  3982. * ufshcd_abort - abort a specific command
  3983. * @cmd: SCSI command pointer
  3984. *
  3985. * Abort the pending command in device by sending UFS_ABORT_TASK task management
  3986. * command, and in host controller by clearing the door-bell register. There can
  3987. * be race between controller sending the command to the device while abort is
  3988. * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
  3989. * really issued and then try to abort it.
  3990. *
  3991. * Returns SUCCESS/FAILED
  3992. */
  3993. static int ufshcd_abort(struct scsi_cmnd *cmd)
  3994. {
  3995. struct Scsi_Host *host;
  3996. struct ufs_hba *hba;
  3997. unsigned long flags;
  3998. unsigned int tag;
  3999. int err = 0;
  4000. int poll_cnt;
  4001. u8 resp = 0xF;
  4002. struct ufshcd_lrb *lrbp;
  4003. u32 reg;
  4004. host = cmd->device->host;
  4005. hba = shost_priv(host);
  4006. tag = cmd->request->tag;
  4007. if (!ufshcd_valid_tag(hba, tag)) {
  4008. dev_err(hba->dev,
  4009. "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
  4010. __func__, tag, cmd, cmd->request);
  4011. BUG();
  4012. }
  4013. ufshcd_hold(hba, false);
  4014. reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  4015. /* If command is already aborted/completed, return SUCCESS */
  4016. if (!(test_bit(tag, &hba->outstanding_reqs))) {
  4017. dev_err(hba->dev,
  4018. "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
  4019. __func__, tag, hba->outstanding_reqs, reg);
  4020. goto out;
  4021. }
  4022. if (!(reg & (1 << tag))) {
  4023. dev_err(hba->dev,
4024. "%s: cmd was completed, but without a completion interrupt, tag = %d",
  4025. __func__, tag);
  4026. }
  4027. lrbp = &hba->lrb[tag];
  4028. for (poll_cnt = 100; poll_cnt; poll_cnt--) {
  4029. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
  4030. UFS_QUERY_TASK, &resp);
  4031. if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
  4032. /* cmd pending in the device */
  4033. break;
  4034. } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  4035. /*
  4036. * cmd not pending in the device, check if it is
  4037. * in transition.
  4038. */
  4039. reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  4040. if (reg & (1 << tag)) {
  4041. /* sleep for max. 200us to stabilize */
  4042. usleep_range(100, 200);
  4043. continue;
  4044. }
  4045. /* command completed already */
  4046. goto out;
  4047. } else {
  4048. if (!err)
  4049. err = resp; /* service response error */
  4050. goto out;
  4051. }
  4052. }
  4053. if (!poll_cnt) {
  4054. err = -EBUSY;
  4055. goto out;
  4056. }
  4057. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
  4058. UFS_ABORT_TASK, &resp);
  4059. if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  4060. if (!err)
  4061. err = resp; /* service response error */
  4062. goto out;
  4063. }
  4064. err = ufshcd_clear_cmd(hba, tag);
  4065. if (err)
  4066. goto out;
  4067. scsi_dma_unmap(cmd);
  4068. spin_lock_irqsave(host->host_lock, flags);
  4069. ufshcd_outstanding_req_clear(hba, tag);
  4070. hba->lrb[tag].cmd = NULL;
  4071. spin_unlock_irqrestore(host->host_lock, flags);
  4072. clear_bit_unlock(tag, &hba->lrb_in_use);
  4073. wake_up(&hba->dev_cmd.tag_wq);
  4074. out:
  4075. if (!err) {
  4076. err = SUCCESS;
  4077. } else {
  4078. dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
  4079. err = FAILED;
  4080. }
  4081. /*
  4082. * This ufshcd_release() corresponds to the original scsi cmd that got
  4083. * aborted here (as we won't get any IRQ for it).
  4084. */
  4085. ufshcd_release(hba);
  4086. return err;
  4087. }
  4088. /**
  4089. * ufshcd_host_reset_and_restore - reset and restore host controller
  4090. * @hba: per-adapter instance
  4091. *
  4092. * Note that host controller reset may issue DME_RESET to
  4093. * local and remote (device) Uni-Pro stack and the attributes
  4094. * are reset to default state.
  4095. *
  4096. * Returns zero on success, non-zero on failure
  4097. */
  4098. static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
  4099. {
  4100. int err;
  4101. unsigned long flags;
  4102. /* Reset the host controller */
  4103. spin_lock_irqsave(hba->host->host_lock, flags);
  4104. ufshcd_hba_stop(hba, false);
  4105. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4106. err = ufshcd_hba_enable(hba);
  4107. if (err)
  4108. goto out;
  4109. /* Establish the link again and restore the device */
  4110. err = ufshcd_probe_hba(hba);
  4111. if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
  4112. err = -EIO;
  4113. out:
  4114. if (err)
  4115. dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
  4116. return err;
  4117. }
  4118. /**
  4119. * ufshcd_reset_and_restore - reset and re-initialize host/device
  4120. * @hba: per-adapter instance
  4121. *
  4122. * Reset and recover device, host and re-establish link. This
  4123. * is helpful to recover the communication in fatal error conditions.
  4124. *
  4125. * Returns zero on success, non-zero on failure
  4126. */
  4127. static int ufshcd_reset_and_restore(struct ufs_hba *hba)
  4128. {
  4129. int err = 0;
  4130. unsigned long flags;
  4131. int retries = MAX_HOST_RESET_RETRIES;
  4132. do {
  4133. err = ufshcd_host_reset_and_restore(hba);
  4134. } while (err && --retries);
  4135. /*
  4136. * After reset the door-bell might be cleared, complete
  4137. * outstanding requests in s/w here.
  4138. */
  4139. spin_lock_irqsave(hba->host->host_lock, flags);
  4140. ufshcd_transfer_req_compl(hba);
  4141. ufshcd_tmc_handler(hba);
  4142. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4143. return err;
  4144. }
  4145. /**
  4146. * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
4147. * @cmd: SCSI command pointer
  4148. *
  4149. * Returns SUCCESS/FAILED
  4150. */
  4151. static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
  4152. {
  4153. int err;
  4154. unsigned long flags;
  4155. struct ufs_hba *hba;
  4156. hba = shost_priv(cmd->device->host);
  4157. ufshcd_hold(hba, false);
  4158. /*
  4159. * Check if there is any race with fatal error handling.
  4160. * If so, wait for it to complete. Even though fatal error
  4161. * handling does reset and restore in some cases, don't assume
  4162. * anything out of it. We are just avoiding race here.
  4163. */
  4164. do {
  4165. spin_lock_irqsave(hba->host->host_lock, flags);
  4166. if (!(work_pending(&hba->eh_work) ||
  4167. hba->ufshcd_state == UFSHCD_STATE_RESET))
  4168. break;
  4169. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4170. dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
  4171. flush_work(&hba->eh_work);
  4172. } while (1);
  4173. hba->ufshcd_state = UFSHCD_STATE_RESET;
  4174. ufshcd_set_eh_in_progress(hba);
  4175. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4176. err = ufshcd_reset_and_restore(hba);
  4177. spin_lock_irqsave(hba->host->host_lock, flags);
  4178. if (!err) {
  4179. err = SUCCESS;
  4180. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  4181. } else {
  4182. err = FAILED;
  4183. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  4184. }
  4185. ufshcd_clear_eh_in_progress(hba);
  4186. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4187. ufshcd_release(hba);
  4188. return err;
  4189. }
  4190. /**
  4191. * ufshcd_get_max_icc_level - calculate the ICC level
  4192. * @sup_curr_uA: max. current supported by the regulator
  4193. * @start_scan: row at the desc table to start scan from
  4194. * @buff: power descriptor buffer
  4195. *
  4196. * Returns calculated max ICC level for specific regulator
  4197. */
  4198. static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
  4199. {
  4200. int i;
  4201. int curr_uA;
  4202. u16 data;
  4203. u16 unit;
  4204. for (i = start_scan; i >= 0; i--) {
  4205. data = be16_to_cpu(*((u16 *)(buff + 2*i)));
  4206. unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
  4207. ATTR_ICC_LVL_UNIT_OFFSET;
  4208. curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
  4209. switch (unit) {
  4210. case UFSHCD_NANO_AMP:
  4211. curr_uA = curr_uA / 1000;
  4212. break;
  4213. case UFSHCD_MILI_AMP:
  4214. curr_uA = curr_uA * 1000;
  4215. break;
  4216. case UFSHCD_AMP:
  4217. curr_uA = curr_uA * 1000 * 1000;
  4218. break;
  4219. case UFSHCD_MICRO_AMP:
  4220. default:
  4221. break;
  4222. }
  4223. if (sup_curr_uA >= curr_uA)
  4224. break;
  4225. }
  4226. if (i < 0) {
  4227. i = 0;
  4228. pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
  4229. }
  4230. return (u32)i;
  4231. }
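/*
 * [Editorial worked example] Each 16-bit descriptor entry packs a unit field
 * and a value field. If an entry decodes to UFSHCD_MILI_AMP with a value of
 * 10, the draw is normalised to 10 * 1000 = 10000 uA; with UFSHCD_NANO_AMP
 * and a value of 1500 it becomes 1500 / 1000 = 1 uA (integer division). The
 * scan walks from start_scan down to 0 and returns the first, i.e. highest,
 * level whose normalised draw fits within sup_curr_uA. The numbers above are
 * illustrative only.
 */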
  4232. /**
4233. * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
4234. * @hba: per-adapter instance
4235. * @desc_buf: power descriptor buffer to extract ICC levels from.
4236. * @len: length of desc_buf
4237. *
4238. * Returns the calculated ICC level, or 0 if the regulator capabilities
4239. * were not initialized.
  4240. */
  4241. static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
  4242. u8 *desc_buf, int len)
  4243. {
  4244. u32 icc_level = 0;
  4245. if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
  4246. !hba->vreg_info.vccq2) {
  4247. dev_err(hba->dev,
  4248. "%s: Regulator capability was not set, actvIccLevel=%d",
  4249. __func__, icc_level);
  4250. goto out;
  4251. }
  4252. if (hba->vreg_info.vcc)
  4253. icc_level = ufshcd_get_max_icc_level(
  4254. hba->vreg_info.vcc->max_uA,
  4255. POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
  4256. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
  4257. if (hba->vreg_info.vccq)
  4258. icc_level = ufshcd_get_max_icc_level(
  4259. hba->vreg_info.vccq->max_uA,
  4260. icc_level,
  4261. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
  4262. if (hba->vreg_info.vccq2)
  4263. icc_level = ufshcd_get_max_icc_level(
  4264. hba->vreg_info.vccq2->max_uA,
  4265. icc_level,
  4266. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
  4267. out:
  4268. return icc_level;
  4269. }
  4270. static void ufshcd_init_icc_levels(struct ufs_hba *hba)
  4271. {
  4272. int ret;
  4273. int buff_len = hba->desc_size.pwr_desc;
  4274. u8 desc_buf[hba->desc_size.pwr_desc];
  4275. ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
  4276. if (ret) {
  4277. dev_err(hba->dev,
4278. "%s: Failed reading power descriptor. len = %d ret = %d",
  4279. __func__, buff_len, ret);
  4280. return;
  4281. }
  4282. hba->init_prefetch_data.icc_level =
  4283. ufshcd_find_max_sup_active_icc_level(hba,
  4284. desc_buf, buff_len);
  4285. dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
  4286. __func__, hba->init_prefetch_data.icc_level);
  4287. ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  4288. QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
  4289. &hba->init_prefetch_data.icc_level);
  4290. if (ret)
  4291. dev_err(hba->dev,
  4292. "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4293. __func__, hba->init_prefetch_data.icc_level, ret);
  4294. }
  4295. /**
  4296. * ufshcd_scsi_add_wlus - Adds required W-LUs
  4297. * @hba: per-adapter instance
  4298. *
  4299. * UFS device specification requires the UFS devices to support 4 well known
  4300. * logical units:
  4301. * "REPORT_LUNS" (address: 01h)
  4302. * "UFS Device" (address: 50h)
  4303. * "RPMB" (address: 44h)
  4304. * "BOOT" (address: 30h)
  4305. * UFS device's power management needs to be controlled by "POWER CONDITION"
  4306. * field of SSU (START STOP UNIT) command. But this "power condition" field
4307. * will take effect only when it's sent to the "UFS device" well known logical unit,
  4308. * hence we require the scsi_device instance to represent this logical unit in
  4309. * order for the UFS host driver to send the SSU command for power management.
  4310. * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
  4311. * Block) LU so user space process can control this LU. User space may also
  4312. * want to have access to BOOT LU.
4313. * This function adds scsi device instances for each of the well known LUs
  4314. * (except "REPORT LUNS" LU).
  4315. *
  4316. * Returns zero on success (all required W-LUs are added successfully),
  4317. * non-zero error value on failure (if failed to add any of the required W-LU).
  4318. */
  4319. static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
  4320. {
  4321. int ret = 0;
  4322. struct scsi_device *sdev_rpmb;
  4323. struct scsi_device *sdev_boot;
  4324. hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
  4325. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
  4326. if (IS_ERR(hba->sdev_ufs_device)) {
  4327. ret = PTR_ERR(hba->sdev_ufs_device);
  4328. hba->sdev_ufs_device = NULL;
  4329. goto out;
  4330. }
  4331. scsi_device_put(hba->sdev_ufs_device);
  4332. sdev_boot = __scsi_add_device(hba->host, 0, 0,
  4333. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
  4334. if (IS_ERR(sdev_boot)) {
  4335. ret = PTR_ERR(sdev_boot);
  4336. goto remove_sdev_ufs_device;
  4337. }
  4338. scsi_device_put(sdev_boot);
  4339. sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
  4340. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
  4341. if (IS_ERR(sdev_rpmb)) {
  4342. ret = PTR_ERR(sdev_rpmb);
  4343. goto remove_sdev_boot;
  4344. }
  4345. scsi_device_put(sdev_rpmb);
  4346. goto out;
  4347. remove_sdev_boot:
  4348. scsi_remove_device(sdev_boot);
  4349. remove_sdev_ufs_device:
  4350. scsi_remove_device(hba->sdev_ufs_device);
  4351. out:
  4352. return ret;
  4353. }
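/*
 * [Editorial note] __scsi_add_device() returns the scsi_device with a
 * reference held, so the scsi_device_put() calls above drop that reference
 * while the W-LU itself stays registered with the SCSI mid-layer. This is a
 * reading of the reference handling, not text from the original driver.
 */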
  4354. static int ufs_get_device_desc(struct ufs_hba *hba,
  4355. struct ufs_dev_desc *dev_desc)
  4356. {
  4357. int err;
  4358. u8 model_index;
  4359. u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
  4360. u8 desc_buf[hba->desc_size.dev_desc];
  4361. err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
  4362. if (err) {
  4363. dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
  4364. __func__, err);
  4365. goto out;
  4366. }
  4367. /*
  4368. * getting vendor (manufacturerID) and Bank Index in big endian
  4369. * format
  4370. */
  4371. dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
  4372. desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
  4373. model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
  4374. err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
  4375. QUERY_DESC_MAX_SIZE, ASCII_STD);
  4376. if (err) {
  4377. dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
  4378. __func__, err);
  4379. goto out;
  4380. }
  4381. str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
  4382. strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
  4383. min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
  4384. MAX_MODEL_LEN));
  4385. /* Null terminate the model string */
  4386. dev_desc->model[MAX_MODEL_LEN] = '\0';
  4387. out:
  4388. return err;
  4389. }
  4390. static void ufs_fixup_device_setup(struct ufs_hba *hba,
  4391. struct ufs_dev_desc *dev_desc)
  4392. {
  4393. struct ufs_dev_fix *f;
  4394. for (f = ufs_fixups; f->quirk; f++) {
  4395. if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
  4396. f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
  4397. (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
  4398. !strcmp(f->card.model, UFS_ANY_MODEL)))
  4399. hba->dev_quirks |= f->quirk;
  4400. }
  4401. }
  4402. /**
  4403. * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
  4404. * @hba: per-adapter instance
  4405. *
  4406. * PA_TActivate parameter can be tuned manually if UniPro version is less than
4407. * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
  4408. * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
  4409. * the hibern8 exit latency.
  4410. *
  4411. * Returns zero on success, non-zero error value on failure.
  4412. */
  4413. static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
  4414. {
  4415. int ret = 0;
  4416. u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
  4417. ret = ufshcd_dme_peer_get(hba,
  4418. UIC_ARG_MIB_SEL(
  4419. RX_MIN_ACTIVATETIME_CAPABILITY,
  4420. UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
  4421. &peer_rx_min_activatetime);
  4422. if (ret)
  4423. goto out;
  4424. /* make sure proper unit conversion is applied */
  4425. tuned_pa_tactivate =
  4426. ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
  4427. / PA_TACTIVATE_TIME_UNIT_US);
  4428. ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
  4429. tuned_pa_tactivate);
  4430. out:
  4431. return ret;
  4432. }
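/*
 * [Editorial worked example] The peer's RX_MIN_ACTIVATETIME capability is
 * converted into PA_TActivate granularity. Assuming, purely for illustration,
 * RX_MIN_ACTIVATETIME_UNIT_US of 100us and PA_TACTIVATE_TIME_UNIT_US of 10us,
 * a peer_rx_min_activatetime of 3 (300us) yields
 * tuned_pa_tactivate = (3 * 100) / 10 = 30 units of 10us.
 */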
  4433. /**
  4434. * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
  4435. * @hba: per-adapter instance
  4436. *
  4437. * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
4438. * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
  4439. * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
  4440. * This optimal value can help reduce the hibern8 exit latency.
  4441. *
  4442. * Returns zero on success, non-zero error value on failure.
  4443. */
  4444. static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
  4445. {
  4446. int ret = 0;
  4447. u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
  4448. u32 max_hibern8_time, tuned_pa_hibern8time;
  4449. ret = ufshcd_dme_get(hba,
  4450. UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
  4451. UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
  4452. &local_tx_hibern8_time_cap);
  4453. if (ret)
  4454. goto out;
  4455. ret = ufshcd_dme_peer_get(hba,
  4456. UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
  4457. UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
  4458. &peer_rx_hibern8_time_cap);
  4459. if (ret)
  4460. goto out;
  4461. max_hibern8_time = max(local_tx_hibern8_time_cap,
  4462. peer_rx_hibern8_time_cap);
  4463. /* make sure proper unit conversion is applied */
  4464. tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
  4465. / PA_HIBERN8_TIME_UNIT_US);
  4466. ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
  4467. tuned_pa_hibern8time);
  4468. out:
  4469. return ret;
  4470. }
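/*
 * [Editorial worked example] PA_Hibern8Time is derived from the larger of the
 * two capabilities. With illustrative caps of 5 (local TX) and 7 (peer RX),
 * max_hibern8_time is 7; assuming HIBERN8TIME_UNIT_US and
 * PA_HIBERN8_TIME_UNIT_US are both 100us, tuned_pa_hibern8time =
 * (7 * 100) / 100 = 7. The unit values are assumptions; only the
 * max-then-convert shape comes from the code above.
 */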
  4471. /**
  4472. * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
  4473. * less than device PA_TACTIVATE time.
  4474. * @hba: per-adapter instance
  4475. *
  4476. * Some UFS devices require host PA_TACTIVATE to be lower than device
4477. * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
4478. * enabled for such devices.
  4479. *
  4480. * Returns zero on success, non-zero error value on failure.
  4481. */
  4482. static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
  4483. {
  4484. int ret = 0;
  4485. u32 granularity, peer_granularity;
  4486. u32 pa_tactivate, peer_pa_tactivate;
  4487. u32 pa_tactivate_us, peer_pa_tactivate_us;
  4488. u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
  4489. ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
  4490. &granularity);
  4491. if (ret)
  4492. goto out;
  4493. ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
  4494. &peer_granularity);
  4495. if (ret)
  4496. goto out;
  4497. if ((granularity < PA_GRANULARITY_MIN_VAL) ||
  4498. (granularity > PA_GRANULARITY_MAX_VAL)) {
  4499. dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
  4500. __func__, granularity);
  4501. return -EINVAL;
  4502. }
  4503. if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
  4504. (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
  4505. dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
  4506. __func__, peer_granularity);
  4507. return -EINVAL;
  4508. }
  4509. ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
  4510. if (ret)
  4511. goto out;
  4512. ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
  4513. &peer_pa_tactivate);
  4514. if (ret)
  4515. goto out;
  4516. pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
  4517. peer_pa_tactivate_us = peer_pa_tactivate *
  4518. gran_to_us_table[peer_granularity - 1];
  4519. if (pa_tactivate_us > peer_pa_tactivate_us) {
  4520. u32 new_peer_pa_tactivate;
  4521. new_peer_pa_tactivate = pa_tactivate_us /
  4522. gran_to_us_table[peer_granularity - 1];
  4523. new_peer_pa_tactivate++;
  4524. ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
  4525. new_peer_pa_tactivate);
  4526. }
  4527. out:
  4528. return ret;
  4529. }
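/*
 * [Editorial worked example] Using gran_to_us_table[] above: host granularity
 * 3 means 8us steps, so a host PA_TACTIVATE of 4 is 32us; peer granularity 1
 * means 1us steps, so a peer PA_TACTIVATE of 16 is 16us. Since 32us > 16us,
 * the peer value is raised to 32 / 1 + 1 = 33 (33us), keeping the device's
 * PA_TACTIVATE strictly above the host's as the quirk requires.
 */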
  4530. static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
  4531. {
  4532. if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
  4533. ufshcd_tune_pa_tactivate(hba);
  4534. ufshcd_tune_pa_hibern8time(hba);
  4535. }
  4536. if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
  4537. /* set 1ms timeout for PA_TACTIVATE */
  4538. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
  4539. if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
  4540. ufshcd_quirk_tune_host_pa_tactivate(hba);
  4541. ufshcd_vops_apply_dev_quirks(hba);
  4542. }
  4543. static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
  4544. {
  4545. int err;
  4546. err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
  4547. &hba->desc_size.dev_desc);
  4548. if (err)
  4549. hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
  4550. err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
  4551. &hba->desc_size.pwr_desc);
  4552. if (err)
  4553. hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
  4554. err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
  4555. &hba->desc_size.interc_desc);
  4556. if (err)
  4557. hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
  4558. err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
  4559. &hba->desc_size.conf_desc);
  4560. if (err)
  4561. hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
  4562. err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
  4563. &hba->desc_size.unit_desc);
  4564. if (err)
  4565. hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
  4566. err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
  4567. &hba->desc_size.geom_desc);
  4568. if (err)
  4569. hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
  4570. }
  4571. static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
  4572. {
  4573. hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
  4574. hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
  4575. hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
  4576. hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
  4577. hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
  4578. hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
  4579. }
  4580. /**
  4581. * ufshcd_probe_hba - probe hba to detect device and initialize
  4582. * @hba: per-adapter instance
  4583. *
  4584. * Execute link-startup and verify device initialization
  4585. */
  4586. static int ufshcd_probe_hba(struct ufs_hba *hba)
  4587. {
  4588. struct ufs_dev_desc card = {0};
  4589. int ret;
  4590. ret = ufshcd_link_startup(hba);
  4591. if (ret)
  4592. goto out;
  4593. ufshcd_init_pwr_info(hba);
  4594. /* set the default level for urgent bkops */
  4595. hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
  4596. hba->is_urgent_bkops_lvl_checked = false;
  4597. /* UniPro link is active now */
  4598. ufshcd_set_link_active(hba);
  4599. ret = ufshcd_verify_dev_init(hba);
  4600. if (ret)
  4601. goto out;
  4602. ret = ufshcd_complete_dev_init(hba);
  4603. if (ret)
  4604. goto out;
4605. /* Initialize the device descriptor sizes */
  4606. ufshcd_init_desc_sizes(hba);
  4607. ret = ufs_get_device_desc(hba, &card);
  4608. if (ret) {
  4609. dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
  4610. __func__, ret);
  4611. goto out;
  4612. }
  4613. ufs_fixup_device_setup(hba, &card);
  4614. ufshcd_tune_unipro_params(hba);
  4615. ret = ufshcd_set_vccq_rail_unused(hba,
  4616. (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
  4617. if (ret)
  4618. goto out;
  4619. /* UFS device is also active now */
  4620. ufshcd_set_ufs_dev_active(hba);
  4621. ufshcd_force_reset_auto_bkops(hba);
  4622. hba->wlun_dev_clr_ua = true;
  4623. if (ufshcd_get_max_pwr_mode(hba)) {
  4624. dev_err(hba->dev,
  4625. "%s: Failed getting max supported power mode\n",
  4626. __func__);
  4627. } else {
  4628. ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
  4629. if (ret)
  4630. dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
  4631. __func__, ret);
  4632. }
  4633. /* set the state as operational after switching to desired gear */
  4634. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  4635. /*
  4636. * If we are in error handling context or in power management callbacks
  4637. * context, no need to scan the host
  4638. */
  4639. if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
  4640. bool flag;
  4641. /* clear any previous UFS device information */
  4642. memset(&hba->dev_info, 0, sizeof(hba->dev_info));
  4643. if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
  4644. QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
  4645. hba->dev_info.f_power_on_wp_en = flag;
  4646. if (!hba->is_init_prefetch)
  4647. ufshcd_init_icc_levels(hba);
  4648. /* Add required well known logical units to scsi mid layer */
  4649. if (ufshcd_scsi_add_wlus(hba))
  4650. goto out;
  4651. scsi_scan_host(hba->host);
  4652. pm_runtime_put_sync(hba->dev);
  4653. }
  4654. if (!hba->is_init_prefetch)
  4655. hba->is_init_prefetch = true;
  4656. /* Resume devfreq after UFS device is detected */
  4657. if (ufshcd_is_clkscaling_enabled(hba))
  4658. devfreq_resume_device(hba->devfreq);
  4659. out:
  4660. /*
  4661. * If we failed to initialize the device or the device is not
  4662. * present, turn off the power/clocks etc.
  4663. */
  4664. if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
  4665. pm_runtime_put_sync(hba->dev);
  4666. ufshcd_hba_exit(hba);
  4667. }
  4668. return ret;
  4669. }
  4670. /**
  4671. * ufshcd_async_scan - asynchronous execution for probing hba
  4672. * @data: data pointer to pass to this function
  4673. * @cookie: cookie data
  4674. */
  4675. static void ufshcd_async_scan(void *data, async_cookie_t cookie)
  4676. {
  4677. struct ufs_hba *hba = (struct ufs_hba *)data;
  4678. ufshcd_probe_hba(hba);
  4679. }
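/*
 * [Editorial sketch] ufshcd_async_scan() is meant to be kicked off from the
 * init path via the async framework so that device probing does not block
 * driver initialization; a minimal sketch (the actual call site is outside
 * this excerpt and is an assumption):
 *
 *	async_schedule(ufshcd_async_scan, hba);
 */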
  4680. static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
  4681. {
  4682. unsigned long flags;
  4683. struct Scsi_Host *host;
  4684. struct ufs_hba *hba;
  4685. int index;
  4686. bool found = false;
  4687. if (!scmd || !scmd->device || !scmd->device->host)
  4688. return BLK_EH_NOT_HANDLED;
  4689. host = scmd->device->host;
  4690. hba = shost_priv(host);
  4691. if (!hba)
  4692. return BLK_EH_NOT_HANDLED;
  4693. spin_lock_irqsave(host->host_lock, flags);
  4694. for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
  4695. if (hba->lrb[index].cmd == scmd) {
  4696. found = true;
  4697. break;
  4698. }
  4699. }
  4700. spin_unlock_irqrestore(host->host_lock, flags);
  4701. /*
  4702. * Bypass SCSI error handling and reset the block layer timer if this
  4703. * SCSI command was not actually dispatched to UFS driver, otherwise
  4704. * let SCSI layer handle the error as usual.
  4705. */
  4706. return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
  4707. }
  4708. static struct scsi_host_template ufshcd_driver_template = {
  4709. .module = THIS_MODULE,
  4710. .name = UFSHCD,
  4711. .proc_name = UFSHCD,
  4712. .queuecommand = ufshcd_queuecommand,
  4713. .slave_alloc = ufshcd_slave_alloc,
  4714. .slave_configure = ufshcd_slave_configure,
  4715. .slave_destroy = ufshcd_slave_destroy,
  4716. .change_queue_depth = ufshcd_change_queue_depth,
  4717. .eh_abort_handler = ufshcd_abort,
  4718. .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
  4719. .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
  4720. .eh_timed_out = ufshcd_eh_timed_out,
  4721. .this_id = -1,
  4722. .sg_tablesize = SG_ALL,
  4723. .cmd_per_lun = UFSHCD_CMD_PER_LUN,
  4724. .can_queue = UFSHCD_CAN_QUEUE,
  4725. .max_host_blocked = 1,
  4726. .track_queue_depth = 1,
  4727. };
  4728. static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
  4729. int ua)
  4730. {
  4731. int ret;
  4732. if (!vreg)
  4733. return 0;
  4734. ret = regulator_set_load(vreg->reg, ua);
  4735. if (ret < 0) {
  4736. dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
  4737. __func__, vreg->name, ua, ret);
  4738. }
  4739. return ret;
  4740. }
  4741. static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
  4742. struct ufs_vreg *vreg)
  4743. {
  4744. if (!vreg)
  4745. return 0;
  4746. else if (vreg->unused)
  4747. return 0;
  4748. else
  4749. return ufshcd_config_vreg_load(hba->dev, vreg,
  4750. UFS_VREG_LPM_LOAD_UA);
  4751. }
  4752. static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
  4753. struct ufs_vreg *vreg)
  4754. {
  4755. if (!vreg)
  4756. return 0;
  4757. else if (vreg->unused)
  4758. return 0;
  4759. else
  4760. return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
  4761. }
  4762. static int ufshcd_config_vreg(struct device *dev,
  4763. struct ufs_vreg *vreg, bool on)
  4764. {
  4765. int ret = 0;
  4766. struct regulator *reg;
  4767. const char *name;
  4768. int min_uV, uA_load;
  4769. BUG_ON(!vreg);
  4770. reg = vreg->reg;
  4771. name = vreg->name;
  4772. if (regulator_count_voltages(reg) > 0) {
  4773. min_uV = on ? vreg->min_uV : 0;
  4774. ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
  4775. if (ret) {
  4776. dev_err(dev, "%s: %s set voltage failed, err=%d\n",
  4777. __func__, name, ret);
  4778. goto out;
  4779. }
  4780. uA_load = on ? vreg->max_uA : 0;
  4781. ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
  4782. if (ret)
  4783. goto out;
  4784. }
  4785. out:
  4786. return ret;
  4787. }
  4788. static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
  4789. {
  4790. int ret = 0;
  4791. if (!vreg)
  4792. goto out;
  4793. else if (vreg->enabled || vreg->unused)
  4794. goto out;
  4795. ret = ufshcd_config_vreg(dev, vreg, true);
  4796. if (!ret)
  4797. ret = regulator_enable(vreg->reg);
  4798. if (!ret)
  4799. vreg->enabled = true;
  4800. else
  4801. dev_err(dev, "%s: %s enable failed, err=%d\n",
  4802. __func__, vreg->name, ret);
  4803. out:
  4804. return ret;
  4805. }
  4806. static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
  4807. {
  4808. int ret = 0;
  4809. if (!vreg)
  4810. goto out;
  4811. else if (!vreg->enabled || vreg->unused)
  4812. goto out;
  4813. ret = regulator_disable(vreg->reg);
  4814. if (!ret) {
  4815. /* ignore errors on applying disable config */
  4816. ufshcd_config_vreg(dev, vreg, false);
  4817. vreg->enabled = false;
  4818. } else {
  4819. dev_err(dev, "%s: %s disable failed, err=%d\n",
  4820. __func__, vreg->name, ret);
  4821. }
  4822. out:
  4823. return ret;
  4824. }
  4825. static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
  4826. {
  4827. int ret = 0;
  4828. struct device *dev = hba->dev;
  4829. struct ufs_vreg_info *info = &hba->vreg_info;
  4830. if (!info)
  4831. goto out;
  4832. ret = ufshcd_toggle_vreg(dev, info->vcc, on);
  4833. if (ret)
  4834. goto out;
  4835. ret = ufshcd_toggle_vreg(dev, info->vccq, on);
  4836. if (ret)
  4837. goto out;
  4838. ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
  4839. if (ret)
  4840. goto out;
  4841. out:
  4842. if (ret) {
  4843. ufshcd_toggle_vreg(dev, info->vccq2, false);
  4844. ufshcd_toggle_vreg(dev, info->vccq, false);
  4845. ufshcd_toggle_vreg(dev, info->vcc, false);
  4846. }
  4847. return ret;
  4848. }
  4849. static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
  4850. {
  4851. struct ufs_vreg_info *info = &hba->vreg_info;
  4852. if (info)
  4853. return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
  4854. return 0;
  4855. }
  4856. static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
  4857. {
  4858. int ret = 0;
  4859. if (!vreg)
  4860. goto out;
  4861. vreg->reg = devm_regulator_get(dev, vreg->name);
  4862. if (IS_ERR(vreg->reg)) {
  4863. ret = PTR_ERR(vreg->reg);
  4864. dev_err(dev, "%s: %s get failed, err=%d\n",
  4865. __func__, vreg->name, ret);
  4866. }
  4867. out:
  4868. return ret;
  4869. }
  4870. static int ufshcd_init_vreg(struct ufs_hba *hba)
  4871. {
  4872. int ret = 0;
  4873. struct device *dev = hba->dev;
  4874. struct ufs_vreg_info *info = &hba->vreg_info;
  4875. if (!info)
  4876. goto out;
  4877. ret = ufshcd_get_vreg(dev, info->vcc);
  4878. if (ret)
  4879. goto out;
  4880. ret = ufshcd_get_vreg(dev, info->vccq);
  4881. if (ret)
  4882. goto out;
  4883. ret = ufshcd_get_vreg(dev, info->vccq2);
  4884. out:
  4885. return ret;
  4886. }
  4887. static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
  4888. {
  4889. struct ufs_vreg_info *info = &hba->vreg_info;
  4890. if (info)
  4891. return ufshcd_get_vreg(hba->dev, info->vdd_hba);
  4892. return 0;
  4893. }
  4894. static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
  4895. {
  4896. int ret = 0;
  4897. struct ufs_vreg_info *info = &hba->vreg_info;
  4898. if (!info)
  4899. goto out;
  4900. else if (!info->vccq)
  4901. goto out;
  4902. if (unused) {
  4903. /* shut off the rail here */
  4904. ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
  4905. /*
  4906. * Mark this rail as no longer used, so it doesn't get enabled
  4907. * later by mistake
  4908. */
  4909. if (!ret)
  4910. info->vccq->unused = true;
  4911. } else {
  4912. /*
  4913. * rail should have been already enabled hence just make sure
  4914. * that unused flag is cleared.
  4915. */
  4916. info->vccq->unused = false;
  4917. }
  4918. out:
  4919. return ret;
  4920. }
  4921. static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
  4922. bool skip_ref_clk)
  4923. {
  4924. int ret = 0;
  4925. struct ufs_clk_info *clki;
  4926. struct list_head *head = &hba->clk_list_head;
  4927. unsigned long flags;
  4928. if (!head || list_empty(head))
  4929. goto out;
  4930. list_for_each_entry(clki, head, list) {
  4931. if (!IS_ERR_OR_NULL(clki->clk)) {
  4932. if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
  4933. continue;
  4934. if (on && !clki->enabled) {
  4935. ret = clk_prepare_enable(clki->clk);
  4936. if (ret) {
  4937. dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
  4938. __func__, clki->name, ret);
  4939. goto out;
  4940. }
  4941. } else if (!on && clki->enabled) {
  4942. clk_disable_unprepare(clki->clk);
  4943. }
  4944. clki->enabled = on;
  4945. dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
  4946. clki->name, on ? "en" : "dis");
  4947. }
  4948. }
  4949. ret = ufshcd_vops_setup_clocks(hba, on);
  4950. out:
  4951. if (ret) {
  4952. list_for_each_entry(clki, head, list) {
  4953. if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
  4954. clk_disable_unprepare(clki->clk);
  4955. }
  4956. } else if (on) {
  4957. spin_lock_irqsave(hba->host->host_lock, flags);
  4958. hba->clk_gating.state = CLKS_ON;
  4959. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4960. }
  4961. return ret;
  4962. }
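/*
 * [Editorial usage note] skip_ref_clk lets the suspend path gate every clock
 * except the device reference clock when the UniPro link stays active, as in
 * the call found later in ufshcd_suspend():
 *
 *	__ufshcd_setup_clocks(hba, false, true);
 *
 * ufshcd_setup_clocks() below is the plain on/off wrapper used everywhere
 * else.
 */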
  4963. static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
  4964. {
  4965. return __ufshcd_setup_clocks(hba, on, false);
  4966. }
  4967. static int ufshcd_init_clocks(struct ufs_hba *hba)
  4968. {
  4969. int ret = 0;
  4970. struct ufs_clk_info *clki;
  4971. struct device *dev = hba->dev;
  4972. struct list_head *head = &hba->clk_list_head;
  4973. if (!head || list_empty(head))
  4974. goto out;
  4975. list_for_each_entry(clki, head, list) {
  4976. if (!clki->name)
  4977. continue;
  4978. clki->clk = devm_clk_get(dev, clki->name);
  4979. if (IS_ERR(clki->clk)) {
  4980. ret = PTR_ERR(clki->clk);
  4981. dev_err(dev, "%s: %s clk get failed, %d\n",
  4982. __func__, clki->name, ret);
  4983. goto out;
  4984. }
  4985. if (clki->max_freq) {
  4986. ret = clk_set_rate(clki->clk, clki->max_freq);
  4987. if (ret) {
  4988. dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
  4989. __func__, clki->name,
  4990. clki->max_freq, ret);
  4991. goto out;
  4992. }
  4993. clki->curr_freq = clki->max_freq;
  4994. }
  4995. dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
  4996. clki->name, clk_get_rate(clki->clk));
  4997. }
  4998. out:
  4999. return ret;
  5000. }
  5001. static int ufshcd_variant_hba_init(struct ufs_hba *hba)
  5002. {
  5003. int err = 0;
  5004. if (!hba->vops)
  5005. goto out;
  5006. err = ufshcd_vops_init(hba);
  5007. if (err)
  5008. goto out;
  5009. err = ufshcd_vops_setup_regulators(hba, true);
  5010. if (err)
  5011. goto out_exit;
  5012. goto out;
  5013. out_exit:
  5014. ufshcd_vops_exit(hba);
  5015. out:
  5016. if (err)
  5017. dev_err(hba->dev, "%s: variant %s init failed err %d\n",
  5018. __func__, ufshcd_get_var_name(hba), err);
  5019. return err;
  5020. }
  5021. static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
  5022. {
  5023. if (!hba->vops)
  5024. return;
  5025. ufshcd_vops_setup_clocks(hba, false);
  5026. ufshcd_vops_setup_regulators(hba, false);
  5027. ufshcd_vops_exit(hba);
  5028. }
  5029. static int ufshcd_hba_init(struct ufs_hba *hba)
  5030. {
  5031. int err;
  5032. /*
5033. * Handle host controller power separately from the UFS device power
5034. * rails, as this makes it easier to control host controller power
5035. * collapse, which is different from UFS device power collapse.
5036. * Also, enable the host controller power before we go ahead with the
5037. * rest of the initialization here.
  5038. */
  5039. err = ufshcd_init_hba_vreg(hba);
  5040. if (err)
  5041. goto out;
  5042. err = ufshcd_setup_hba_vreg(hba, true);
  5043. if (err)
  5044. goto out;
  5045. err = ufshcd_init_clocks(hba);
  5046. if (err)
  5047. goto out_disable_hba_vreg;
  5048. err = ufshcd_setup_clocks(hba, true);
  5049. if (err)
  5050. goto out_disable_hba_vreg;
  5051. err = ufshcd_init_vreg(hba);
  5052. if (err)
  5053. goto out_disable_clks;
  5054. err = ufshcd_setup_vreg(hba, true);
  5055. if (err)
  5056. goto out_disable_clks;
  5057. err = ufshcd_variant_hba_init(hba);
  5058. if (err)
  5059. goto out_disable_vreg;
  5060. hba->is_powered = true;
  5061. goto out;
  5062. out_disable_vreg:
  5063. ufshcd_setup_vreg(hba, false);
  5064. out_disable_clks:
  5065. ufshcd_setup_clocks(hba, false);
  5066. out_disable_hba_vreg:
  5067. ufshcd_setup_hba_vreg(hba, false);
  5068. out:
  5069. return err;
  5070. }
  5071. static void ufshcd_hba_exit(struct ufs_hba *hba)
  5072. {
  5073. if (hba->is_powered) {
  5074. ufshcd_variant_hba_exit(hba);
  5075. ufshcd_setup_vreg(hba, false);
  5076. ufshcd_setup_clocks(hba, false);
  5077. ufshcd_setup_hba_vreg(hba, false);
  5078. hba->is_powered = false;
  5079. }
  5080. }
  5081. static int
  5082. ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
  5083. {
  5084. unsigned char cmd[6] = {REQUEST_SENSE,
  5085. 0,
  5086. 0,
  5087. 0,
  5088. SCSI_SENSE_BUFFERSIZE,
  5089. 0};
  5090. char *buffer;
  5091. int ret;
  5092. buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
  5093. if (!buffer) {
  5094. ret = -ENOMEM;
  5095. goto out;
  5096. }
  5097. ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
  5098. SCSI_SENSE_BUFFERSIZE, NULL,
  5099. msecs_to_jiffies(1000), 3, NULL, REQ_PM);
  5100. if (ret)
  5101. pr_err("%s: failed with err %d\n", __func__, ret);
  5102. kfree(buffer);
  5103. out:
  5104. return ret;
  5105. }
  5106. /**
  5107. * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
  5108. * power mode
  5109. * @hba: per adapter instance
  5110. * @pwr_mode: device power mode to set
  5111. *
  5112. * Returns 0 if requested power mode is set successfully
  5113. * Returns non-zero if failed to set the requested power mode
  5114. */
  5115. static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
  5116. enum ufs_dev_pwr_mode pwr_mode)
  5117. {
  5118. unsigned char cmd[6] = { START_STOP };
  5119. struct scsi_sense_hdr sshdr;
  5120. struct scsi_device *sdp;
  5121. unsigned long flags;
  5122. int ret;
  5123. spin_lock_irqsave(hba->host->host_lock, flags);
  5124. sdp = hba->sdev_ufs_device;
  5125. if (sdp) {
  5126. ret = scsi_device_get(sdp);
  5127. if (!ret && !scsi_device_online(sdp)) {
  5128. ret = -ENODEV;
  5129. scsi_device_put(sdp);
  5130. }
  5131. } else {
  5132. ret = -ENODEV;
  5133. }
  5134. spin_unlock_irqrestore(hba->host->host_lock, flags);
  5135. if (ret)
  5136. return ret;
  5137. /*
  5138. * If scsi commands fail, the scsi mid-layer schedules scsi error-
5139. * handling, which would wait for the host to be resumed. Since we know
  5140. * we are functional while we are here, skip host resume in error
  5141. * handling context.
  5142. */
  5143. hba->host->eh_noresume = 1;
  5144. if (hba->wlun_dev_clr_ua) {
  5145. ret = ufshcd_send_request_sense(hba, sdp);
  5146. if (ret)
  5147. goto out;
  5148. /* Unit attention condition is cleared now */
  5149. hba->wlun_dev_clr_ua = false;
  5150. }
  5151. cmd[4] = pwr_mode << 4;
  5152. /*
5153. * This function is generally called from the power management
5154. * callbacks, hence set the REQ_PM flag so that it doesn't resume the
5155. * already suspended children.
  5156. */
  5157. ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
  5158. START_STOP_TIMEOUT, 0, NULL, REQ_PM);
  5159. if (ret) {
  5160. sdev_printk(KERN_WARNING, sdp,
  5161. "START_STOP failed for power mode: %d, result %x\n",
  5162. pwr_mode, ret);
  5163. if (driver_byte(ret) & DRIVER_SENSE)
  5164. scsi_print_sense_hdr(sdp, NULL, &sshdr);
  5165. }
  5166. if (!ret)
  5167. hba->curr_dev_pwr_mode = pwr_mode;
  5168. out:
  5169. scsi_device_put(sdp);
  5170. hba->host->eh_noresume = 0;
  5171. return ret;
  5172. }
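/*
 * [Editorial worked example] cmd[4] = pwr_mode << 4 places the requested
 * power condition in bits 7:4 of byte 4 of the START STOP UNIT CDB. For
 * instance, assuming UFS_SLEEP_PWR_MODE has the value 2 (an assumption; the
 * enum is defined elsewhere), the byte becomes 0x20, asking the device to
 * enter the UFS-Sleep power condition.
 */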
  5173. static int ufshcd_link_state_transition(struct ufs_hba *hba,
  5174. enum uic_link_state req_link_state,
  5175. int check_for_bkops)
  5176. {
  5177. int ret = 0;
  5178. if (req_link_state == hba->uic_link_state)
  5179. return 0;
  5180. if (req_link_state == UIC_LINK_HIBERN8_STATE) {
  5181. ret = ufshcd_uic_hibern8_enter(hba);
  5182. if (!ret)
  5183. ufshcd_set_link_hibern8(hba);
  5184. else
  5185. goto out;
  5186. }
  5187. /*
  5188. * If autobkops is enabled, link can't be turned off because
  5189. * turning off the link would also turn off the device.
  5190. */
  5191. else if ((req_link_state == UIC_LINK_OFF_STATE) &&
  5192. (!check_for_bkops || (check_for_bkops &&
  5193. !hba->auto_bkops_enabled))) {
  5194. /*
5195. * Let's make sure that the link is in low power mode; we currently do
5196. * this by putting the link in Hibern8. Another way to put the link in
5197. * low power mode is to send a DME end point reset to the device and
5198. * then send the DME reset command to the local UniPro. But putting the
5199. * link in Hibern8 is much faster.
  5200. */
  5201. ret = ufshcd_uic_hibern8_enter(hba);
  5202. if (ret)
  5203. goto out;
  5204. /*
  5205. * Change controller state to "reset state" which
  5206. * should also put the link in off/reset state
  5207. */
  5208. ufshcd_hba_stop(hba, true);
  5209. /*
  5210. * TODO: Check if we need any delay to make sure that
  5211. * controller is reset
  5212. */
  5213. ufshcd_set_link_off(hba);
  5214. }
  5215. out:
  5216. return ret;
  5217. }
  5218. static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
  5219. {
  5220. /*
5221. * It seems some UFS devices may keep drawing more than the sleep current
5222. * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
5223. * To avoid this situation, add a 2ms delay before putting these UFS
  5224. * rails in LPM mode.
  5225. */
  5226. if (!ufshcd_is_link_active(hba) &&
  5227. hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
  5228. usleep_range(2000, 2100);
  5229. /*
5230. * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
5231. * save some power.
  5232. *
  5233. * If UFS device and link is in OFF state, all power supplies (VCC,
  5234. * VCCQ, VCCQ2) can be turned off if power on write protect is not
  5235. * required. If UFS link is inactive (Hibern8 or OFF state) and device
  5236. * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
  5237. *
  5238. * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
  5239. * in low power state which would save some power.
  5240. */
  5241. if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
  5242. !hba->dev_info.is_lu_power_on_wp) {
  5243. ufshcd_setup_vreg(hba, false);
  5244. } else if (!ufshcd_is_ufs_dev_active(hba)) {
  5245. ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
  5246. if (!ufshcd_is_link_active(hba)) {
  5247. ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
  5248. ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
  5249. }
  5250. }
  5251. }
  5252. static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
  5253. {
  5254. int ret = 0;
  5255. if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
  5256. !hba->dev_info.is_lu_power_on_wp) {
  5257. ret = ufshcd_setup_vreg(hba, true);
  5258. } else if (!ufshcd_is_ufs_dev_active(hba)) {
  5259. ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
  5260. if (!ret && !ufshcd_is_link_active(hba)) {
  5261. ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
  5262. if (ret)
  5263. goto vcc_disable;
  5264. ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
  5265. if (ret)
  5266. goto vccq_lpm;
  5267. }
  5268. }
  5269. goto out;
  5270. vccq_lpm:
  5271. ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
  5272. vcc_disable:
  5273. ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
  5274. out:
  5275. return ret;
  5276. }
  5277. static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
  5278. {
  5279. if (ufshcd_is_link_off(hba))
  5280. ufshcd_setup_hba_vreg(hba, false);
  5281. }
  5282. static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
  5283. {
  5284. if (ufshcd_is_link_off(hba))
  5285. ufshcd_setup_hba_vreg(hba, true);
  5286. }
  5287. /**
  5288. * ufshcd_suspend - helper function for suspend operations
  5289. * @hba: per adapter instance
  5290. * @pm_op: desired low power operation type
  5291. *
  5292. * This function will try to put the UFS device and link into low power
  5293. * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
  5294. * (System PM level).
  5295. *
  5296. * If this function is called during shutdown, it will make sure that
  5297. * both UFS device and UFS link is powered off.
  5298. *
  5299. * NOTE: UFS device & link must be active before we enter in this function.
  5300. *
  5301. * Returns 0 for success and non-zero for failure
  5302. */
  5303. static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
  5304. {
  5305. int ret = 0;
  5306. enum ufs_pm_level pm_lvl;
  5307. enum ufs_dev_pwr_mode req_dev_pwr_mode;
  5308. enum uic_link_state req_link_state;
  5309. hba->pm_op_in_progress = 1;
  5310. if (!ufshcd_is_shutdown_pm(pm_op)) {
  5311. pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
  5312. hba->rpm_lvl : hba->spm_lvl;
  5313. req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
  5314. req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
  5315. } else {
  5316. req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
  5317. req_link_state = UIC_LINK_OFF_STATE;
  5318. }
  5319. /*
  5320. * If we can't transition into any of the low power modes
  5321. * just gate the clocks.
  5322. */
  5323. ufshcd_hold(hba, false);
  5324. hba->clk_gating.is_suspended = true;
  5325. if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
  5326. req_link_state == UIC_LINK_ACTIVE_STATE) {
  5327. goto disable_clks;
  5328. }
  5329. if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
  5330. (req_link_state == hba->uic_link_state))
  5331. goto out;
  5332. /* UFS device & link must be active before we enter in this function */
  5333. if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
  5334. ret = -EINVAL;
  5335. goto out;
  5336. }
  5337. if (ufshcd_is_runtime_pm(pm_op)) {
  5338. if (ufshcd_can_autobkops_during_suspend(hba)) {
  5339. /*
  5340. * The device is idle with no requests in the queue,
  5341. * allow background operations if bkops status shows
  5342. * that performance might be impacted.
  5343. */
  5344. ret = ufshcd_urgent_bkops(hba);
  5345. if (ret)
  5346. goto enable_gating;
  5347. } else {
  5348. /* make sure that auto bkops is disabled */
  5349. ufshcd_disable_auto_bkops(hba);
  5350. }
  5351. }
  5352. if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
  5353. ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
  5354. !ufshcd_is_runtime_pm(pm_op))) {
  5355. /* ensure that bkops is disabled */
  5356. ufshcd_disable_auto_bkops(hba);
  5357. ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
  5358. if (ret)
  5359. goto enable_gating;
  5360. }
  5361. ret = ufshcd_link_state_transition(hba, req_link_state, 1);
  5362. if (ret)
  5363. goto set_dev_active;
  5364. ufshcd_vreg_set_lpm(hba);
  5365. disable_clks:
  5366. /*
5367. * The clock scaling needs access to controller registers. Hence, wait
  5368. * for pending clock scaling work to be done before clocks are
  5369. * turned off.
  5370. */
  5371. if (ufshcd_is_clkscaling_enabled(hba)) {
  5372. devfreq_suspend_device(hba->devfreq);
  5373. hba->clk_scaling.window_start_t = 0;
  5374. }
  5375. /*
  5376. * Call vendor specific suspend callback. As these callbacks may access
5377. * vendor specific host controller register space, call them while the
5378. * host clocks are still ON, i.e. before they are turned off below.
  5379. */
  5380. ret = ufshcd_vops_suspend(hba, pm_op);
  5381. if (ret)
  5382. goto set_link_active;
  5383. ret = ufshcd_vops_setup_clocks(hba, false);
  5384. if (ret)
  5385. goto vops_resume;
  5386. if (!ufshcd_is_link_active(hba))
  5387. ufshcd_setup_clocks(hba, false);
  5388. else
  5389. /* If link is active, device ref_clk can't be switched off */
  5390. __ufshcd_setup_clocks(hba, false, true);
  5391. hba->clk_gating.state = CLKS_OFF;
  5392. /*
5393. * Disable the host irq as there won't be any host controller
5394. * transaction expected till resume.
  5395. */
  5396. ufshcd_disable_irq(hba);
  5397. /* Put the host controller in low power mode if possible */
  5398. ufshcd_hba_vreg_set_lpm(hba);
  5399. goto out;
  5400. vops_resume:
  5401. ufshcd_vops_resume(hba, pm_op);
  5402. set_link_active:
  5403. ufshcd_vreg_set_hpm(hba);
  5404. if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
  5405. ufshcd_set_link_active(hba);
  5406. else if (ufshcd_is_link_off(hba))
  5407. ufshcd_host_reset_and_restore(hba);
  5408. set_dev_active:
  5409. if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
  5410. ufshcd_disable_auto_bkops(hba);
  5411. enable_gating:
  5412. hba->clk_gating.is_suspended = false;
  5413. ufshcd_release(hba);
  5414. out:
  5415. hba->pm_op_in_progress = 0;
  5416. return ret;
  5417. }
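/*
 * [Editorial sketch] The runtime, system and shutdown PM entry points are
 * expected to funnel into this helper with the matching pm_op; a sketch under
 * that assumption (the wrappers themselves are outside this excerpt):
 *
 *	ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
 *	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
 *	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
 */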

/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space, call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	hba->clk_gating.is_suspended = false;

	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba || !hba->is_powered)
		return 0;

	if (pm_runtime_suspended(hba->dev)) {
		if (hba->rpm_lvl == hba->spm_lvl)
			/*
			 * There is a possibility that the device may still be
			 * in active state during the runtime suspend.
			 */
			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
				goto out;

		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different from what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes a long time, we might optimize it in
		 * the future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		return 0;

	return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
EXPORT_SYMBOL(ufshcd_system_resume);
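
/*
 * Example (illustrative sketch, not part of this driver): a bus glue driver
 * such as a platform or PCI front-end would typically forward its system
 * sleep callbacks to the two helpers above. The ufs_foo_* names are
 * hypothetical, and the glue driver is assumed to have stored the hba
 * pointer as its driver data (e.g. via platform_set_drvdata()).
 *
 *	static int ufs_foo_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_foo_resume(struct device *dev)
 *	{
 *		return ufshcd_system_resume(dev_get_drvdata(dev));
 *	}
 */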

/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state.
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So the following would be the possible power state after this function
 * returns successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
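
/*
 * Example (illustrative sketch, not part of this driver): the runtime PM
 * helpers above are wrapped in the same way as the system sleep helpers,
 * and both sets are then combined into a dev_pm_ops table for the glue
 * driver. All ufs_foo_* names are hypothetical.
 *
 *	static int ufs_foo_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_foo_runtime_resume(struct device *dev)
 *	{
 *		return ufshcd_runtime_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_foo_runtime_idle(struct device *dev)
 *	{
 *		return ufshcd_runtime_idle(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops ufs_foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufs_foo_suspend, ufs_foo_resume)
 *		SET_RUNTIME_PM_OPS(ufs_foo_runtime_suspend,
 *				   ufs_foo_runtime_resume,
 *				   ufs_foo_runtime_idle)
 *	};
 */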

/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
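
/*
 * Example (illustrative sketch, not part of this driver): a glue driver
 * would normally invoke ufshcd_shutdown() from its bus-level shutdown hook.
 * The ufs_foo_shutdown name is hypothetical and the hba pointer is assumed
 * to have been stored as platform driver data.
 *
 *	static void ufs_foo_shutdown(struct platform_device *pdev)
 *	{
 *		ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
 *	}
 */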

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba, true);

	scsi_host_put(hba->host);

	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_remove_device(hba->devfreq);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
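
/*
 * Example (illustrative sketch, not part of this driver): a bus-level remove
 * hook typically just resumes the device and calls ufshcd_remove(). Note
 * that ufshcd_remove() above already drops the SCSI host reference, so
 * ufshcd_dealloc_host() is intended for glue-driver error paths where the
 * host was allocated with ufshcd_alloc_host() but never handed over to
 * ufshcd_init(). The ufs_foo_remove name is hypothetical.
 *
 *	static int ufs_foo_remove(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba = platform_get_drvdata(pdev);
 *
 *		pm_runtime_get_sync(&pdev->dev);
 *		ufshcd_remove(hba);
 *		return 0;
 *	}
 */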

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
			"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
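
/*
 * Example (illustrative sketch, not part of this driver): a minimal probe
 * path in a platform glue driver, using the hypothetical ufs_foo_probe name.
 * The MMIO region and IRQ come from the platform resources; if ufshcd_init()
 * fails it releases the SCSI host on its own error paths (see below), so no
 * extra clean-up is needed here for that case.
 *
 *	static int ufs_foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		struct resource *res;
 *		void __iomem *mmio_base;
 *		struct ufs_hba *hba;
 *		int irq, err;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		mmio_base = devm_ioremap_resource(dev, res);
 *		if (IS_ERR(mmio_base))
 *			return PTR_ERR(mmio_base);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		err = ufshcd_alloc_host(dev, &hba);
 *		if (err)
 *			return err;
 *
 *		platform_set_drvdata(pdev, hba);
 *
 *		err = ufshcd_init(hba, mmio_base, irq);
 *		if (err)
 *			dev_err(dev, "ufshcd_init() failed %d\n", err);
 *
 *		return err;
 *	}
 */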

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	return ret;
}

static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int err = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	if (*freq == UINT_MAX)
		err = ufshcd_scale_clks(hba, true);
	else if (*freq == 0)
		err = ufshcd_scale_clks(hba, false);

	return err;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
			"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						   "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
					PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * We are assuming that the device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	scsi_host_put(host);
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);