caamalg.c

/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
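
/*
 * As a rough illustration only (the real job descriptors are built from the
 * crypto requests elsewhere in this file), a job descriptor of the shape
 * above would be assembled with the desc_constr.h helpers roughly like
 * this; src_dma/dst_dma and the lengths are illustrative placeholders for
 * already DMA-mapped request buffers:
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
 *			     desc_len(ctx->sh_desc_enc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 */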
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
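
/*
 * Shared descriptors are built in place in the per-session caam_ctx below,
 * so each one is limited to DESC_MAX_USED_LEN words: the 64-word descriptor
 * buffer less the job descriptor's I/O commands.
 */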

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
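/*
 * The IV is loaded into the class 1 (cipher) context at byte offset
 * ivoffset, then the same bytes are moved from there into the class 2
 * input FIFO so the authentication engine covers the IV as well.
 */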
static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
			LDST_SRCDST_BYTE_CONTEXT |
			(ivoffset << LDST_OFFSET_SHIFT));
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
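
/*
 * Layout of key[]: the MDHA split authentication key comes first, padded
 * out to split_key_pad_len, followed by the enckeylen bytes of class 1
 * encryption key; for rfc3686 the 4-byte nonce sits at the end and is
 * counted in enckeylen. append_key_aead() below depends on this layout.
 */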
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
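
/*
 * Each *_set_sh_desc() routine below checks whether the keys fit into the
 * 64-word descriptor buffer together with the descriptor text and the job
 * descriptor I/O commands; if so the keys are embedded as immediate data
 * (append_key_as_imm), otherwise they are referenced through ctx->key_dma
 * (append_key).
 */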
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* old_aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* old_aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
	const char *alg_name = crypto_tfm_alg_name(ctfm);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* old_aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, ivsize, ctx1_iv_off);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* old_aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, ivsize, ctx1_iv_off);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
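
/*
 * The GCM shared descriptors below must handle assoclen == 0 and/or
 * cryptlen == 0, hence the conditional (MATH zero) jumps around the
 * assoc-data and payload commands.
 */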
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int rfc4543_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4543_set_sh_desc(authenc);

        return 0;
}
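/*
 * gen_split_aead_key - derive the MDHA split key (the padded inner/outer
 * hash states) from the raw authentication key; the result is written to
 * the start of ctx->key.
 */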
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
                              u32 authkeylen)
{
        return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
                             ctx->split_key_pad_len, key_in, authkeylen,
                             ctx->alg_op);
}
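/*
 * aead_setkey - split the authenc() key blob into its authentication and
 * encryption parts, derive the split key, and lay out
 * [ split key | padding | encryption key ] in ctx->key before DMA-mapping it.
 */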
static int aead_setkey(struct crypto_aead *aead,
                       const u8 *key, unsigned int keylen)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        struct crypto_authenc_keys keys;
        int ret = 0;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        /* Pick class 2 key length from algorithm submask */
        ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                                      OP_ALG_ALGSEL_SHIFT] * 2;
        ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

        if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
                goto badkey;

#ifdef DEBUG
        printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
               keys.authkeylen + keys.enckeylen, keys.enckeylen,
               keys.authkeylen);
        printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
               ctx->split_key_len, ctx->split_key_pad_len);
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
        if (ret)
                goto badkey;

        /* append the encryption key after the padded auth split key */
        memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

        ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
                                      keys.enckeylen, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

        ctx->enckeylen = keys.enckeylen;

        ret = aead_set_sh_desc(aead);
        if (ret) {
                dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
                                 keys.enckeylen, DMA_TO_DEVICE);
        }

        return ret;
badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
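/* gcm_setkey - plain AES-GCM: the entire key blob is the AES key. */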
static int gcm_setkey(struct crypto_aead *aead,
                      const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        int ret = 0;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        memcpy(ctx->key, key, keylen);
        ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }
        ctx->enckeylen = keylen;

        ret = gcm_set_sh_desc(aead);
        if (ret) {
                dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
                                 DMA_TO_DEVICE);
        }

        return ret;
}
static int rfc4106_setkey(struct crypto_aead *aead,
                          const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        int ret = 0;

        if (keylen < 4)
                return -EINVAL;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        memcpy(ctx->key, key, keylen);

        /*
         * The last four bytes of the key material are used as the salt value
         * in the nonce. Update the AES key length.
         */
        ctx->enckeylen = keylen - 4;

        ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }

        ret = rfc4106_set_sh_desc(aead);
        if (ret) {
                dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
                                 DMA_TO_DEVICE);
        }

        return ret;
}
static int rfc4543_setkey(struct crypto_aead *aead,
                          const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        int ret = 0;

        if (keylen < 4)
                return -EINVAL;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        memcpy(ctx->key, key, keylen);

        /*
         * The last four bytes of the key material are used as the salt value
         * in the nonce. Update the AES key length.
         */
        ctx->enckeylen = keylen - 4;

        ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }

        ret = rfc4543_set_sh_desc(aead);
        if (ret) {
                dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
                                 DMA_TO_DEVICE);
        }

        return ret;
}
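/*
 * ablkcipher_setkey - build the three ablkcipher shared descriptors
 * (encrypt, decrypt, givencrypt).  For rfc3686(ctr(aes)) the trailing
 * nonce is stripped from the key blob and loaded into CONTEXT1 separately,
 * and the IV/counter are placed at an offset within CONTEXT1.
 */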
  1202. static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  1203. const u8 *key, unsigned int keylen)
  1204. {
  1205. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1206. struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
  1207. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
  1208. const char *alg_name = crypto_tfm_alg_name(tfm);
  1209. struct device *jrdev = ctx->jrdev;
  1210. int ret = 0;
  1211. u32 *key_jump_cmd;
  1212. u32 *desc;
  1213. u32 *nonce;
  1214. u32 geniv;
  1215. u32 ctx1_iv_off = 0;
  1216. const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
  1217. OP_ALG_AAI_CTR_MOD128);
  1218. const bool is_rfc3686 = (ctr_mode &&
  1219. (strstr(alg_name, "rfc3686") != NULL));
  1220. #ifdef DEBUG
  1221. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1222. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1223. #endif
  1224. /*
  1225. * AES-CTR needs to load IV in CONTEXT1 reg
  1226. * at an offset of 128bits (16bytes)
  1227. * CONTEXT1[255:128] = IV
  1228. */
  1229. if (ctr_mode)
  1230. ctx1_iv_off = 16;
  1231. /*
  1232. * RFC3686 specific:
  1233. * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  1234. * | *key = {KEY, NONCE}
  1235. */
  1236. if (is_rfc3686) {
  1237. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  1238. keylen -= CTR_RFC3686_NONCE_SIZE;
  1239. }
  1240. memcpy(ctx->key, key, keylen);
  1241. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
  1242. DMA_TO_DEVICE);
  1243. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1244. dev_err(jrdev, "unable to map key i/o memory\n");
  1245. return -ENOMEM;
  1246. }
  1247. ctx->enckeylen = keylen;
  1248. /* ablkcipher_encrypt shared descriptor */
  1249. desc = ctx->sh_desc_enc;
  1250. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1251. /* Skip if already shared */
  1252. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1253. JUMP_COND_SHRD);
  1254. /* Load class1 key only */
  1255. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1256. ctx->enckeylen, CLASS_1 |
  1257. KEY_DEST_CLASS_REG);
  1258. /* Load nonce into CONTEXT1 reg */
  1259. if (is_rfc3686) {
  1260. nonce = (u32 *)(key + keylen);
  1261. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1262. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1263. append_move(desc, MOVE_WAITCOMP |
  1264. MOVE_SRC_OUTFIFO |
  1265. MOVE_DEST_CLASS1CTX |
  1266. (16 << MOVE_OFFSET_SHIFT) |
  1267. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1268. }
  1269. set_jump_tgt_here(desc, key_jump_cmd);
  1270. /* Load iv */
  1271. append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
  1272. LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1273. /* Load counter into CONTEXT1 reg */
  1274. if (is_rfc3686)
  1275. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1276. LDST_CLASS_1_CCB |
  1277. LDST_SRCDST_BYTE_CONTEXT |
  1278. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1279. LDST_OFFSET_SHIFT));
  1280. /* Load operation */
  1281. append_operation(desc, ctx->class1_alg_type |
  1282. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  1283. /* Perform operation */
  1284. ablkcipher_append_src_dst(desc);
  1285. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  1286. desc_bytes(desc),
  1287. DMA_TO_DEVICE);
  1288. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  1289. dev_err(jrdev, "unable to map shared descriptor\n");
  1290. return -ENOMEM;
  1291. }
  1292. #ifdef DEBUG
  1293. print_hex_dump(KERN_ERR,
  1294. "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
  1295. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1296. desc_bytes(desc), 1);
  1297. #endif
  1298. /* ablkcipher_decrypt shared descriptor */
  1299. desc = ctx->sh_desc_dec;
  1300. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1301. /* Skip if already shared */
  1302. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1303. JUMP_COND_SHRD);
  1304. /* Load class1 key only */
  1305. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1306. ctx->enckeylen, CLASS_1 |
  1307. KEY_DEST_CLASS_REG);
  1308. /* Load nonce into CONTEXT1 reg */
  1309. if (is_rfc3686) {
  1310. nonce = (u32 *)(key + keylen);
  1311. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1312. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1313. append_move(desc, MOVE_WAITCOMP |
  1314. MOVE_SRC_OUTFIFO |
  1315. MOVE_DEST_CLASS1CTX |
  1316. (16 << MOVE_OFFSET_SHIFT) |
  1317. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1318. }
  1319. set_jump_tgt_here(desc, key_jump_cmd);
  1320. /* load IV */
  1321. append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
  1322. LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1323. /* Load counter into CONTEXT1 reg */
  1324. if (is_rfc3686)
  1325. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1326. LDST_CLASS_1_CCB |
  1327. LDST_SRCDST_BYTE_CONTEXT |
  1328. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1329. LDST_OFFSET_SHIFT));
  1330. /* Choose operation */
  1331. if (ctr_mode)
  1332. append_operation(desc, ctx->class1_alg_type |
  1333. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
  1334. else
  1335. append_dec_op1(desc, ctx->class1_alg_type);
  1336. /* Perform operation */
  1337. ablkcipher_append_src_dst(desc);
  1338. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  1339. desc_bytes(desc),
  1340. DMA_TO_DEVICE);
  1341. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1342. dev_err(jrdev, "unable to map shared descriptor\n");
  1343. return -ENOMEM;
  1344. }
  1345. #ifdef DEBUG
  1346. print_hex_dump(KERN_ERR,
  1347. "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
  1348. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1349. desc_bytes(desc), 1);
  1350. #endif
  1351. /* ablkcipher_givencrypt shared descriptor */
  1352. desc = ctx->sh_desc_givenc;
  1353. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1354. /* Skip if already shared */
  1355. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1356. JUMP_COND_SHRD);
  1357. /* Load class1 key only */
  1358. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1359. ctx->enckeylen, CLASS_1 |
  1360. KEY_DEST_CLASS_REG);
  1361. /* Load Nonce into CONTEXT1 reg */
  1362. if (is_rfc3686) {
  1363. nonce = (u32 *)(key + keylen);
  1364. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1365. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1366. append_move(desc, MOVE_WAITCOMP |
  1367. MOVE_SRC_OUTFIFO |
  1368. MOVE_DEST_CLASS1CTX |
  1369. (16 << MOVE_OFFSET_SHIFT) |
  1370. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1371. }
  1372. set_jump_tgt_here(desc, key_jump_cmd);
  1373. /* Generate IV */
  1374. geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
  1375. NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
  1376. NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
  1377. append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
  1378. LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
  1379. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  1380. append_move(desc, MOVE_WAITCOMP |
  1381. MOVE_SRC_INFIFO |
  1382. MOVE_DEST_CLASS1CTX |
  1383. (crt->ivsize << MOVE_LEN_SHIFT) |
  1384. (ctx1_iv_off << MOVE_OFFSET_SHIFT));
  1385. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
  1386. /* Copy generated IV to memory */
  1387. append_seq_store(desc, crt->ivsize,
  1388. LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
  1389. (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1390. /* Load Counter into CONTEXT1 reg */
  1391. if (is_rfc3686)
  1392. append_load_imm_u32(desc, (u32)1, LDST_IMM |
  1393. LDST_CLASS_1_CCB |
  1394. LDST_SRCDST_BYTE_CONTEXT |
  1395. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1396. LDST_OFFSET_SHIFT));
  1397. if (ctx1_iv_off)
  1398. append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
  1399. (1 << JUMP_OFFSET_SHIFT));
  1400. /* Load operation */
  1401. append_operation(desc, ctx->class1_alg_type |
  1402. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  1403. /* Perform operation */
  1404. ablkcipher_append_src_dst(desc);
  1405. ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
  1406. desc_bytes(desc),
  1407. DMA_TO_DEVICE);
  1408. if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
  1409. dev_err(jrdev, "unable to map shared descriptor\n");
  1410. return -ENOMEM;
  1411. }
  1412. #ifdef DEBUG
  1413. print_hex_dump(KERN_ERR,
  1414. "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
  1415. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1416. desc_bytes(desc), 1);
  1417. #endif
  1418. return ret;
  1419. }
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *           variable length, must not exceed MAX_CAAM_DESCSIZE
 */
struct aead_edesc {
        int assoc_nents;
        bool assoc_chained;
        int src_nents;
        bool src_chained;
        int dst_nents;
        bool dst_chained;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[];
};
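/*
 * An aead_edesc is allocated as one contiguous block:
 *
 *   [ struct aead_edesc | h/w job descriptor (desc_bytes) | sec4_sg entries ]
 *
 * so hw_desc[] and sec4_sg point into the same allocation; only the S/G
 * portion is DMA-mapped separately (sec4_sg_dma).
 */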
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *           variable length, must not exceed MAX_CAAM_DESCSIZE
 */
struct ablkcipher_edesc {
        int src_nents;
        bool src_chained;
        int dst_nents;
        bool dst_chained;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
                       struct scatterlist *dst, int src_nents,
                       bool src_chained, int dst_nents, bool dst_chained,
                       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
                       int sec4_sg_bytes)
{
        if (dst != src) {
                dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
                                     src_chained);
                dma_unmap_sg_chained(dev, dst, dst_nents ? : 1,
                                     DMA_FROM_DEVICE, dst_chained);
        } else {
                dma_unmap_sg_chained(dev, src, src_nents ? : 1,
                                     DMA_BIDIRECTIONAL, src_chained);
        }

        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
        if (sec4_sg_bytes)
                dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
                                 DMA_TO_DEVICE);
}
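/*
 * aead_unmap/old_aead_unmap - undo the DMA mappings set up at job submission
 * time.  The new AEAD path has no separately mapped IV or associated data,
 * hence the zero iv_dma/ivsize arguments.
 */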
static void aead_unmap(struct device *dev,
                       struct aead_edesc *edesc,
                       struct aead_request *req)
{
        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
                   edesc->dst_chained, 0, 0,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void old_aead_unmap(struct device *dev,
                           struct aead_edesc *edesc,
                           struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        int ivsize = crypto_aead_ivsize(aead);

        dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
                             DMA_TO_DEVICE, edesc->assoc_chained);

        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
                   edesc->dst_chained, edesc->iv_dma, ivsize,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void ablkcipher_unmap(struct device *dev,
                             struct ablkcipher_edesc *edesc,
                             struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
                   edesc->dst_chained, edesc->iv_dma, ivsize,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                              void *context)
{
        struct aead_request *req = context;
        struct aead_edesc *edesc;

#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

        if (err)
                caam_jr_strstatus(jrdev, err);

        aead_unmap(jrdev, edesc, req);

        kfree(edesc);

        aead_request_complete(req, err);
}
  1539. static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1540. void *context)
  1541. {
  1542. struct aead_request *req = context;
  1543. struct aead_edesc *edesc;
  1544. #ifdef DEBUG
  1545. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1546. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1547. int ivsize = crypto_aead_ivsize(aead);
  1548. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1549. #endif
  1550. edesc = (struct aead_edesc *)((char *)desc -
  1551. offsetof(struct aead_edesc, hw_desc));
  1552. if (err)
  1553. caam_jr_strstatus(jrdev, err);
  1554. old_aead_unmap(jrdev, edesc, req);
  1555. #ifdef DEBUG
  1556. print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
  1557. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
  1558. req->assoclen , 1);
  1559. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1560. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
  1561. edesc->src_nents ? 100 : ivsize, 1);
  1562. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1563. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1564. edesc->src_nents ? 100 : req->cryptlen +
  1565. ctx->authsize + 4, 1);
  1566. #endif
  1567. kfree(edesc);
  1568. aead_request_complete(req, err);
  1569. }
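/*
 * aead_decrypt_done - completion handler for the decrypt path; a CCB ICV
 * check failure is translated to -EBADMSG so callers see the standard
 * authentication error.
 */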
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                              void *context)
{
        struct aead_request *req = context;
        struct aead_edesc *edesc;

#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

        if (err)
                caam_jr_strstatus(jrdev, err);

        aead_unmap(jrdev, edesc, req);

        /* If the h/w ICV check failed, report -EBADMSG to the caller */
        if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
                err = -EBADMSG;

        kfree(edesc);

        aead_request_complete(req, err);
}
  1590. static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1591. void *context)
  1592. {
  1593. struct aead_request *req = context;
  1594. struct aead_edesc *edesc;
  1595. #ifdef DEBUG
  1596. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1597. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1598. int ivsize = crypto_aead_ivsize(aead);
  1599. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1600. #endif
  1601. edesc = (struct aead_edesc *)((char *)desc -
  1602. offsetof(struct aead_edesc, hw_desc));
  1603. #ifdef DEBUG
  1604. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1605. DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  1606. ivsize, 1);
  1607. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1608. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
  1609. req->cryptlen - ctx->authsize, 1);
  1610. #endif
  1611. if (err)
  1612. caam_jr_strstatus(jrdev, err);
  1613. old_aead_unmap(jrdev, edesc, req);
  1614. /*
  1615. * verify hw auth check passed else return -EBADMSG
  1616. */
  1617. if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
  1618. err = -EBADMSG;
  1619. #ifdef DEBUG
  1620. print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
  1621. DUMP_PREFIX_ADDRESS, 16, 4,
  1622. ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
  1623. sizeof(struct iphdr) + req->assoclen +
  1624. ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
  1625. ctx->authsize + 36, 1);
  1626. if (!err && edesc->sec4_sg_bytes) {
  1627. struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
  1628. print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
  1629. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
  1630. sg->length + ctx->authsize + 16, 1);
  1631. }
  1632. #endif
  1633. kfree(edesc);
  1634. aead_request_complete(req, err);
  1635. }
  1636. static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1637. void *context)
  1638. {
  1639. struct ablkcipher_request *req = context;
  1640. struct ablkcipher_edesc *edesc;
  1641. #ifdef DEBUG
  1642. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1643. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1644. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1645. #endif
  1646. edesc = (struct ablkcipher_edesc *)((char *)desc -
  1647. offsetof(struct ablkcipher_edesc, hw_desc));
  1648. if (err)
  1649. caam_jr_strstatus(jrdev, err);
  1650. #ifdef DEBUG
  1651. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1652. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1653. edesc->src_nents > 1 ? 100 : ivsize, 1);
  1654. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1655. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1656. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  1657. #endif
  1658. ablkcipher_unmap(jrdev, edesc, req);
  1659. kfree(edesc);
  1660. ablkcipher_request_complete(req, err);
  1661. }
  1662. static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1663. void *context)
  1664. {
  1665. struct ablkcipher_request *req = context;
  1666. struct ablkcipher_edesc *edesc;
  1667. #ifdef DEBUG
  1668. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1669. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1670. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1671. #endif
  1672. edesc = (struct ablkcipher_edesc *)((char *)desc -
  1673. offsetof(struct ablkcipher_edesc, hw_desc));
  1674. if (err)
  1675. caam_jr_strstatus(jrdev, err);
  1676. #ifdef DEBUG
  1677. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1678. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1679. ivsize, 1);
  1680. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1681. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1682. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  1683. #endif
  1684. ablkcipher_unmap(jrdev, edesc, req);
  1685. kfree(edesc);
  1686. ablkcipher_request_complete(req, err);
  1687. }
  1688. /*
  1689. * Fill in aead job descriptor
  1690. */
  1691. static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
  1692. struct aead_edesc *edesc,
  1693. struct aead_request *req,
  1694. bool all_contig, bool encrypt)
  1695. {
  1696. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1697. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1698. int ivsize = crypto_aead_ivsize(aead);
  1699. int authsize = ctx->authsize;
  1700. u32 *desc = edesc->hw_desc;
  1701. u32 out_options = 0, in_options;
  1702. dma_addr_t dst_dma, src_dma;
  1703. int len, sec4_sg_index = 0;
  1704. bool is_gcm = false;
  1705. #ifdef DEBUG
  1706. debug("assoclen %d cryptlen %d authsize %d\n",
  1707. req->assoclen, req->cryptlen, authsize);
  1708. print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
  1709. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
  1710. req->assoclen , 1);
  1711. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  1712. DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  1713. edesc->src_nents ? 100 : ivsize, 1);
  1714. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1715. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1716. edesc->src_nents ? 100 : req->cryptlen, 1);
  1717. print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
  1718. DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
  1719. desc_bytes(sh_desc), 1);
  1720. #endif
  1721. if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
  1722. OP_ALG_ALGSEL_AES) &&
  1723. ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
  1724. is_gcm = true;
  1725. len = desc_len(sh_desc);
  1726. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1727. if (all_contig) {
  1728. if (is_gcm)
  1729. src_dma = edesc->iv_dma;
  1730. else
  1731. src_dma = sg_dma_address(req->assoc);
  1732. in_options = 0;
  1733. } else {
  1734. src_dma = edesc->sec4_sg_dma;
  1735. sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
  1736. (edesc->src_nents ? : 1);
  1737. in_options = LDST_SGF;
  1738. }
  1739. append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
  1740. in_options);
  1741. if (likely(req->src == req->dst)) {
  1742. if (all_contig) {
  1743. dst_dma = sg_dma_address(req->src);
  1744. } else {
  1745. dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
  1746. ((edesc->assoc_nents ? : 1) + 1);
  1747. out_options = LDST_SGF;
  1748. }
  1749. } else {
  1750. if (!edesc->dst_nents) {
  1751. dst_dma = sg_dma_address(req->dst);
  1752. } else {
  1753. dst_dma = edesc->sec4_sg_dma +
  1754. sec4_sg_index *
  1755. sizeof(struct sec4_sg_entry);
  1756. out_options = LDST_SGF;
  1757. }
  1758. }
  1759. if (encrypt)
  1760. append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
  1761. out_options);
  1762. else
  1763. append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
  1764. out_options);
  1765. }
  1766. /*
  1767. * Fill in aead job descriptor
  1768. */
  1769. static void init_aead_job(struct aead_request *req,
  1770. struct aead_edesc *edesc,
  1771. bool all_contig, bool encrypt)
  1772. {
  1773. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1774. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1775. int authsize = ctx->authsize;
  1776. u32 *desc = edesc->hw_desc;
  1777. u32 out_options, in_options;
  1778. dma_addr_t dst_dma, src_dma;
  1779. int len, sec4_sg_index = 0;
  1780. dma_addr_t ptr;
  1781. u32 *sh_desc;
  1782. sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
  1783. ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
  1784. len = desc_len(sh_desc);
  1785. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1786. if (all_contig) {
  1787. src_dma = sg_dma_address(req->src);
  1788. in_options = 0;
  1789. } else {
  1790. src_dma = edesc->sec4_sg_dma;
  1791. sec4_sg_index += edesc->src_nents;
  1792. in_options = LDST_SGF;
  1793. }
  1794. append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
  1795. in_options);
  1796. dst_dma = src_dma;
  1797. out_options = in_options;
  1798. if (unlikely(req->src != req->dst)) {
  1799. if (!edesc->dst_nents) {
  1800. dst_dma = sg_dma_address(req->dst);
  1801. } else {
  1802. dst_dma = edesc->sec4_sg_dma +
  1803. sec4_sg_index *
  1804. sizeof(struct sec4_sg_entry);
  1805. out_options = LDST_SGF;
  1806. }
  1807. }
  1808. if (encrypt)
  1809. append_seq_out_ptr(desc, dst_dma,
  1810. req->assoclen + req->cryptlen + authsize,
  1811. out_options);
  1812. else
  1813. append_seq_out_ptr(desc, dst_dma,
  1814. req->assoclen + req->cryptlen - authsize,
  1815. out_options);
  1816. /* REG3 = assoclen */
  1817. append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
  1818. }
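/*
 * init_gcm_job - extend the generic AEAD job descriptor with the GCM IV,
 * appended as immediate FIFO LOAD data; for non-generic (non-96-bit-IV)
 * GCM the 4-byte salt stored after the AES key is appended first.
 */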
static void init_gcm_job(struct aead_request *req,
                         struct aead_edesc *edesc,
                         bool all_contig, bool encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        u32 *desc = edesc->hw_desc;
        bool generic_gcm = (ivsize == 12);
        unsigned int last;

        init_aead_job(req, edesc, all_contig, encrypt);

        /* BUG This should not be specific to generic GCM. */
        last = 0;
        if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
                last = FIFOLD_TYPE_LAST1;

        /* Read GCM IV */
        append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
                   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
        /* Append Salt */
        if (!generic_gcm)
                append_data(desc, ctx->key + ctx->enckeylen, 4);
        /* Append IV */
        append_data(desc, req->iv, ivsize);
        /* End of blank commands */
}
  1844. /*
  1845. * Fill in aead givencrypt job descriptor
  1846. */
  1847. static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
  1848. struct aead_edesc *edesc,
  1849. struct aead_request *req,
  1850. int contig)
  1851. {
  1852. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1853. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1854. int ivsize = crypto_aead_ivsize(aead);
  1855. int authsize = ctx->authsize;
  1856. u32 *desc = edesc->hw_desc;
  1857. u32 out_options = 0, in_options;
  1858. dma_addr_t dst_dma, src_dma;
  1859. int len, sec4_sg_index = 0;
  1860. bool is_gcm = false;
  1861. #ifdef DEBUG
  1862. debug("assoclen %d cryptlen %d authsize %d\n",
  1863. req->assoclen, req->cryptlen, authsize);
  1864. print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
  1865. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
  1866. req->assoclen , 1);
  1867. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  1868. DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
  1869. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1870. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1871. edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
  1872. print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
  1873. DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
  1874. desc_bytes(sh_desc), 1);
  1875. #endif
  1876. if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
  1877. OP_ALG_ALGSEL_AES) &&
  1878. ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
  1879. is_gcm = true;
  1880. len = desc_len(sh_desc);
  1881. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1882. if (contig & GIV_SRC_CONTIG) {
  1883. if (is_gcm)
  1884. src_dma = edesc->iv_dma;
  1885. else
  1886. src_dma = sg_dma_address(req->assoc);
  1887. in_options = 0;
  1888. } else {
  1889. src_dma = edesc->sec4_sg_dma;
  1890. sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
  1891. in_options = LDST_SGF;
  1892. }
  1893. append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
  1894. in_options);
  1895. if (contig & GIV_DST_CONTIG) {
  1896. dst_dma = edesc->iv_dma;
  1897. } else {
  1898. if (likely(req->src == req->dst)) {
  1899. dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
  1900. (edesc->assoc_nents +
  1901. (is_gcm ? 1 + edesc->src_nents : 0));
  1902. out_options = LDST_SGF;
  1903. } else {
  1904. dst_dma = edesc->sec4_sg_dma +
  1905. sec4_sg_index *
  1906. sizeof(struct sec4_sg_entry);
  1907. out_options = LDST_SGF;
  1908. }
  1909. }
  1910. append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
  1911. out_options);
  1912. }
  1913. /*
  1914. * Fill in ablkcipher job descriptor
  1915. */
  1916. static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  1917. struct ablkcipher_edesc *edesc,
  1918. struct ablkcipher_request *req,
  1919. bool iv_contig)
  1920. {
  1921. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1922. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1923. u32 *desc = edesc->hw_desc;
  1924. u32 out_options = 0, in_options;
  1925. dma_addr_t dst_dma, src_dma;
  1926. int len, sec4_sg_index = 0;
  1927. #ifdef DEBUG
  1928. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  1929. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1930. ivsize, 1);
  1931. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1932. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1933. edesc->src_nents ? 100 : req->nbytes, 1);
  1934. #endif
  1935. len = desc_len(sh_desc);
  1936. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1937. if (iv_contig) {
  1938. src_dma = edesc->iv_dma;
  1939. in_options = 0;
  1940. } else {
  1941. src_dma = edesc->sec4_sg_dma;
  1942. sec4_sg_index += edesc->src_nents + 1;
  1943. in_options = LDST_SGF;
  1944. }
  1945. append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
  1946. if (likely(req->src == req->dst)) {
  1947. if (!edesc->src_nents && iv_contig) {
  1948. dst_dma = sg_dma_address(req->src);
  1949. } else {
  1950. dst_dma = edesc->sec4_sg_dma +
  1951. sizeof(struct sec4_sg_entry);
  1952. out_options = LDST_SGF;
  1953. }
  1954. } else {
  1955. if (!edesc->dst_nents) {
  1956. dst_dma = sg_dma_address(req->dst);
  1957. } else {
  1958. dst_dma = edesc->sec4_sg_dma +
  1959. sec4_sg_index * sizeof(struct sec4_sg_entry);
  1960. out_options = LDST_SGF;
  1961. }
  1962. }
  1963. append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
  1964. }
  1965. /*
  1966. * Fill in ablkcipher givencrypt job descriptor
  1967. */
  1968. static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
  1969. struct ablkcipher_edesc *edesc,
  1970. struct ablkcipher_request *req,
  1971. bool iv_contig)
  1972. {
  1973. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1974. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1975. u32 *desc = edesc->hw_desc;
  1976. u32 out_options, in_options;
  1977. dma_addr_t dst_dma, src_dma;
  1978. int len, sec4_sg_index = 0;
  1979. #ifdef DEBUG
  1980. print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
  1981. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1982. ivsize, 1);
  1983. print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
  1984. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1985. edesc->src_nents ? 100 : req->nbytes, 1);
  1986. #endif
  1987. len = desc_len(sh_desc);
  1988. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1989. if (!edesc->src_nents) {
  1990. src_dma = sg_dma_address(req->src);
  1991. in_options = 0;
  1992. } else {
  1993. src_dma = edesc->sec4_sg_dma;
  1994. sec4_sg_index += edesc->src_nents;
  1995. in_options = LDST_SGF;
  1996. }
  1997. append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
  1998. if (iv_contig) {
  1999. dst_dma = edesc->iv_dma;
  2000. out_options = 0;
  2001. } else {
  2002. dst_dma = edesc->sec4_sg_dma +
  2003. sec4_sg_index * sizeof(struct sec4_sg_entry);
  2004. out_options = LDST_SGF;
  2005. }
  2006. append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
  2007. }
  2008. /*
  2009. * allocate and map the aead extended descriptor
  2010. */
  2011. static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
  2012. int desc_bytes,
  2013. bool *all_contig_ptr,
  2014. bool encrypt)
  2015. {
  2016. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2017. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2018. struct device *jrdev = ctx->jrdev;
  2019. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2020. CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
  2021. int assoc_nents, src_nents, dst_nents = 0;
  2022. struct aead_edesc *edesc;
  2023. dma_addr_t iv_dma = 0;
  2024. int sgc;
  2025. bool all_contig = true;
  2026. bool assoc_chained = false, src_chained = false, dst_chained = false;
  2027. int ivsize = crypto_aead_ivsize(aead);
  2028. int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
  2029. unsigned int authsize = ctx->authsize;
  2030. bool is_gcm = false;
  2031. assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
  2032. if (unlikely(req->dst != req->src)) {
  2033. src_nents = sg_count(req->src, req->cryptlen, &src_chained);
  2034. dst_nents = sg_count(req->dst,
  2035. req->cryptlen +
  2036. (encrypt ? authsize : (-authsize)),
  2037. &dst_chained);
  2038. } else {
  2039. src_nents = sg_count(req->src,
  2040. req->cryptlen +
  2041. (encrypt ? authsize : 0),
  2042. &src_chained);
  2043. }
  2044. sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
  2045. DMA_TO_DEVICE, assoc_chained);
  2046. if (likely(req->src == req->dst)) {
  2047. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2048. DMA_BIDIRECTIONAL, src_chained);
  2049. } else {
  2050. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2051. DMA_TO_DEVICE, src_chained);
  2052. sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
  2053. DMA_FROM_DEVICE, dst_chained);
  2054. }
  2055. iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
  2056. if (dma_mapping_error(jrdev, iv_dma)) {
  2057. dev_err(jrdev, "unable to map IV\n");
  2058. return ERR_PTR(-ENOMEM);
  2059. }
  2060. if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
  2061. OP_ALG_ALGSEL_AES) &&
  2062. ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
  2063. is_gcm = true;
  2064. /*
  2065. * Check if data are contiguous.
  2066. * GCM expected input sequence: IV, AAD, text
  2067. * All other - expected input sequence: AAD, IV, text
  2068. */
  2069. if (is_gcm)
  2070. all_contig = (!assoc_nents &&
  2071. iv_dma + ivsize == sg_dma_address(req->assoc) &&
  2072. !src_nents && sg_dma_address(req->assoc) +
  2073. req->assoclen == sg_dma_address(req->src));
  2074. else
  2075. all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
  2076. req->assoclen == iv_dma && !src_nents &&
  2077. iv_dma + ivsize == sg_dma_address(req->src));
  2078. if (!all_contig) {
  2079. assoc_nents = assoc_nents ? : 1;
  2080. src_nents = src_nents ? : 1;
  2081. sec4_sg_len = assoc_nents + 1 + src_nents;
  2082. }
  2083. sec4_sg_len += dst_nents;
  2084. sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
  2085. /* allocate space for base edesc and hw desc commands, link tables */
  2086. edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
  2087. sec4_sg_bytes, GFP_DMA | flags);
  2088. if (!edesc) {
  2089. dev_err(jrdev, "could not allocate extended descriptor\n");
  2090. return ERR_PTR(-ENOMEM);
  2091. }
  2092. edesc->assoc_nents = assoc_nents;
  2093. edesc->assoc_chained = assoc_chained;
  2094. edesc->src_nents = src_nents;
  2095. edesc->src_chained = src_chained;
  2096. edesc->dst_nents = dst_nents;
  2097. edesc->dst_chained = dst_chained;
  2098. edesc->iv_dma = iv_dma;
  2099. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2100. edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
  2101. desc_bytes;
  2102. *all_contig_ptr = all_contig;
  2103. sec4_sg_index = 0;
  2104. if (!all_contig) {
  2105. if (!is_gcm) {
  2106. sg_to_sec4_sg_len(req->assoc, req->assoclen,
  2107. edesc->sec4_sg + sec4_sg_index);
  2108. sec4_sg_index += assoc_nents;
  2109. }
  2110. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2111. iv_dma, ivsize, 0);
  2112. sec4_sg_index += 1;
  2113. if (is_gcm) {
  2114. sg_to_sec4_sg_len(req->assoc, req->assoclen,
  2115. edesc->sec4_sg + sec4_sg_index);
  2116. sec4_sg_index += assoc_nents;
  2117. }
  2118. sg_to_sec4_sg_last(req->src,
  2119. src_nents,
  2120. edesc->sec4_sg +
  2121. sec4_sg_index, 0);
  2122. sec4_sg_index += src_nents;
  2123. }
  2124. if (dst_nents) {
  2125. sg_to_sec4_sg_last(req->dst, dst_nents,
  2126. edesc->sec4_sg + sec4_sg_index, 0);
  2127. }
  2128. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2129. sec4_sg_bytes, DMA_TO_DEVICE);
  2130. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  2131. dev_err(jrdev, "unable to map S/G table\n");
  2132. return ERR_PTR(-ENOMEM);
  2133. }
  2134. return edesc;
  2135. }
  2136. /*
  2137. * allocate and map the aead extended descriptor
  2138. */
  2139. static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  2140. int desc_bytes, bool *all_contig_ptr,
  2141. bool encrypt)
  2142. {
  2143. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2144. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2145. struct device *jrdev = ctx->jrdev;
  2146. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2147. CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
  2148. int src_nents, dst_nents = 0;
  2149. struct aead_edesc *edesc;
  2150. int sgc;
  2151. bool all_contig = true;
  2152. bool src_chained = false, dst_chained = false;
  2153. int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
  2154. unsigned int authsize = ctx->authsize;
  2155. if (unlikely(req->dst != req->src)) {
  2156. src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
  2157. &src_chained);
  2158. dst_nents = sg_count(req->dst,
  2159. req->assoclen + req->cryptlen +
  2160. (encrypt ? authsize : (-authsize)),
  2161. &dst_chained);
  2162. } else {
  2163. src_nents = sg_count(req->src,
  2164. req->assoclen + req->cryptlen +
  2165. (encrypt ? authsize : 0),
  2166. &src_chained);
  2167. }
  2168. /* Check if data are contiguous. */
  2169. all_contig = !src_nents;
  2170. if (!all_contig) {
  2171. src_nents = src_nents ? : 1;
  2172. sec4_sg_len = src_nents;
  2173. }
  2174. sec4_sg_len += dst_nents;
  2175. sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
  2176. /* allocate space for base edesc and hw desc commands, link tables */
  2177. edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
  2178. sec4_sg_bytes, GFP_DMA | flags);
  2179. if (!edesc) {
  2180. dev_err(jrdev, "could not allocate extended descriptor\n");
  2181. return ERR_PTR(-ENOMEM);
  2182. }
  2183. if (likely(req->src == req->dst)) {
  2184. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2185. DMA_BIDIRECTIONAL, src_chained);
  2186. if (unlikely(!sgc)) {
  2187. dev_err(jrdev, "unable to map source\n");
  2188. kfree(edesc);
  2189. return ERR_PTR(-ENOMEM);
  2190. }
  2191. } else {
  2192. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2193. DMA_TO_DEVICE, src_chained);
  2194. if (unlikely(!sgc)) {
  2195. dev_err(jrdev, "unable to map source\n");
  2196. kfree(edesc);
  2197. return ERR_PTR(-ENOMEM);
  2198. }
  2199. sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
  2200. DMA_FROM_DEVICE, dst_chained);
  2201. if (unlikely(!sgc)) {
  2202. dev_err(jrdev, "unable to map destination\n");
  2203. dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
  2204. DMA_TO_DEVICE, src_chained);
  2205. kfree(edesc);
  2206. return ERR_PTR(-ENOMEM);
  2207. }
  2208. }
  2209. edesc->src_nents = src_nents;
  2210. edesc->src_chained = src_chained;
  2211. edesc->dst_nents = dst_nents;
  2212. edesc->dst_chained = dst_chained;
  2213. edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
  2214. desc_bytes;
  2215. *all_contig_ptr = all_contig;
  2216. sec4_sg_index = 0;
  2217. if (!all_contig) {
  2218. sg_to_sec4_sg_last(req->src, src_nents,
  2219. edesc->sec4_sg + sec4_sg_index, 0);
  2220. sec4_sg_index += src_nents;
  2221. }
  2222. if (dst_nents) {
  2223. sg_to_sec4_sg_last(req->dst, dst_nents,
  2224. edesc->sec4_sg + sec4_sg_index, 0);
  2225. }
  2226. if (!sec4_sg_bytes)
  2227. return edesc;
  2228. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2229. sec4_sg_bytes, DMA_TO_DEVICE);
  2230. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  2231. dev_err(jrdev, "unable to map S/G table\n");
  2232. aead_unmap(jrdev, edesc, req);
  2233. kfree(edesc);
  2234. return ERR_PTR(-ENOMEM);
  2235. }
  2236. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2237. return edesc;
  2238. }
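/*
 * gcm_encrypt/gcm_decrypt - allocate the extended descriptor, build the GCM
 * job descriptor on top of the shared descriptor, and enqueue it on the job
 * ring; cleanup happens in the completion callback unless the enqueue fails.
 */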
static int gcm_encrypt(struct aead_request *req)
{
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool all_contig;
        u32 *desc;
        int ret = 0;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Create and submit job descriptor */
        init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
#endif

        desc = edesc->hw_desc;
        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                aead_unmap(jrdev, edesc, req);
                kfree(edesc);
        }

        return ret;
}
  2269. static int old_aead_encrypt(struct aead_request *req)
  2270. {
  2271. struct aead_edesc *edesc;
  2272. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2273. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2274. struct device *jrdev = ctx->jrdev;
  2275. bool all_contig;
  2276. u32 *desc;
  2277. int ret = 0;
  2278. /* allocate extended descriptor */
  2279. edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
  2280. CAAM_CMD_SZ, &all_contig, true);
  2281. if (IS_ERR(edesc))
  2282. return PTR_ERR(edesc);
  2283. /* Create and submit job descriptor */
  2284. old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
  2285. all_contig, true);
  2286. #ifdef DEBUG
  2287. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2288. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2289. desc_bytes(edesc->hw_desc), 1);
  2290. #endif
  2291. desc = edesc->hw_desc;
  2292. ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
  2293. if (!ret) {
  2294. ret = -EINPROGRESS;
  2295. } else {
  2296. old_aead_unmap(jrdev, edesc, req);
  2297. kfree(edesc);
  2298. }
  2299. return ret;
  2300. }
  2301. static int gcm_decrypt(struct aead_request *req)
  2302. {
  2303. struct aead_edesc *edesc;
  2304. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2305. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2306. struct device *jrdev = ctx->jrdev;
  2307. bool all_contig;
  2308. u32 *desc;
  2309. int ret = 0;
  2310. /* allocate extended descriptor */
  2311. edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
  2312. if (IS_ERR(edesc))
  2313. return PTR_ERR(edesc);
  2314. /* Create and submit job descriptor*/
  2315. init_gcm_job(req, edesc, all_contig, false);
  2316. #ifdef DEBUG
  2317. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2318. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2319. desc_bytes(edesc->hw_desc), 1);
  2320. #endif
  2321. desc = edesc->hw_desc;
  2322. ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
  2323. if (!ret) {
  2324. ret = -EINPROGRESS;
  2325. } else {
  2326. aead_unmap(jrdev, edesc, req);
  2327. kfree(edesc);
  2328. }
  2329. return ret;
  2330. }
  2331. static int old_aead_decrypt(struct aead_request *req)
  2332. {
  2333. struct aead_edesc *edesc;
  2334. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2335. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2336. struct device *jrdev = ctx->jrdev;
  2337. bool all_contig;
  2338. u32 *desc;
  2339. int ret = 0;
  2340. /* allocate extended descriptor */
  2341. edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
  2342. CAAM_CMD_SZ, &all_contig, false);
  2343. if (IS_ERR(edesc))
  2344. return PTR_ERR(edesc);
  2345. #ifdef DEBUG
  2346. print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
  2347. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  2348. req->cryptlen, 1);
  2349. #endif
  2350. /* Create and submit job descriptor*/
  2351. old_init_aead_job(ctx->sh_desc_dec,
  2352. ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
  2353. #ifdef DEBUG
  2354. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2355. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2356. desc_bytes(edesc->hw_desc), 1);
  2357. #endif
  2358. desc = edesc->hw_desc;
  2359. ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
  2360. if (!ret) {
  2361. ret = -EINPROGRESS;
  2362. } else {
  2363. old_aead_unmap(jrdev, edesc, req);
  2364. kfree(edesc);
  2365. }
  2366. return ret;
  2367. }
  2368. /*
  2369. * allocate and map the aead extended descriptor for aead givencrypt
  2370. */
  2371. static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
  2372. *greq, int desc_bytes,
  2373. u32 *contig_ptr)
  2374. {
  2375. struct aead_request *req = &greq->areq;
  2376. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2377. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2378. struct device *jrdev = ctx->jrdev;
  2379. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2380. CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
  2381. int assoc_nents, src_nents, dst_nents = 0;
  2382. struct aead_edesc *edesc;
  2383. dma_addr_t iv_dma = 0;
  2384. int sgc;
  2385. u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
  2386. int ivsize = crypto_aead_ivsize(aead);
  2387. bool assoc_chained = false, src_chained = false, dst_chained = false;
  2388. int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
  2389. bool is_gcm = false;
  2390. assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
  2391. src_nents = sg_count(req->src, req->cryptlen, &src_chained);
  2392. if (unlikely(req->dst != req->src))
  2393. dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
  2394. &dst_chained);
  2395. sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
  2396. DMA_TO_DEVICE, assoc_chained);
  2397. if (likely(req->src == req->dst)) {
  2398. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2399. DMA_BIDIRECTIONAL, src_chained);
  2400. } else {
  2401. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2402. DMA_TO_DEVICE, src_chained);
  2403. sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
  2404. DMA_FROM_DEVICE, dst_chained);
  2405. }
  2406. iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
  2407. if (dma_mapping_error(jrdev, iv_dma)) {
  2408. dev_err(jrdev, "unable to map IV\n");
  2409. return ERR_PTR(-ENOMEM);
  2410. }
  2411. if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
  2412. OP_ALG_ALGSEL_AES) &&
  2413. ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
  2414. is_gcm = true;
  2415. /*
  2416. * Check if data are contiguous.
  2417. * GCM expected input sequence: IV, AAD, text
  2418. * All other - expected input sequence: AAD, IV, text
  2419. */
  2420. if (is_gcm) {
  2421. if (assoc_nents || iv_dma + ivsize !=
  2422. sg_dma_address(req->assoc) || src_nents ||
  2423. sg_dma_address(req->assoc) + req->assoclen !=
  2424. sg_dma_address(req->src))
  2425. contig &= ~GIV_SRC_CONTIG;
  2426. } else {
  2427. if (assoc_nents ||
  2428. sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
  2429. src_nents || iv_dma + ivsize != sg_dma_address(req->src))
  2430. contig &= ~GIV_SRC_CONTIG;
  2431. }
  2432. if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
  2433. contig &= ~GIV_DST_CONTIG;
  2434. if (!(contig & GIV_SRC_CONTIG)) {
  2435. assoc_nents = assoc_nents ? : 1;
  2436. src_nents = src_nents ? : 1;
  2437. sec4_sg_len += assoc_nents + 1 + src_nents;
  2438. if (req->src == req->dst &&
  2439. (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
  2440. contig &= ~GIV_DST_CONTIG;
  2441. }
  2442. /*
  2443. * Add new sg entries for GCM output sequence.
  2444. * Expected output sequence: IV, encrypted text.
  2445. */
  2446. if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
  2447. sec4_sg_len += 1 + src_nents;
  2448. if (unlikely(req->src != req->dst)) {
  2449. dst_nents = dst_nents ? : 1;
  2450. sec4_sg_len += 1 + dst_nents;
  2451. }
  2452. sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
  2453. /* allocate space for base edesc and hw desc commands, link tables */
  2454. edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
  2455. sec4_sg_bytes, GFP_DMA | flags);
  2456. if (!edesc) {
  2457. dev_err(jrdev, "could not allocate extended descriptor\n");
  2458. return ERR_PTR(-ENOMEM);
  2459. }
  2460. edesc->assoc_nents = assoc_nents;
  2461. edesc->assoc_chained = assoc_chained;
  2462. edesc->src_nents = src_nents;
  2463. edesc->src_chained = src_chained;
  2464. edesc->dst_nents = dst_nents;
  2465. edesc->dst_chained = dst_chained;
  2466. edesc->iv_dma = iv_dma;
  2467. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2468. edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
  2469. desc_bytes;
  2470. *contig_ptr = contig;
  2471. sec4_sg_index = 0;
  2472. if (!(contig & GIV_SRC_CONTIG)) {
  2473. if (!is_gcm) {
  2474. sg_to_sec4_sg_len(req->assoc, req->assoclen,
  2475. edesc->sec4_sg + sec4_sg_index);
  2476. sec4_sg_index += assoc_nents;
  2477. }
  2478. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2479. iv_dma, ivsize, 0);
  2480. sec4_sg_index += 1;
  2481. if (is_gcm) {
  2482. sg_to_sec4_sg_len(req->assoc, req->assoclen,
  2483. edesc->sec4_sg + sec4_sg_index);
  2484. sec4_sg_index += assoc_nents;
  2485. }
  2486. sg_to_sec4_sg_last(req->src, src_nents,
  2487. edesc->sec4_sg +
  2488. sec4_sg_index, 0);
  2489. sec4_sg_index += src_nents;
  2490. }
  2491. if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
  2492. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2493. iv_dma, ivsize, 0);
  2494. sec4_sg_index += 1;
  2495. sg_to_sec4_sg_last(req->src, src_nents,
  2496. edesc->sec4_sg + sec4_sg_index, 0);
  2497. }
  2498. if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
  2499. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2500. iv_dma, ivsize, 0);
  2501. sec4_sg_index += 1;
  2502. sg_to_sec4_sg_last(req->dst, dst_nents,
  2503. edesc->sec4_sg + sec4_sg_index, 0);
  2504. }
  2505. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2506. sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
  2511. return edesc;
  2512. }
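/*
 * old_aead_givencrypt - one-pass IV generation and encryption using the
 * givencrypt shared descriptor; completion is reported through
 * old_aead_encrypt_done.
 */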
  2513. static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
  2514. {
  2515. struct aead_request *req = &areq->areq;
  2516. struct aead_edesc *edesc;
  2517. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2518. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2519. struct device *jrdev = ctx->jrdev;
  2520. u32 contig;
  2521. u32 *desc;
  2522. int ret = 0;
  2523. /* allocate extended descriptor */
  2524. edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
  2525. CAAM_CMD_SZ, &contig);
  2526. if (IS_ERR(edesc))
  2527. return PTR_ERR(edesc);
  2528. #ifdef DEBUG
  2529. print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
  2530. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  2531. req->cryptlen, 1);
  2532. #endif
	/* Create and submit job descriptor */
  2534. init_aead_giv_job(ctx->sh_desc_givenc,
  2535. ctx->sh_desc_givenc_dma, edesc, req, contig);
  2536. #ifdef DEBUG
  2537. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2538. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2539. desc_bytes(edesc->hw_desc), 1);
  2540. #endif
  2541. desc = edesc->hw_desc;
  2542. ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
  2543. if (!ret) {
  2544. ret = -EINPROGRESS;
  2545. } else {
  2546. old_aead_unmap(jrdev, edesc, req);
  2547. kfree(edesc);
  2548. }
  2549. return ret;
  2550. }
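/*
 * aead_null_givencrypt - the cipher_null authenc templates use a null IV,
 * so there is nothing to generate and givencrypt falls through to the
 * plain encrypt path.
 */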
  2551. static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
  2552. {
  2553. return old_aead_encrypt(&areq->areq);
  2554. }
/*
 * allocate and map the extended descriptor for an ablkcipher request
 */
  2558. static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
  2559. *req, int desc_bytes,
  2560. bool *iv_contig_out)
  2561. {
  2562. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2563. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2564. struct device *jrdev = ctx->jrdev;
  2565. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2566. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2567. GFP_KERNEL : GFP_ATOMIC;
  2568. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2569. struct ablkcipher_edesc *edesc;
  2570. dma_addr_t iv_dma = 0;
  2571. bool iv_contig = false;
  2572. int sgc;
  2573. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2574. bool src_chained = false, dst_chained = false;
  2575. int sec4_sg_index;
  2576. src_nents = sg_count(req->src, req->nbytes, &src_chained);
  2577. if (req->dst != req->src)
  2578. dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
  2579. if (likely(req->src == req->dst)) {
  2580. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2581. DMA_BIDIRECTIONAL, src_chained);
  2582. } else {
  2583. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2584. DMA_TO_DEVICE, src_chained);
  2585. sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
  2586. DMA_FROM_DEVICE, dst_chained);
  2587. }
  2588. iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
  2589. if (dma_mapping_error(jrdev, iv_dma)) {
  2590. dev_err(jrdev, "unable to map IV\n");
  2591. return ERR_PTR(-ENOMEM);
  2592. }
	/*
	 * Check if the IV can sit contiguously in front of the source.
	 * If so, use the buffers directly; if not, build an S/G table.
	 */
  2597. if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
  2598. iv_contig = true;
  2599. else
  2600. src_nents = src_nents ? : 1;
  2601. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2602. sizeof(struct sec4_sg_entry);
  2603. /* allocate space for base edesc and hw desc commands, link tables */
  2604. edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
  2605. sec4_sg_bytes, GFP_DMA | flags);
  2606. if (!edesc) {
  2607. dev_err(jrdev, "could not allocate extended descriptor\n");
  2608. return ERR_PTR(-ENOMEM);
  2609. }
  2610. edesc->src_nents = src_nents;
  2611. edesc->src_chained = src_chained;
  2612. edesc->dst_nents = dst_nents;
  2613. edesc->dst_chained = dst_chained;
  2614. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2615. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2616. desc_bytes;
  2617. sec4_sg_index = 0;
  2618. if (!iv_contig) {
  2619. dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
  2620. sg_to_sec4_sg_last(req->src, src_nents,
  2621. edesc->sec4_sg + 1, 0);
  2622. sec4_sg_index += 1 + src_nents;
  2623. }
  2624. if (dst_nents) {
  2625. sg_to_sec4_sg_last(req->dst, dst_nents,
  2626. edesc->sec4_sg + sec4_sg_index, 0);
  2627. }
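	/* DMA-map the assembled link table so the CAAM can fetch it. */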
  2628. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2629. sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
  2634. edesc->iv_dma = iv_dma;
  2635. #ifdef DEBUG
  2636. print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
  2637. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2638. sec4_sg_bytes, 1);
  2639. #endif
  2640. *iv_contig_out = iv_contig;
  2641. return edesc;
  2642. }
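/*
 * ablkcipher_encrypt/ablkcipher_decrypt - map the request, point a job
 * descriptor at the shared encrypt/decrypt descriptor and enqueue it on
 * the job ring; -EINPROGRESS means the completion callback will finish
 * the request asynchronously.
 */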
  2643. static int ablkcipher_encrypt(struct ablkcipher_request *req)
  2644. {
  2645. struct ablkcipher_edesc *edesc;
  2646. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2647. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2648. struct device *jrdev = ctx->jrdev;
  2649. bool iv_contig;
  2650. u32 *desc;
  2651. int ret = 0;
  2652. /* allocate extended descriptor */
  2653. edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
  2654. CAAM_CMD_SZ, &iv_contig);
  2655. if (IS_ERR(edesc))
  2656. return PTR_ERR(edesc);
	/* Create and submit job descriptor */
  2658. init_ablkcipher_job(ctx->sh_desc_enc,
  2659. ctx->sh_desc_enc_dma, edesc, req, iv_contig);
  2660. #ifdef DEBUG
  2661. print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
  2662. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2663. desc_bytes(edesc->hw_desc), 1);
  2664. #endif
  2665. desc = edesc->hw_desc;
  2666. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2667. if (!ret) {
  2668. ret = -EINPROGRESS;
  2669. } else {
  2670. ablkcipher_unmap(jrdev, edesc, req);
  2671. kfree(edesc);
  2672. }
  2673. return ret;
  2674. }
  2675. static int ablkcipher_decrypt(struct ablkcipher_request *req)
  2676. {
  2677. struct ablkcipher_edesc *edesc;
  2678. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2679. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2680. struct device *jrdev = ctx->jrdev;
  2681. bool iv_contig;
  2682. u32 *desc;
  2683. int ret = 0;
  2684. /* allocate extended descriptor */
  2685. edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
  2686. CAAM_CMD_SZ, &iv_contig);
  2687. if (IS_ERR(edesc))
  2688. return PTR_ERR(edesc);
	/* Create and submit job descriptor */
  2690. init_ablkcipher_job(ctx->sh_desc_dec,
  2691. ctx->sh_desc_dec_dma, edesc, req, iv_contig);
  2692. desc = edesc->hw_desc;
  2693. #ifdef DEBUG
  2694. print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
  2695. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2696. desc_bytes(edesc->hw_desc), 1);
  2697. #endif
  2698. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
  2699. if (!ret) {
  2700. ret = -EINPROGRESS;
  2701. } else {
  2702. ablkcipher_unmap(jrdev, edesc, req);
  2703. kfree(edesc);
  2704. }
  2705. return ret;
  2706. }
/*
 * allocate and map the extended descriptor for ablkcipher givencrypt
 */
  2711. static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
  2712. struct skcipher_givcrypt_request *greq,
  2713. int desc_bytes,
  2714. bool *iv_contig_out)
  2715. {
  2716. struct ablkcipher_request *req = &greq->creq;
  2717. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2718. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2719. struct device *jrdev = ctx->jrdev;
  2720. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2721. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2722. GFP_KERNEL : GFP_ATOMIC;
  2723. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2724. struct ablkcipher_edesc *edesc;
  2725. dma_addr_t iv_dma = 0;
  2726. bool iv_contig = false;
  2727. int sgc;
  2728. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2729. bool src_chained = false, dst_chained = false;
  2730. int sec4_sg_index;
  2731. src_nents = sg_count(req->src, req->nbytes, &src_chained);
  2732. if (unlikely(req->dst != req->src))
  2733. dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
  2734. if (likely(req->src == req->dst)) {
  2735. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2736. DMA_BIDIRECTIONAL, src_chained);
  2737. } else {
  2738. sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
  2739. DMA_TO_DEVICE, src_chained);
  2740. sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
  2741. DMA_FROM_DEVICE, dst_chained);
  2742. }
	/*
	 * Check if the generated IV can sit contiguously in front of the
	 * destination. If so, use the buffers directly; if not, build an
	 * S/G table.
	 */
  2747. iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
  2748. if (dma_mapping_error(jrdev, iv_dma)) {
  2749. dev_err(jrdev, "unable to map IV\n");
  2750. return ERR_PTR(-ENOMEM);
  2751. }
  2752. if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
  2753. iv_contig = true;
  2754. else
  2755. dst_nents = dst_nents ? : 1;
  2756. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2757. sizeof(struct sec4_sg_entry);
  2758. /* allocate space for base edesc and hw desc commands, link tables */
  2759. edesc = kmalloc(sizeof(*edesc) + desc_bytes +
  2760. sec4_sg_bytes, GFP_DMA | flags);
  2761. if (!edesc) {
  2762. dev_err(jrdev, "could not allocate extended descriptor\n");
  2763. return ERR_PTR(-ENOMEM);
  2764. }
  2765. edesc->src_nents = src_nents;
  2766. edesc->src_chained = src_chained;
  2767. edesc->dst_nents = dst_nents;
  2768. edesc->dst_chained = dst_chained;
  2769. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2770. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2771. desc_bytes;
  2772. sec4_sg_index = 0;
  2773. if (src_nents) {
  2774. sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
  2775. sec4_sg_index += src_nents;
  2776. }
  2777. if (!iv_contig) {
  2778. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2779. iv_dma, ivsize, 0);
  2780. sec4_sg_index += 1;
  2781. sg_to_sec4_sg_last(req->dst, dst_nents,
  2782. edesc->sec4_sg + sec4_sg_index, 0);
  2783. }
  2784. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2785. sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
  2790. edesc->iv_dma = iv_dma;
  2791. #ifdef DEBUG
  2792. print_hex_dump(KERN_ERR,
  2793. "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
  2794. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2795. sec4_sg_bytes, 1);
  2796. #endif
  2797. *iv_contig_out = iv_contig;
  2798. return edesc;
  2799. }
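/*
 * ablkcipher_givencrypt - same flow as ablkcipher_encrypt, but built
 * around the givencrypt shared descriptor so the IV in creq->giv is
 * handled as part of the job; completion reuses ablkcipher_encrypt_done.
 */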
  2800. static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
  2801. {
  2802. struct ablkcipher_request *req = &creq->creq;
  2803. struct ablkcipher_edesc *edesc;
  2804. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2805. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2806. struct device *jrdev = ctx->jrdev;
  2807. bool iv_contig;
  2808. u32 *desc;
  2809. int ret = 0;
  2810. /* allocate extended descriptor */
  2811. edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
  2812. CAAM_CMD_SZ, &iv_contig);
  2813. if (IS_ERR(edesc))
  2814. return PTR_ERR(edesc);
	/* Create and submit job descriptor */
  2816. init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
  2817. edesc, req, iv_contig);
  2818. #ifdef DEBUG
  2819. print_hex_dump(KERN_ERR,
  2820. "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
  2821. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2822. desc_bytes(edesc->hw_desc), 1);
  2823. #endif
  2824. desc = edesc->hw_desc;
  2825. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2826. if (!ret) {
  2827. ret = -EINPROGRESS;
  2828. } else {
  2829. ablkcipher_unmap(jrdev, edesc, req);
  2830. kfree(edesc);
  2831. }
  2832. return ret;
  2833. }
  2834. #define template_aead template_u.aead
  2835. #define template_ablkcipher template_u.ablkcipher
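/*
 * caam_alg_template - one legacy algorithm entry: crypto API names, block
 * size and type, the ablkcipher/aead callback set, and the CAAM class 1 /
 * class 2 / alg_op values used when building descriptors for it.
 */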
  2836. struct caam_alg_template {
  2837. char name[CRYPTO_MAX_ALG_NAME];
  2838. char driver_name[CRYPTO_MAX_ALG_NAME];
  2839. unsigned int blocksize;
  2840. u32 type;
  2841. union {
  2842. struct ablkcipher_alg ablkcipher;
  2843. struct old_aead_alg aead;
  2844. } template_u;
  2845. u32 class1_alg_type;
  2846. u32 class2_alg_type;
  2847. u32 alg_op;
  2848. };
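/*
 * Legacy algorithms registered through crypto_alg: authenc() combinations
 * of cipher_null, AES-CBC, 3DES-CBC, DES-CBC and RFC3686 AES-CTR with the
 * MD5/SHA-1/SHA-224/SHA-256/SHA-384/SHA-512 HMACs, followed by the plain
 * (a)blkcipher modes. caam_alg_alloc() turns each entry into a crypto_alg.
 */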
  2849. static struct caam_alg_template driver_algs[] = {
  2850. /* single-pass ipsec_esp descriptor */
  2851. {
  2852. .name = "authenc(hmac(md5),ecb(cipher_null))",
  2853. .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
  2854. .blocksize = NULL_BLOCK_SIZE,
  2855. .type = CRYPTO_ALG_TYPE_AEAD,
  2856. .template_aead = {
  2857. .setkey = aead_setkey,
  2858. .setauthsize = aead_setauthsize,
  2859. .encrypt = old_aead_encrypt,
  2860. .decrypt = old_aead_decrypt,
  2861. .givencrypt = aead_null_givencrypt,
  2862. .geniv = "<built-in>",
  2863. .ivsize = NULL_IV_SIZE,
  2864. .maxauthsize = MD5_DIGEST_SIZE,
  2865. },
  2866. .class1_alg_type = 0,
  2867. .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
  2868. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2869. },
  2870. {
  2871. .name = "authenc(hmac(sha1),ecb(cipher_null))",
  2872. .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
  2873. .blocksize = NULL_BLOCK_SIZE,
  2874. .type = CRYPTO_ALG_TYPE_AEAD,
  2875. .template_aead = {
  2876. .setkey = aead_setkey,
  2877. .setauthsize = aead_setauthsize,
  2878. .encrypt = old_aead_encrypt,
  2879. .decrypt = old_aead_decrypt,
  2880. .givencrypt = aead_null_givencrypt,
  2881. .geniv = "<built-in>",
  2882. .ivsize = NULL_IV_SIZE,
  2883. .maxauthsize = SHA1_DIGEST_SIZE,
  2884. },
  2885. .class1_alg_type = 0,
  2886. .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
  2887. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2888. },
  2889. {
  2890. .name = "authenc(hmac(sha224),ecb(cipher_null))",
  2891. .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
  2892. .blocksize = NULL_BLOCK_SIZE,
  2893. .type = CRYPTO_ALG_TYPE_AEAD,
  2894. .template_aead = {
  2895. .setkey = aead_setkey,
  2896. .setauthsize = aead_setauthsize,
  2897. .encrypt = old_aead_encrypt,
  2898. .decrypt = old_aead_decrypt,
  2899. .givencrypt = aead_null_givencrypt,
  2900. .geniv = "<built-in>",
  2901. .ivsize = NULL_IV_SIZE,
  2902. .maxauthsize = SHA224_DIGEST_SIZE,
  2903. },
  2904. .class1_alg_type = 0,
  2905. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2906. OP_ALG_AAI_HMAC_PRECOMP,
  2907. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2908. },
  2909. {
  2910. .name = "authenc(hmac(sha256),ecb(cipher_null))",
  2911. .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
  2912. .blocksize = NULL_BLOCK_SIZE,
  2913. .type = CRYPTO_ALG_TYPE_AEAD,
  2914. .template_aead = {
  2915. .setkey = aead_setkey,
  2916. .setauthsize = aead_setauthsize,
  2917. .encrypt = old_aead_encrypt,
  2918. .decrypt = old_aead_decrypt,
  2919. .givencrypt = aead_null_givencrypt,
  2920. .geniv = "<built-in>",
  2921. .ivsize = NULL_IV_SIZE,
  2922. .maxauthsize = SHA256_DIGEST_SIZE,
  2923. },
  2924. .class1_alg_type = 0,
  2925. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2926. OP_ALG_AAI_HMAC_PRECOMP,
  2927. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2928. },
  2929. {
  2930. .name = "authenc(hmac(sha384),ecb(cipher_null))",
  2931. .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
  2932. .blocksize = NULL_BLOCK_SIZE,
  2933. .type = CRYPTO_ALG_TYPE_AEAD,
  2934. .template_aead = {
  2935. .setkey = aead_setkey,
  2936. .setauthsize = aead_setauthsize,
  2937. .encrypt = old_aead_encrypt,
  2938. .decrypt = old_aead_decrypt,
  2939. .givencrypt = aead_null_givencrypt,
  2940. .geniv = "<built-in>",
  2941. .ivsize = NULL_IV_SIZE,
  2942. .maxauthsize = SHA384_DIGEST_SIZE,
  2943. },
  2944. .class1_alg_type = 0,
  2945. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2946. OP_ALG_AAI_HMAC_PRECOMP,
  2947. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2948. },
  2949. {
  2950. .name = "authenc(hmac(sha512),ecb(cipher_null))",
  2951. .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
  2952. .blocksize = NULL_BLOCK_SIZE,
  2953. .type = CRYPTO_ALG_TYPE_AEAD,
  2954. .template_aead = {
  2955. .setkey = aead_setkey,
  2956. .setauthsize = aead_setauthsize,
  2957. .encrypt = old_aead_encrypt,
  2958. .decrypt = old_aead_decrypt,
  2959. .givencrypt = aead_null_givencrypt,
  2960. .geniv = "<built-in>",
  2961. .ivsize = NULL_IV_SIZE,
  2962. .maxauthsize = SHA512_DIGEST_SIZE,
  2963. },
  2964. .class1_alg_type = 0,
  2965. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2966. OP_ALG_AAI_HMAC_PRECOMP,
  2967. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2968. },
  2969. {
  2970. .name = "authenc(hmac(md5),cbc(aes))",
  2971. .driver_name = "authenc-hmac-md5-cbc-aes-caam",
  2972. .blocksize = AES_BLOCK_SIZE,
  2973. .type = CRYPTO_ALG_TYPE_AEAD,
  2974. .template_aead = {
  2975. .setkey = aead_setkey,
  2976. .setauthsize = aead_setauthsize,
  2977. .encrypt = old_aead_encrypt,
  2978. .decrypt = old_aead_decrypt,
  2979. .givencrypt = old_aead_givencrypt,
  2980. .geniv = "<built-in>",
  2981. .ivsize = AES_BLOCK_SIZE,
  2982. .maxauthsize = MD5_DIGEST_SIZE,
  2983. },
  2984. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2985. .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
  2986. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2987. },
  2988. {
  2989. .name = "authenc(hmac(sha1),cbc(aes))",
  2990. .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
  2991. .blocksize = AES_BLOCK_SIZE,
  2992. .type = CRYPTO_ALG_TYPE_AEAD,
  2993. .template_aead = {
  2994. .setkey = aead_setkey,
  2995. .setauthsize = aead_setauthsize,
  2996. .encrypt = old_aead_encrypt,
  2997. .decrypt = old_aead_decrypt,
  2998. .givencrypt = old_aead_givencrypt,
  2999. .geniv = "<built-in>",
  3000. .ivsize = AES_BLOCK_SIZE,
  3001. .maxauthsize = SHA1_DIGEST_SIZE,
  3002. },
  3003. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3004. .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
  3005. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3006. },
  3007. {
  3008. .name = "authenc(hmac(sha224),cbc(aes))",
  3009. .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
  3010. .blocksize = AES_BLOCK_SIZE,
  3011. .type = CRYPTO_ALG_TYPE_AEAD,
  3012. .template_aead = {
  3013. .setkey = aead_setkey,
  3014. .setauthsize = aead_setauthsize,
  3015. .encrypt = old_aead_encrypt,
  3016. .decrypt = old_aead_decrypt,
  3017. .givencrypt = old_aead_givencrypt,
  3018. .geniv = "<built-in>",
  3019. .ivsize = AES_BLOCK_SIZE,
  3020. .maxauthsize = SHA224_DIGEST_SIZE,
  3021. },
  3022. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3023. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3024. OP_ALG_AAI_HMAC_PRECOMP,
  3025. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3026. },
  3027. {
  3028. .name = "authenc(hmac(sha256),cbc(aes))",
  3029. .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
  3030. .blocksize = AES_BLOCK_SIZE,
  3031. .type = CRYPTO_ALG_TYPE_AEAD,
  3032. .template_aead = {
  3033. .setkey = aead_setkey,
  3034. .setauthsize = aead_setauthsize,
  3035. .encrypt = old_aead_encrypt,
  3036. .decrypt = old_aead_decrypt,
  3037. .givencrypt = old_aead_givencrypt,
  3038. .geniv = "<built-in>",
  3039. .ivsize = AES_BLOCK_SIZE,
  3040. .maxauthsize = SHA256_DIGEST_SIZE,
  3041. },
  3042. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3043. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3044. OP_ALG_AAI_HMAC_PRECOMP,
  3045. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3046. },
  3047. {
  3048. .name = "authenc(hmac(sha384),cbc(aes))",
  3049. .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
  3050. .blocksize = AES_BLOCK_SIZE,
  3051. .type = CRYPTO_ALG_TYPE_AEAD,
  3052. .template_aead = {
  3053. .setkey = aead_setkey,
  3054. .setauthsize = aead_setauthsize,
  3055. .encrypt = old_aead_encrypt,
  3056. .decrypt = old_aead_decrypt,
  3057. .givencrypt = old_aead_givencrypt,
  3058. .geniv = "<built-in>",
  3059. .ivsize = AES_BLOCK_SIZE,
  3060. .maxauthsize = SHA384_DIGEST_SIZE,
  3061. },
  3062. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3063. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3064. OP_ALG_AAI_HMAC_PRECOMP,
  3065. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3066. },
  3067. {
  3068. .name = "authenc(hmac(sha512),cbc(aes))",
  3069. .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
  3070. .blocksize = AES_BLOCK_SIZE,
  3071. .type = CRYPTO_ALG_TYPE_AEAD,
  3072. .template_aead = {
  3073. .setkey = aead_setkey,
  3074. .setauthsize = aead_setauthsize,
  3075. .encrypt = old_aead_encrypt,
  3076. .decrypt = old_aead_decrypt,
  3077. .givencrypt = old_aead_givencrypt,
  3078. .geniv = "<built-in>",
  3079. .ivsize = AES_BLOCK_SIZE,
  3080. .maxauthsize = SHA512_DIGEST_SIZE,
  3081. },
  3082. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3083. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3084. OP_ALG_AAI_HMAC_PRECOMP,
  3085. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3086. },
  3087. {
  3088. .name = "authenc(hmac(md5),cbc(des3_ede))",
  3089. .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
  3090. .blocksize = DES3_EDE_BLOCK_SIZE,
  3091. .type = CRYPTO_ALG_TYPE_AEAD,
  3092. .template_aead = {
  3093. .setkey = aead_setkey,
  3094. .setauthsize = aead_setauthsize,
  3095. .encrypt = old_aead_encrypt,
  3096. .decrypt = old_aead_decrypt,
  3097. .givencrypt = old_aead_givencrypt,
  3098. .geniv = "<built-in>",
  3099. .ivsize = DES3_EDE_BLOCK_SIZE,
  3100. .maxauthsize = MD5_DIGEST_SIZE,
  3101. },
  3102. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3103. .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
  3104. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3105. },
  3106. {
  3107. .name = "authenc(hmac(sha1),cbc(des3_ede))",
  3108. .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
  3109. .blocksize = DES3_EDE_BLOCK_SIZE,
  3110. .type = CRYPTO_ALG_TYPE_AEAD,
  3111. .template_aead = {
  3112. .setkey = aead_setkey,
  3113. .setauthsize = aead_setauthsize,
  3114. .encrypt = old_aead_encrypt,
  3115. .decrypt = old_aead_decrypt,
  3116. .givencrypt = old_aead_givencrypt,
  3117. .geniv = "<built-in>",
  3118. .ivsize = DES3_EDE_BLOCK_SIZE,
  3119. .maxauthsize = SHA1_DIGEST_SIZE,
  3120. },
  3121. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3122. .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
  3123. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3124. },
  3125. {
  3126. .name = "authenc(hmac(sha224),cbc(des3_ede))",
  3127. .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
  3128. .blocksize = DES3_EDE_BLOCK_SIZE,
  3129. .type = CRYPTO_ALG_TYPE_AEAD,
  3130. .template_aead = {
  3131. .setkey = aead_setkey,
  3132. .setauthsize = aead_setauthsize,
  3133. .encrypt = old_aead_encrypt,
  3134. .decrypt = old_aead_decrypt,
  3135. .givencrypt = old_aead_givencrypt,
  3136. .geniv = "<built-in>",
  3137. .ivsize = DES3_EDE_BLOCK_SIZE,
  3138. .maxauthsize = SHA224_DIGEST_SIZE,
  3139. },
  3140. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3141. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3142. OP_ALG_AAI_HMAC_PRECOMP,
  3143. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3144. },
  3145. {
  3146. .name = "authenc(hmac(sha256),cbc(des3_ede))",
  3147. .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
  3148. .blocksize = DES3_EDE_BLOCK_SIZE,
  3149. .type = CRYPTO_ALG_TYPE_AEAD,
  3150. .template_aead = {
  3151. .setkey = aead_setkey,
  3152. .setauthsize = aead_setauthsize,
  3153. .encrypt = old_aead_encrypt,
  3154. .decrypt = old_aead_decrypt,
  3155. .givencrypt = old_aead_givencrypt,
  3156. .geniv = "<built-in>",
  3157. .ivsize = DES3_EDE_BLOCK_SIZE,
  3158. .maxauthsize = SHA256_DIGEST_SIZE,
  3159. },
  3160. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3161. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3162. OP_ALG_AAI_HMAC_PRECOMP,
  3163. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3164. },
  3165. {
  3166. .name = "authenc(hmac(sha384),cbc(des3_ede))",
  3167. .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
  3168. .blocksize = DES3_EDE_BLOCK_SIZE,
  3169. .type = CRYPTO_ALG_TYPE_AEAD,
  3170. .template_aead = {
  3171. .setkey = aead_setkey,
  3172. .setauthsize = aead_setauthsize,
  3173. .encrypt = old_aead_encrypt,
  3174. .decrypt = old_aead_decrypt,
  3175. .givencrypt = old_aead_givencrypt,
  3176. .geniv = "<built-in>",
  3177. .ivsize = DES3_EDE_BLOCK_SIZE,
  3178. .maxauthsize = SHA384_DIGEST_SIZE,
  3179. },
  3180. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3181. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3182. OP_ALG_AAI_HMAC_PRECOMP,
  3183. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3184. },
  3185. {
  3186. .name = "authenc(hmac(sha512),cbc(des3_ede))",
  3187. .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
  3188. .blocksize = DES3_EDE_BLOCK_SIZE,
  3189. .type = CRYPTO_ALG_TYPE_AEAD,
  3190. .template_aead = {
  3191. .setkey = aead_setkey,
  3192. .setauthsize = aead_setauthsize,
  3193. .encrypt = old_aead_encrypt,
  3194. .decrypt = old_aead_decrypt,
  3195. .givencrypt = old_aead_givencrypt,
  3196. .geniv = "<built-in>",
  3197. .ivsize = DES3_EDE_BLOCK_SIZE,
  3198. .maxauthsize = SHA512_DIGEST_SIZE,
  3199. },
  3200. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3201. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3202. OP_ALG_AAI_HMAC_PRECOMP,
  3203. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3204. },
  3205. {
  3206. .name = "authenc(hmac(md5),cbc(des))",
  3207. .driver_name = "authenc-hmac-md5-cbc-des-caam",
  3208. .blocksize = DES_BLOCK_SIZE,
  3209. .type = CRYPTO_ALG_TYPE_AEAD,
  3210. .template_aead = {
  3211. .setkey = aead_setkey,
  3212. .setauthsize = aead_setauthsize,
  3213. .encrypt = old_aead_encrypt,
  3214. .decrypt = old_aead_decrypt,
  3215. .givencrypt = old_aead_givencrypt,
  3216. .geniv = "<built-in>",
  3217. .ivsize = DES_BLOCK_SIZE,
  3218. .maxauthsize = MD5_DIGEST_SIZE,
  3219. },
  3220. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3221. .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
  3222. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3223. },
  3224. {
  3225. .name = "authenc(hmac(sha1),cbc(des))",
  3226. .driver_name = "authenc-hmac-sha1-cbc-des-caam",
  3227. .blocksize = DES_BLOCK_SIZE,
  3228. .type = CRYPTO_ALG_TYPE_AEAD,
  3229. .template_aead = {
  3230. .setkey = aead_setkey,
  3231. .setauthsize = aead_setauthsize,
  3232. .encrypt = old_aead_encrypt,
  3233. .decrypt = old_aead_decrypt,
  3234. .givencrypt = old_aead_givencrypt,
  3235. .geniv = "<built-in>",
  3236. .ivsize = DES_BLOCK_SIZE,
  3237. .maxauthsize = SHA1_DIGEST_SIZE,
  3238. },
  3239. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3240. .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
  3241. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3242. },
  3243. {
  3244. .name = "authenc(hmac(sha224),cbc(des))",
  3245. .driver_name = "authenc-hmac-sha224-cbc-des-caam",
  3246. .blocksize = DES_BLOCK_SIZE,
  3247. .type = CRYPTO_ALG_TYPE_AEAD,
  3248. .template_aead = {
  3249. .setkey = aead_setkey,
  3250. .setauthsize = aead_setauthsize,
  3251. .encrypt = old_aead_encrypt,
  3252. .decrypt = old_aead_decrypt,
  3253. .givencrypt = old_aead_givencrypt,
  3254. .geniv = "<built-in>",
  3255. .ivsize = DES_BLOCK_SIZE,
  3256. .maxauthsize = SHA224_DIGEST_SIZE,
  3257. },
  3258. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3259. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3260. OP_ALG_AAI_HMAC_PRECOMP,
  3261. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3262. },
  3263. {
  3264. .name = "authenc(hmac(sha256),cbc(des))",
  3265. .driver_name = "authenc-hmac-sha256-cbc-des-caam",
  3266. .blocksize = DES_BLOCK_SIZE,
  3267. .type = CRYPTO_ALG_TYPE_AEAD,
  3268. .template_aead = {
  3269. .setkey = aead_setkey,
  3270. .setauthsize = aead_setauthsize,
  3271. .encrypt = old_aead_encrypt,
  3272. .decrypt = old_aead_decrypt,
  3273. .givencrypt = old_aead_givencrypt,
  3274. .geniv = "<built-in>",
  3275. .ivsize = DES_BLOCK_SIZE,
  3276. .maxauthsize = SHA256_DIGEST_SIZE,
  3277. },
  3278. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3279. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3280. OP_ALG_AAI_HMAC_PRECOMP,
  3281. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3282. },
  3283. {
  3284. .name = "authenc(hmac(sha384),cbc(des))",
  3285. .driver_name = "authenc-hmac-sha384-cbc-des-caam",
  3286. .blocksize = DES_BLOCK_SIZE,
  3287. .type = CRYPTO_ALG_TYPE_AEAD,
  3288. .template_aead = {
  3289. .setkey = aead_setkey,
  3290. .setauthsize = aead_setauthsize,
  3291. .encrypt = old_aead_encrypt,
  3292. .decrypt = old_aead_decrypt,
  3293. .givencrypt = old_aead_givencrypt,
  3294. .geniv = "<built-in>",
  3295. .ivsize = DES_BLOCK_SIZE,
  3296. .maxauthsize = SHA384_DIGEST_SIZE,
  3297. },
  3298. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3299. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3300. OP_ALG_AAI_HMAC_PRECOMP,
  3301. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3302. },
  3303. {
  3304. .name = "authenc(hmac(sha512),cbc(des))",
  3305. .driver_name = "authenc-hmac-sha512-cbc-des-caam",
  3306. .blocksize = DES_BLOCK_SIZE,
  3307. .type = CRYPTO_ALG_TYPE_AEAD,
  3308. .template_aead = {
  3309. .setkey = aead_setkey,
  3310. .setauthsize = aead_setauthsize,
  3311. .encrypt = old_aead_encrypt,
  3312. .decrypt = old_aead_decrypt,
  3313. .givencrypt = old_aead_givencrypt,
  3314. .geniv = "<built-in>",
  3315. .ivsize = DES_BLOCK_SIZE,
  3316. .maxauthsize = SHA512_DIGEST_SIZE,
  3317. },
  3318. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3319. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3320. OP_ALG_AAI_HMAC_PRECOMP,
  3321. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3322. },
  3323. {
  3324. .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
  3325. .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
  3326. .blocksize = 1,
  3327. .type = CRYPTO_ALG_TYPE_AEAD,
  3328. .template_aead = {
  3329. .setkey = aead_setkey,
  3330. .setauthsize = aead_setauthsize,
  3331. .encrypt = old_aead_encrypt,
  3332. .decrypt = old_aead_decrypt,
  3333. .givencrypt = old_aead_givencrypt,
  3334. .geniv = "<built-in>",
  3335. .ivsize = CTR_RFC3686_IV_SIZE,
  3336. .maxauthsize = MD5_DIGEST_SIZE,
  3337. },
  3338. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3339. .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
  3340. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3341. },
  3342. {
  3343. .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
  3344. .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
  3345. .blocksize = 1,
  3346. .type = CRYPTO_ALG_TYPE_AEAD,
  3347. .template_aead = {
  3348. .setkey = aead_setkey,
  3349. .setauthsize = aead_setauthsize,
  3350. .encrypt = old_aead_encrypt,
  3351. .decrypt = old_aead_decrypt,
  3352. .givencrypt = old_aead_givencrypt,
  3353. .geniv = "<built-in>",
  3354. .ivsize = CTR_RFC3686_IV_SIZE,
  3355. .maxauthsize = SHA1_DIGEST_SIZE,
  3356. },
  3357. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3358. .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
  3359. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3360. },
  3361. {
  3362. .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
  3363. .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
  3364. .blocksize = 1,
  3365. .type = CRYPTO_ALG_TYPE_AEAD,
  3366. .template_aead = {
  3367. .setkey = aead_setkey,
  3368. .setauthsize = aead_setauthsize,
  3369. .encrypt = old_aead_encrypt,
  3370. .decrypt = old_aead_decrypt,
  3371. .givencrypt = old_aead_givencrypt,
  3372. .geniv = "<built-in>",
  3373. .ivsize = CTR_RFC3686_IV_SIZE,
  3374. .maxauthsize = SHA224_DIGEST_SIZE,
  3375. },
  3376. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3377. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3378. OP_ALG_AAI_HMAC_PRECOMP,
  3379. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3380. },
  3381. {
  3382. .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
  3383. .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
  3384. .blocksize = 1,
  3385. .type = CRYPTO_ALG_TYPE_AEAD,
  3386. .template_aead = {
  3387. .setkey = aead_setkey,
  3388. .setauthsize = aead_setauthsize,
  3389. .encrypt = old_aead_encrypt,
  3390. .decrypt = old_aead_decrypt,
  3391. .givencrypt = old_aead_givencrypt,
  3392. .geniv = "<built-in>",
  3393. .ivsize = CTR_RFC3686_IV_SIZE,
  3394. .maxauthsize = SHA256_DIGEST_SIZE,
  3395. },
  3396. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3397. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3398. OP_ALG_AAI_HMAC_PRECOMP,
  3399. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3400. },
  3401. {
  3402. .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
  3403. .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
  3404. .blocksize = 1,
  3405. .type = CRYPTO_ALG_TYPE_AEAD,
  3406. .template_aead = {
  3407. .setkey = aead_setkey,
  3408. .setauthsize = aead_setauthsize,
  3409. .encrypt = old_aead_encrypt,
  3410. .decrypt = old_aead_decrypt,
  3411. .givencrypt = old_aead_givencrypt,
  3412. .geniv = "<built-in>",
  3413. .ivsize = CTR_RFC3686_IV_SIZE,
  3414. .maxauthsize = SHA384_DIGEST_SIZE,
  3415. },
  3416. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3417. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3418. OP_ALG_AAI_HMAC_PRECOMP,
  3419. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3420. },
  3421. {
  3422. .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
  3423. .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
  3424. .blocksize = 1,
  3425. .type = CRYPTO_ALG_TYPE_AEAD,
  3426. .template_aead = {
  3427. .setkey = aead_setkey,
  3428. .setauthsize = aead_setauthsize,
  3429. .encrypt = old_aead_encrypt,
  3430. .decrypt = old_aead_decrypt,
  3431. .givencrypt = old_aead_givencrypt,
  3432. .geniv = "<built-in>",
  3433. .ivsize = CTR_RFC3686_IV_SIZE,
  3434. .maxauthsize = SHA512_DIGEST_SIZE,
  3435. },
  3436. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3437. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3438. OP_ALG_AAI_HMAC_PRECOMP,
  3439. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3440. },
  3441. /* ablkcipher descriptor */
  3442. {
  3443. .name = "cbc(aes)",
  3444. .driver_name = "cbc-aes-caam",
  3445. .blocksize = AES_BLOCK_SIZE,
  3446. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  3447. .template_ablkcipher = {
  3448. .setkey = ablkcipher_setkey,
  3449. .encrypt = ablkcipher_encrypt,
  3450. .decrypt = ablkcipher_decrypt,
  3451. .givencrypt = ablkcipher_givencrypt,
  3452. .geniv = "<built-in>",
  3453. .min_keysize = AES_MIN_KEY_SIZE,
  3454. .max_keysize = AES_MAX_KEY_SIZE,
  3455. .ivsize = AES_BLOCK_SIZE,
  3456. },
  3457. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3458. },
  3459. {
  3460. .name = "cbc(des3_ede)",
  3461. .driver_name = "cbc-3des-caam",
  3462. .blocksize = DES3_EDE_BLOCK_SIZE,
  3463. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  3464. .template_ablkcipher = {
  3465. .setkey = ablkcipher_setkey,
  3466. .encrypt = ablkcipher_encrypt,
  3467. .decrypt = ablkcipher_decrypt,
  3468. .givencrypt = ablkcipher_givencrypt,
  3469. .geniv = "<built-in>",
  3470. .min_keysize = DES3_EDE_KEY_SIZE,
  3471. .max_keysize = DES3_EDE_KEY_SIZE,
  3472. .ivsize = DES3_EDE_BLOCK_SIZE,
  3473. },
  3474. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3475. },
  3476. {
  3477. .name = "cbc(des)",
  3478. .driver_name = "cbc-des-caam",
  3479. .blocksize = DES_BLOCK_SIZE,
  3480. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  3481. .template_ablkcipher = {
  3482. .setkey = ablkcipher_setkey,
  3483. .encrypt = ablkcipher_encrypt,
  3484. .decrypt = ablkcipher_decrypt,
  3485. .givencrypt = ablkcipher_givencrypt,
  3486. .geniv = "<built-in>",
  3487. .min_keysize = DES_KEY_SIZE,
  3488. .max_keysize = DES_KEY_SIZE,
  3489. .ivsize = DES_BLOCK_SIZE,
  3490. },
  3491. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3492. },
  3493. {
  3494. .name = "ctr(aes)",
  3495. .driver_name = "ctr-aes-caam",
  3496. .blocksize = 1,
  3497. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  3498. .template_ablkcipher = {
  3499. .setkey = ablkcipher_setkey,
  3500. .encrypt = ablkcipher_encrypt,
  3501. .decrypt = ablkcipher_decrypt,
  3502. .geniv = "chainiv",
  3503. .min_keysize = AES_MIN_KEY_SIZE,
  3504. .max_keysize = AES_MAX_KEY_SIZE,
  3505. .ivsize = AES_BLOCK_SIZE,
  3506. },
  3507. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3508. },
  3509. {
  3510. .name = "rfc3686(ctr(aes))",
  3511. .driver_name = "rfc3686-ctr-aes-caam",
  3512. .blocksize = 1,
  3513. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  3514. .template_ablkcipher = {
  3515. .setkey = ablkcipher_setkey,
  3516. .encrypt = ablkcipher_encrypt,
  3517. .decrypt = ablkcipher_decrypt,
  3518. .givencrypt = ablkcipher_givencrypt,
  3519. .geniv = "<built-in>",
  3520. .min_keysize = AES_MIN_KEY_SIZE +
  3521. CTR_RFC3686_NONCE_SIZE,
  3522. .max_keysize = AES_MAX_KEY_SIZE +
  3523. CTR_RFC3686_NONCE_SIZE,
  3524. .ivsize = CTR_RFC3686_IV_SIZE,
  3525. },
  3526. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  3527. }
  3528. };
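/*
 * caam_alg_entry - the CAAM operation values kept per registered algorithm;
 * caam_init_common() copies them into the transform context.
 */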
  3529. struct caam_alg_entry {
  3530. int class1_alg_type;
  3531. int class2_alg_type;
  3532. int alg_op;
  3533. };
  3534. struct caam_aead_alg {
  3535. struct aead_alg aead;
  3536. struct caam_alg_entry caam;
  3537. bool registered;
  3538. };
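/*
 * AEADs registered through the new aead_alg interface (the GCM family:
 * gcm(aes), rfc4106 and rfc4543). 'registered' records whether
 * crypto_register_aead() succeeded so that caam_algapi_exit() only
 * unregisters what was actually registered.
 */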
  3539. static struct caam_aead_alg driver_aeads[] = {
  3540. {
  3541. .aead = {
  3542. .base = {
  3543. .cra_name = "rfc4106(gcm(aes))",
  3544. .cra_driver_name = "rfc4106-gcm-aes-caam",
  3545. .cra_blocksize = 1,
  3546. },
  3547. .setkey = rfc4106_setkey,
  3548. .setauthsize = rfc4106_setauthsize,
  3549. .encrypt = gcm_encrypt,
  3550. .decrypt = gcm_decrypt,
  3551. .ivsize = 8,
  3552. .maxauthsize = AES_BLOCK_SIZE,
  3553. },
  3554. .caam = {
  3555. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  3556. },
  3557. },
  3558. {
  3559. .aead = {
  3560. .base = {
  3561. .cra_name = "rfc4543(gcm(aes))",
  3562. .cra_driver_name = "rfc4543-gcm-aes-caam",
  3563. .cra_blocksize = 1,
  3564. },
  3565. .setkey = rfc4543_setkey,
  3566. .setauthsize = rfc4543_setauthsize,
  3567. .encrypt = gcm_encrypt,
  3568. .decrypt = gcm_decrypt,
  3569. .ivsize = 8,
  3570. .maxauthsize = AES_BLOCK_SIZE,
  3571. },
  3572. .caam = {
  3573. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  3574. },
  3575. },
  3576. /* Galois Counter Mode */
  3577. {
  3578. .aead = {
  3579. .base = {
  3580. .cra_name = "gcm(aes)",
  3581. .cra_driver_name = "gcm-aes-caam",
  3582. .cra_blocksize = 1,
  3583. },
  3584. .setkey = gcm_setkey,
  3585. .setauthsize = gcm_setauthsize,
  3586. .encrypt = gcm_encrypt,
  3587. .decrypt = gcm_decrypt,
  3588. .ivsize = 12,
  3589. .maxauthsize = AES_BLOCK_SIZE,
  3590. },
  3591. .caam = {
  3592. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  3593. },
  3594. },
  3595. };
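/*
 * Illustrative sketch, not part of this driver: once registered, the AEADs
 * above are consumed through the generic crypto API. Roughly, with error
 * handling omitted and key/keylen, assoclen, src_sg/dst_sg, cryptlen, iv,
 * my_done_cb and my_ctx as caller-supplied placeholders, a kernel user
 * might do:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	(may return -EINPROGRESS)
 *
 * In the aead_alg interface the scatterlists cover the AAD followed by the
 * text, and for "gcm(aes)" the IV is the 12-byte value declared above.
 */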
  3596. struct caam_crypto_alg {
  3597. struct crypto_alg crypto_alg;
  3598. struct list_head entry;
  3599. struct caam_alg_entry caam;
  3600. };
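/*
 * caam_init_common - setup shared by the legacy cra_init and the aead_alg
 * init hooks: allocate a job ring for the transform and seed the context
 * with this algorithm's CAAM operation values.
 */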
  3601. static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
  3602. {
  3603. ctx->jrdev = caam_jr_alloc();
  3604. if (IS_ERR(ctx->jrdev)) {
  3605. pr_err("Job Ring Device allocation for transform failed\n");
  3606. return PTR_ERR(ctx->jrdev);
  3607. }
  3608. /* copy descriptor header template value */
  3609. ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
  3610. ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
  3611. ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
  3612. return 0;
  3613. }
  3614. static int caam_cra_init(struct crypto_tfm *tfm)
  3615. {
  3616. struct crypto_alg *alg = tfm->__crt_alg;
  3617. struct caam_crypto_alg *caam_alg =
  3618. container_of(alg, struct caam_crypto_alg, crypto_alg);
  3619. struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
  3620. return caam_init_common(ctx, &caam_alg->caam);
  3621. }
  3622. static int caam_aead_init(struct crypto_aead *tfm)
  3623. {
  3624. struct aead_alg *alg = crypto_aead_alg(tfm);
  3625. struct caam_aead_alg *caam_alg =
  3626. container_of(alg, struct caam_aead_alg, aead);
  3627. struct caam_ctx *ctx = crypto_aead_ctx(tfm);
  3628. return caam_init_common(ctx, &caam_alg->caam);
  3629. }
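/*
 * caam_exit_common - unmap whatever the setkey/descriptor-setup paths left
 * mapped (the three shared descriptors and the key) and release the job
 * ring.
 */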
  3630. static void caam_exit_common(struct caam_ctx *ctx)
  3631. {
  3632. if (ctx->sh_desc_enc_dma &&
  3633. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
  3634. dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
  3635. desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
  3636. if (ctx->sh_desc_dec_dma &&
  3637. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
  3638. dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
  3639. desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
  3640. if (ctx->sh_desc_givenc_dma &&
  3641. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
  3642. dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
  3643. desc_bytes(ctx->sh_desc_givenc),
  3644. DMA_TO_DEVICE);
  3645. if (ctx->key_dma &&
  3646. !dma_mapping_error(ctx->jrdev, ctx->key_dma))
  3647. dma_unmap_single(ctx->jrdev, ctx->key_dma,
  3648. ctx->enckeylen + ctx->split_key_pad_len,
  3649. DMA_TO_DEVICE);
  3650. caam_jr_free(ctx->jrdev);
  3651. }
  3652. static void caam_cra_exit(struct crypto_tfm *tfm)
  3653. {
  3654. caam_exit_common(crypto_tfm_ctx(tfm));
  3655. }
  3656. static void caam_aead_exit(struct crypto_aead *tfm)
  3657. {
  3658. caam_exit_common(crypto_aead_ctx(tfm));
  3659. }
  3660. static void __exit caam_algapi_exit(void)
  3661. {
  3662. struct caam_crypto_alg *t_alg, *n;
  3663. int i;
  3664. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  3665. struct caam_aead_alg *t_alg = driver_aeads + i;
  3666. if (t_alg->registered)
  3667. crypto_unregister_aead(&t_alg->aead);
  3668. }
  3669. if (!alg_list.next)
  3670. return;
  3671. list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
  3672. crypto_unregister_alg(&t_alg->crypto_alg);
  3673. list_del(&t_alg->entry);
  3674. kfree(t_alg);
  3675. }
  3676. }
  3677. static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
  3678. *template)
  3679. {
  3680. struct caam_crypto_alg *t_alg;
  3681. struct crypto_alg *alg;
  3682. t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
  3683. if (!t_alg) {
  3684. pr_err("failed to allocate t_alg\n");
  3685. return ERR_PTR(-ENOMEM);
  3686. }
  3687. alg = &t_alg->crypto_alg;
  3688. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
  3689. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  3690. template->driver_name);
  3691. alg->cra_module = THIS_MODULE;
  3692. alg->cra_init = caam_cra_init;
  3693. alg->cra_exit = caam_cra_exit;
  3694. alg->cra_priority = CAAM_CRA_PRIORITY;
  3695. alg->cra_blocksize = template->blocksize;
  3696. alg->cra_alignmask = 0;
  3697. alg->cra_ctxsize = sizeof(struct caam_ctx);
  3698. alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
  3699. template->type;
  3700. switch (template->type) {
  3701. case CRYPTO_ALG_TYPE_GIVCIPHER:
  3702. alg->cra_type = &crypto_givcipher_type;
  3703. alg->cra_ablkcipher = template->template_ablkcipher;
  3704. break;
  3705. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3706. alg->cra_type = &crypto_ablkcipher_type;
  3707. alg->cra_ablkcipher = template->template_ablkcipher;
  3708. break;
  3709. case CRYPTO_ALG_TYPE_AEAD:
  3710. alg->cra_type = &crypto_aead_type;
  3711. alg->cra_aead = template->template_aead;
  3712. break;
  3713. }
  3714. t_alg->caam.class1_alg_type = template->class1_alg_type;
  3715. t_alg->caam.class2_alg_type = template->class2_alg_type;
  3716. t_alg->caam.alg_op = template->alg_op;
  3717. return t_alg;
  3718. }
  3719. static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
  3720. {
  3721. struct aead_alg *alg = &t_alg->aead;
  3722. alg->base.cra_module = THIS_MODULE;
  3723. alg->base.cra_priority = CAAM_CRA_PRIORITY;
  3724. alg->base.cra_ctxsize = sizeof(struct caam_ctx);
  3725. alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  3726. alg->init = caam_aead_init;
  3727. alg->exit = caam_aead_exit;
  3728. }
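/*
 * Module init: locate the CAAM controller node and make sure its driver
 * finished probing, then register the legacy crypto_algs followed by the
 * aead_alg based AEADs. Individual registration failures are logged and
 * the loop moves on to the next algorithm.
 */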
  3729. static int __init caam_algapi_init(void)
  3730. {
  3731. struct device_node *dev_node;
  3732. struct platform_device *pdev;
  3733. struct device *ctrldev;
  3734. void *priv;
  3735. int i = 0, err = 0;
  3736. bool registered = false;
  3737. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
  3738. if (!dev_node) {
  3739. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
  3740. if (!dev_node)
  3741. return -ENODEV;
  3742. }
  3743. pdev = of_find_device_by_node(dev_node);
  3744. if (!pdev) {
  3745. of_node_put(dev_node);
  3746. return -ENODEV;
  3747. }
  3748. ctrldev = &pdev->dev;
  3749. priv = dev_get_drvdata(ctrldev);
  3750. of_node_put(dev_node);
  3751. /*
  3752. * If priv is NULL, it's probably because the caam driver wasn't
  3753. * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
  3754. */
  3755. if (!priv)
  3756. return -ENODEV;
  3757. INIT_LIST_HEAD(&alg_list);
  3758. /* register crypto algorithms the device supports */
  3759. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3760. /* TODO: check if h/w supports alg */
  3761. struct caam_crypto_alg *t_alg;
  3762. t_alg = caam_alg_alloc(&driver_algs[i]);
  3763. if (IS_ERR(t_alg)) {
  3764. err = PTR_ERR(t_alg);
  3765. pr_warn("%s alg allocation failed\n",
  3766. driver_algs[i].driver_name);
  3767. continue;
  3768. }
  3769. err = crypto_register_alg(&t_alg->crypto_alg);
  3770. if (err) {
  3771. pr_warn("%s alg registration failed\n",
  3772. t_alg->crypto_alg.cra_driver_name);
  3773. kfree(t_alg);
  3774. continue;
  3775. }
  3776. list_add_tail(&t_alg->entry, &alg_list);
  3777. registered = true;
  3778. }
  3779. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  3780. struct caam_aead_alg *t_alg = driver_aeads + i;
  3781. caam_aead_alg_init(t_alg);
  3782. err = crypto_register_aead(&t_alg->aead);
  3783. if (err) {
  3784. pr_warn("%s alg registration failed\n",
  3785. t_alg->aead.base.cra_driver_name);
  3786. continue;
  3787. }
  3788. t_alg->registered = true;
  3789. registered = true;
  3790. }
  3791. if (registered)
  3792. pr_info("caam algorithms registered in /proc/crypto\n");
  3793. return err;
  3794. }
  3795. module_init(caam_algapi_init);
  3796. module_exit(caam_algapi_exit);
  3797. MODULE_LICENSE("GPL");
  3798. MODULE_DESCRIPTION("FSL CAAM support for crypto API");
  3799. MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");