/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
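/*
 * Consumer-side sketch (assumption: standard kernel crypto API usage, not
 * part of this driver): once registered, these transforms are reached like
 * any other AEAD, e.g.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	... build an aead_request, then crypto_aead_encrypt(req) ...
 *
 * A CAAM_CRA_PRIORITY of 3000 is well above the ~100 used by the generic
 * software implementations, so the crypto core picks the CAAM-backed
 * transform by default when one is available.
 */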
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
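/*
 * Sizing note: CAAM_DESC_BYTES_MAX is the 64-word descriptor buffer in
 * bytes (64 * CAAM_CMD_SZ = 256, per desc.h). The *_DESC_JOB_IO_LEN
 * values reserve room for the job descriptor that rides along with the
 * shared descriptor; the remainder (rem_bytes in the set_sh_desc helpers
 * below) decides whether a key can be inlined into the shared descriptor
 * or must be fetched through a DMA pointer instead.
 */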
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
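/*
 * The shared descriptors and key buffer live inside the context and are
 * presumably DMA-mapped once for the tfm's lifetime (the init path is not
 * shown here); that is why the setkey/set_sh_desc paths below only rewrite
 * them and call dma_sync_single_for_device() rather than remapping on
 * every update.
 */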
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;
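	/*
	 * desc_inline_query() reports, per data_len[] entry, whether that
	 * key still fits inline next to the descriptor commands: bit 0 of
	 * inl_mask covers data_len[0] (the split auth key) and bit 1 covers
	 * data_len[1] (the cipher key). Inline keys are copied into the
	 * shared descriptor via key_virt; the rest are fetched via key_dma.
	 */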
	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_givenc:
	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
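/*
 * Resulting ctx->key layout for authenc algorithms; both the DKP path
 * (era >= 6, plaintext auth key) and the gen_split_key() path above fill
 * it the same way:
 *
 *	+-------------------------------+----------------+
 *	| auth key, padded to           | encryption key |
 *	| adata.keylen_pad              |                |
 *	+-------------------------------+----------------+
 *	0                          adata.keylen_pad
 */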
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4106_set_sh_desc(aead);
}
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4543_set_sh_desc(aead);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
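/*
 * XTS consumes two same-size AES keys, one for the data and one for the
 * tweak, which is why the only key lengths accepted above are
 * 2 * AES_MIN_KEY_SIZE (32 bytes) and 2 * AES_MAX_KEY_SIZE (64 bytes).
 */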
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @iv_dir: DMA mapping direction for IV
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     and IV
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	enum dma_data_direction iv_dir;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
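/*
 * Both edesc structs are allocated with a single kzalloc that appends the
 * job descriptor, the sec4_sg link table and (for ablkcipher) an IV copy
 * after the struct fields:
 *
 *	[ edesc fields | hw_desc | sec4_sg link table | IV ]
 *
 * which is why ablkcipher_encrypt_done() below can recover a generated IV
 * at hw_desc + desc_bytes(hw_desc) + sec4_sg_bytes.
 */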
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->iv_dir,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block when running in CBC mode.
	 */
	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
					 ivsize, ivsize, 0);

	/* In case initial IV was generated, copy it in GIVCIPHER request */
	if (edesc->iv_dir == DMA_FROM_DEVICE) {
		u8 *iv;
		struct skcipher_givcrypt_request *greq;

		greq = container_of(req, struct skcipher_givcrypt_request,
				    creq);
		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
		     edesc->sec4_sg_bytes;
		memcpy(greq->giv, iv, ivsize);
	}

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}
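/*
 * Note the asymmetry in the SEQ OUT length above: encryption writes
 * assoclen + cryptlen plus the authsize bytes of ICV it appends, while
 * decryption verifies and strips the ICV, so it writes authsize fewer
 * bytes than it reads.
 */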
static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
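/*
 * For the non-generic (rfc4106/rfc4543) variants the FIFO LOAD above is
 * fed GCM_AES_IV_SIZE (12) bytes assembled as salt || IV: the 4-byte salt
 * stored at the end of the key material followed by the 8-byte
 * per-request IV, matching the nonce construction those RFCs require.
 */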
static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
	 * having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0;
	dma_addr_t dst_dma;
	int len;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	pr_err("asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
#endif
	caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
			  LDST_SGF);

	if (likely(req->src == req->dst)) {
		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	} else {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (edesc->src_nents == 1) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
		  sizeof(struct sec4_sg_entry);
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}
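	/*
	 * A side that mapped to a single DMA segment is pointed at directly
	 * by SEQ IN/OUT PTR and needs no link-table entries; only sides
	 * with more than one mapped segment contribute sec4_sg entries.
	 */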
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
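/*
 * caam_jr_enqueue() returning 0 only means the job was accepted into the
 * ring; the result arrives later in aead_encrypt_done(), so -EINPROGRESS
 * is returned per the asynchronous crypto API convention. Any other
 * return code is a submission failure and the edesc is torn down here.
 */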
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}
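/*
 * The assoclen check above: for RFC4106 ESP the associated data begins
 * with the 4-byte SPI and at least a 4-byte sequence number, so anything
 * shorter than 8 bytes cannot be a valid IPsec header.
 */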
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

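/*
 * gcm_decrypt mirrors gcm_encrypt(); the only differences are the
 * encrypt=false flag passed when building the job descriptor and the
 * aead_decrypt_done() completion callback.
 */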
static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

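/*
 * aead_decrypt - single-pass authenc decryption; the shared descriptor has
 * the hardware verify the ICV as part of the job, so no software
 * comparison is needed here.
 */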
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

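/*
 * Unlike the AEAD paths above, the ablkcipher path must place the IV in
 * DMA-able memory itself: it is copied into the tail of the extended
 * descriptor and always becomes the first entry of the sec4 S/G table,
 * followed by the source scatterlist.
 */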
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_TO_DEVICE;

	/* Make sure IV is located in a DMAable area */
	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	memcpy(iv, req->info, ivsize);

	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	return edesc;
}

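/*
 * ablkcipher_encrypt - enqueue a symmetric-cipher encryption job using the
 * shared descriptor prepared at setkey time (ctx->sh_desc_enc).
 */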
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

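/*
 * ablkcipher_decrypt - as ablkcipher_encrypt(), but for CBC the last
 * ciphertext block must be saved into req->info before submission, since
 * an in-place decryption would otherwise overwrite it.
 */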
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block when running in CBC mode.
	 */
	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
					 ivsize, ivsize, 0);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

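/*
 * For givencrypt the hardware generates the IV, so the IV slot in the
 * extended descriptor is mapped DMA_FROM_DEVICE and is chained in front
 * of the destination scatterlist rather than the source.
 */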
/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += 1 + mapped_dst_nents;

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_FROM_DEVICE;

	/* Make sure IV is located in a DMAable area */
	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (mapped_src_nents > 1)
		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
				   0);

	dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
	sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
			   dst_sg_idx + 1, 0);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	return edesc;
}

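/*
 * ablkcipher_givencrypt - IV-generating encryption entry point backing the
 * CRYPTO_ALG_TYPE_GIVCIPHER templates below (geniv = "<built-in>").
 */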
static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

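/*
 * Template table for the legacy (a)blkcipher algorithms; these are turned
 * into full crypto_alg instances when the module registers them. AEAD
 * algorithms use the newer aead_alg interface in driver_aeads[] below.
 */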
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

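/*
 * AEAD algorithms registered through the aead_alg interface; the .caam
 * member selects the CAAM class 1 (cipher) and class 2 (authentication)
 * algorithm descriptors, plus rfc3686/geniv handling.
 */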
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4543_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
  2685. {
  2686. .aead = {
  2687. .base = {
  2688. .cra_name = "authenc(hmac(md5),"
  2689. "rfc3686(ctr(aes)))",
  2690. .cra_driver_name = "authenc-hmac-md5-"
  2691. "rfc3686-ctr-aes-caam",
  2692. .cra_blocksize = 1,
  2693. },
  2694. .setkey = aead_setkey,
  2695. .setauthsize = aead_setauthsize,
  2696. .encrypt = aead_encrypt,
  2697. .decrypt = aead_decrypt,
  2698. .ivsize = CTR_RFC3686_IV_SIZE,
  2699. .maxauthsize = MD5_DIGEST_SIZE,
  2700. },
  2701. .caam = {
  2702. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2703. OP_ALG_AAI_CTR_MOD128,
  2704. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2705. OP_ALG_AAI_HMAC_PRECOMP,
  2706. .rfc3686 = true,
  2707. },
  2708. },
  2709. {
  2710. .aead = {
  2711. .base = {
  2712. .cra_name = "seqiv(authenc("
  2713. "hmac(md5),rfc3686(ctr(aes))))",
  2714. .cra_driver_name = "seqiv-authenc-hmac-md5-"
  2715. "rfc3686-ctr-aes-caam",
  2716. .cra_blocksize = 1,
  2717. },
  2718. .setkey = aead_setkey,
  2719. .setauthsize = aead_setauthsize,
  2720. .encrypt = aead_encrypt,
  2721. .decrypt = aead_decrypt,
  2722. .ivsize = CTR_RFC3686_IV_SIZE,
  2723. .maxauthsize = MD5_DIGEST_SIZE,
  2724. },
  2725. .caam = {
  2726. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2727. OP_ALG_AAI_CTR_MOD128,
  2728. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2729. OP_ALG_AAI_HMAC_PRECOMP,
  2730. .rfc3686 = true,
  2731. .geniv = true,
  2732. },
  2733. },
  2734. {
  2735. .aead = {
  2736. .base = {
  2737. .cra_name = "authenc(hmac(sha1),"
  2738. "rfc3686(ctr(aes)))",
  2739. .cra_driver_name = "authenc-hmac-sha1-"
  2740. "rfc3686-ctr-aes-caam",
  2741. .cra_blocksize = 1,
  2742. },
  2743. .setkey = aead_setkey,
  2744. .setauthsize = aead_setauthsize,
  2745. .encrypt = aead_encrypt,
  2746. .decrypt = aead_decrypt,
  2747. .ivsize = CTR_RFC3686_IV_SIZE,
  2748. .maxauthsize = SHA1_DIGEST_SIZE,
  2749. },
  2750. .caam = {
  2751. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2752. OP_ALG_AAI_CTR_MOD128,
  2753. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2754. OP_ALG_AAI_HMAC_PRECOMP,
  2755. .rfc3686 = true,
  2756. },
  2757. },
  2758. {
  2759. .aead = {
  2760. .base = {
  2761. .cra_name = "seqiv(authenc("
  2762. "hmac(sha1),rfc3686(ctr(aes))))",
  2763. .cra_driver_name = "seqiv-authenc-hmac-sha1-"
  2764. "rfc3686-ctr-aes-caam",
  2765. .cra_blocksize = 1,
  2766. },
  2767. .setkey = aead_setkey,
  2768. .setauthsize = aead_setauthsize,
  2769. .encrypt = aead_encrypt,
  2770. .decrypt = aead_decrypt,
  2771. .ivsize = CTR_RFC3686_IV_SIZE,
  2772. .maxauthsize = SHA1_DIGEST_SIZE,
  2773. },
  2774. .caam = {
  2775. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2776. OP_ALG_AAI_CTR_MOD128,
  2777. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2778. OP_ALG_AAI_HMAC_PRECOMP,
  2779. .rfc3686 = true,
  2780. .geniv = true,
  2781. },
  2782. },
  2783. {
  2784. .aead = {
  2785. .base = {
  2786. .cra_name = "authenc(hmac(sha224),"
  2787. "rfc3686(ctr(aes)))",
  2788. .cra_driver_name = "authenc-hmac-sha224-"
  2789. "rfc3686-ctr-aes-caam",
  2790. .cra_blocksize = 1,
  2791. },
  2792. .setkey = aead_setkey,
  2793. .setauthsize = aead_setauthsize,
  2794. .encrypt = aead_encrypt,
  2795. .decrypt = aead_decrypt,
  2796. .ivsize = CTR_RFC3686_IV_SIZE,
  2797. .maxauthsize = SHA224_DIGEST_SIZE,
  2798. },
  2799. .caam = {
  2800. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2801. OP_ALG_AAI_CTR_MOD128,
  2802. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2803. OP_ALG_AAI_HMAC_PRECOMP,
  2804. .rfc3686 = true,
  2805. },
  2806. },
  2807. {
  2808. .aead = {
  2809. .base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
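
/*
 * Illustrative sketch, not part of this driver: once registered, the
 * AEAD templates above are reached through the generic crypto API by
 * cra_name.  A kernel-side user would do roughly the following
 * (key, keylen and err are placeholders):
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha256),rfc3686(ctr(aes)))",
 *				  0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		err = crypto_aead_setkey(tfm, key, keylen);
 *		... build and submit struct aead_request, then ...
 *		crypto_free_aead(tfm);
 *	}
 *
 * The crypto core prefers this implementation over software fallbacks
 * because of the CAAM_CRA_PRIORITY assigned in caam_aead_alg_init().
 */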

struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};
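
/*
 * Per-transform setup shared by all algorithm types.  A job ring is
 * allocated for the transform, and the front of struct caam_ctx is
 * DMA-mapped in a single operation: per the structure's layout (defined
 * earlier in this file), the three shared descriptors and the key come
 * first, so mapping offsetof(struct caam_ctx, sh_desc_enc_dma) bytes
 * starting at sh_desc_enc covers all of them, and the individual DMA
 * addresses are then derived by offset arithmetic.  On Era 6+ devices
 * that derive split keys in place (DKP), the CAAM writes back into the
 * key region, hence the bidirectional mapping.
 */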
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
					offsetof(struct caam_ctx,
						 sh_desc_enc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_enc_dma = dma_addr;
	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
						   sh_desc_dec);
	ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
						      sh_desc_givenc);
	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, false);
}
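
/*
 * For AEADs, whether the descriptors may rewrite the key (DKP) is
 * inferred from the setkey hook: only the authenc()-style templates use
 * aead_setkey() and thus a split HMAC key that Era 6+ hardware can
 * derive in place; the GCM-family transforms install their own setkey
 * and leave the key untouched.
 */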
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}
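
/* Mirror of caam_init_common(): one unmap, then release the job ring. */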
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
			       offsetof(struct caam_ctx, sh_desc_enc_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
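
/*
 * Build a dynamically allocated crypto_alg from one of the
 * givcipher/ablkcipher templates in driver_algs.  AEADs never come
 * through here; they are static caam_aead_alg entries finished off by
 * caam_aead_alg_init() instead.
 */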
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
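
/*
 * Fill in the generic fields that the static driver_aeads entries leave
 * out; names, sizes and CAAM opcode templates are already present.
 */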
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
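
/*
 * Module init: locate the CAAM controller through the device tree, read
 * the CHA (crypto hardware accelerator) version and instantiation
 * registers, then register only the algorithms the detected hardware can
 * execute.  Individual allocation/registration failures are logged and
 * skipped rather than aborting the whole module load.
 */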
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
			     OP_ALG_AAI_XTS)
				continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}
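
	/*
	 * Same hardware gating as the loop above, with two AEAD-specific
	 * additions: GCM is unavailable on LP-series AES blocks, and
	 * digest-based templates are skipped when the MD block is absent
	 * or its largest digest (md_limit) is smaller than the template's
	 * maxauthsize.
	 */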
	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");