chcr_algo.c

  1. /*
  2. * This file is part of the Chelsio T6 Crypto driver for Linux.
  3. *
  4. * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. *
  34. * Written and Maintained by:
  35. * Manoj Malviya (manojmalviya@chelsio.com)
  36. * Atul Gupta (atul.gupta@chelsio.com)
  37. * Jitendra Lulla (jlulla@chelsio.com)
  38. * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39. * Harsh Jain (harsh@chelsio.com)
  40. */
  41. #define pr_fmt(fmt) "chcr:" fmt
  42. #include <linux/kernel.h>
  43. #include <linux/module.h>
  44. #include <linux/crypto.h>
  45. #include <linux/cryptohash.h>
  46. #include <linux/skbuff.h>
  47. #include <linux/rtnetlink.h>
  48. #include <linux/highmem.h>
  49. #include <linux/scatterlist.h>
  50. #include <crypto/aes.h>
  51. #include <crypto/algapi.h>
  52. #include <crypto/hash.h>
  53. #include <crypto/gcm.h>
  54. #include <crypto/sha.h>
  55. #include <crypto/authenc.h>
  56. #include <crypto/ctr.h>
  57. #include <crypto/gf128mul.h>
  58. #include <crypto/internal/aead.h>
  59. #include <crypto/null.h>
  60. #include <crypto/internal/skcipher.h>
  61. #include <crypto/aead.h>
  62. #include <crypto/scatterwalk.h>
  63. #include <crypto/internal/hash.h>
  64. #include "t4fw_api.h"
  65. #include "t4_msg.h"
  66. #include "chcr_core.h"
  67. #include "chcr_algo.h"
  68. #include "chcr_crypto.h"
  69. #define IV AES_BLOCK_SIZE
  70. static unsigned int sgl_ent_len[] = {
  71. 0, 0, 16, 24, 40, 48, 64, 72, 88,
  72. 96, 112, 120, 136, 144, 160, 168, 184,
  73. 192, 208, 216, 232, 240, 256, 264, 280,
  74. 288, 304, 312, 328, 336, 352, 360, 376
  75. };
  76. static unsigned int dsgl_ent_len[] = {
  77. 0, 32, 32, 48, 48, 64, 64, 80, 80,
  78. 112, 112, 128, 128, 144, 144, 160, 160,
  79. 192, 192, 208, 208, 224, 224, 240, 240,
  80. 272, 272, 288, 288, 304, 304, 320, 320
  81. };
  82. static u32 round_constant[11] = {
  83. 0x01000000, 0x02000000, 0x04000000, 0x08000000,
  84. 0x10000000, 0x20000000, 0x40000000, 0x80000000,
  85. 0x1B000000, 0x36000000, 0x6C000000
  86. };
  87. static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
  88. unsigned char *input, int err);
  89. static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
  90. {
  91. return ctx->crypto_ctx->aeadctx;
  92. }
  93. static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
  94. {
  95. return ctx->crypto_ctx->ablkctx;
  96. }
  97. static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
  98. {
  99. return ctx->crypto_ctx->hmacctx;
  100. }
  101. static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
  102. {
  103. return gctx->ctx->gcm;
  104. }
  105. static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
  106. {
  107. return gctx->ctx->authenc;
  108. }
  109. static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
  110. {
  111. return ctx->dev->u_ctx;
  112. }
  113. static inline int is_ofld_imm(const struct sk_buff *skb)
  114. {
  115. return (skb->len <= SGE_MAX_WR_LEN);
  116. }
  117. static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
  118. {
  119. memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
  120. }
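/*
 * sg_nents_xlen - count the SGL entries needed to cover "reqlen" bytes of
 * the scatterlist after skipping the first "skip" bytes, with each DMA
 * segment split into entries of at most "entlen" bytes.
 */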
  121. static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
  122. unsigned int entlen,
  123. unsigned int skip)
  124. {
  125. int nents = 0;
  126. unsigned int less;
  127. unsigned int skip_len = 0;
  128. while (sg && skip) {
  129. if (sg_dma_len(sg) <= skip) {
  130. skip -= sg_dma_len(sg);
  131. skip_len = 0;
  132. sg = sg_next(sg);
  133. } else {
  134. skip_len = skip;
  135. skip = 0;
  136. }
  137. }
  138. while (sg && reqlen) {
  139. less = min(reqlen, sg_dma_len(sg) - skip_len);
  140. nents += DIV_ROUND_UP(less, entlen);
  141. reqlen -= less;
  142. skip_len = 0;
  143. sg = sg_next(sg);
  144. }
  145. return nents;
  146. }
  147. static inline int get_aead_subtype(struct crypto_aead *aead)
  148. {
  149. struct aead_alg *alg = crypto_aead_alg(aead);
  150. struct chcr_alg_template *chcr_crypto_alg =
  151. container_of(alg, struct chcr_alg_template, alg.aead);
  152. return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
  153. }
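/*
 * chcr_verify_tag - verify the authentication tag in software. Compares the
 * tag carried in the CPL_FW6_PLD response against the expected tag (taken
 * from the response itself for GCM/RFC4106, or copied from the end of
 * req->src for other modes) and sets *err to -EBADMSG on mismatch, 0 otherwise.
 */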
  154. void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
  155. {
  156. u8 temp[SHA512_DIGEST_SIZE];
  157. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  158. int authsize = crypto_aead_authsize(tfm);
  159. struct cpl_fw6_pld *fw6_pld;
  160. int cmp = 0;
  161. fw6_pld = (struct cpl_fw6_pld *)input;
  162. if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
  163. (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
  164. cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
  165. } else {
  166. sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
  167. authsize, req->assoclen +
  168. req->cryptlen - authsize);
  169. cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
  170. }
  171. if (cmp)
  172. *err = -EBADMSG;
  173. else
  174. *err = 0;
  175. }
  176. static inline void chcr_handle_aead_resp(struct aead_request *req,
  177. unsigned char *input,
  178. int err)
  179. {
  180. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  181. chcr_aead_common_exit(req);
  182. if (reqctx->verify == VERIFY_SW) {
  183. chcr_verify_tag(req, input, &err);
  184. reqctx->verify = VERIFY_HW;
  185. }
  186. req->base.complete(&req->base, err);
  187. }
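/*
 * get_aes_decrypt_key - run the AES key expansion on "key" (keylength is in
 * bits) and write the last Nk words of the key schedule, in reverse order
 * and big-endian byte order, into dec_key. This is the reverse-round key
 * used for hardware decryption (see generate_copy_rrkey()).
 */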
  188. static void get_aes_decrypt_key(unsigned char *dec_key,
  189. const unsigned char *key,
  190. unsigned int keylength)
  191. {
  192. u32 temp;
  193. u32 w_ring[MAX_NK];
  194. int i, j, k;
  195. u8 nr, nk;
  196. switch (keylength) {
  197. case AES_KEYLENGTH_128BIT:
  198. nk = KEYLENGTH_4BYTES;
  199. nr = NUMBER_OF_ROUNDS_10;
  200. break;
  201. case AES_KEYLENGTH_192BIT:
  202. nk = KEYLENGTH_6BYTES;
  203. nr = NUMBER_OF_ROUNDS_12;
  204. break;
  205. case AES_KEYLENGTH_256BIT:
  206. nk = KEYLENGTH_8BYTES;
  207. nr = NUMBER_OF_ROUNDS_14;
  208. break;
  209. default:
  210. return;
  211. }
  212. for (i = 0; i < nk; i++)
  213. w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
  214. i = 0;
  215. temp = w_ring[nk - 1];
  216. while (i + nk < (nr + 1) * 4) {
  217. if (!(i % nk)) {
  218. /* RotWord(temp) */
  219. temp = (temp << 8) | (temp >> 24);
  220. temp = aes_ks_subword(temp);
  221. temp ^= round_constant[i / nk];
  222. } else if (nk == 8 && (i % 4 == 0)) {
  223. temp = aes_ks_subword(temp);
  224. }
  225. w_ring[i % nk] ^= temp;
  226. temp = w_ring[i % nk];
  227. i++;
  228. }
  229. i--;
  230. for (k = 0, j = i % nk; k < nk; k++) {
  231. *((u32 *)dec_key + k) = htonl(w_ring[j]);
  232. j--;
  233. if (j < 0)
  234. j += nk;
  235. }
  236. }
  237. static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
  238. {
  239. struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
  240. switch (ds) {
  241. case SHA1_DIGEST_SIZE:
  242. base_hash = crypto_alloc_shash("sha1", 0, 0);
  243. break;
  244. case SHA224_DIGEST_SIZE:
  245. base_hash = crypto_alloc_shash("sha224", 0, 0);
  246. break;
  247. case SHA256_DIGEST_SIZE:
  248. base_hash = crypto_alloc_shash("sha256", 0, 0);
  249. break;
  250. case SHA384_DIGEST_SIZE:
  251. base_hash = crypto_alloc_shash("sha384", 0, 0);
  252. break;
  253. case SHA512_DIGEST_SIZE:
  254. base_hash = crypto_alloc_shash("sha512", 0, 0);
  255. break;
  256. }
  257. return base_hash;
  258. }
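/*
 * chcr_compute_partial_hash - run one block (the HMAC ipad or opad) through
 * the software shash and export the resulting intermediate digest state into
 * result_hash.
 */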
  259. static int chcr_compute_partial_hash(struct shash_desc *desc,
  260. char *iopad, char *result_hash,
  261. int digest_size)
  262. {
  263. struct sha1_state sha1_st;
  264. struct sha256_state sha256_st;
  265. struct sha512_state sha512_st;
  266. int error;
  267. if (digest_size == SHA1_DIGEST_SIZE) {
  268. error = crypto_shash_init(desc) ?:
  269. crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
  270. crypto_shash_export(desc, (void *)&sha1_st);
  271. memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
  272. } else if (digest_size == SHA224_DIGEST_SIZE) {
  273. error = crypto_shash_init(desc) ?:
  274. crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
  275. crypto_shash_export(desc, (void *)&sha256_st);
  276. memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
  277. } else if (digest_size == SHA256_DIGEST_SIZE) {
  278. error = crypto_shash_init(desc) ?:
  279. crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
  280. crypto_shash_export(desc, (void *)&sha256_st);
  281. memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
  282. } else if (digest_size == SHA384_DIGEST_SIZE) {
  283. error = crypto_shash_init(desc) ?:
  284. crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
  285. crypto_shash_export(desc, (void *)&sha512_st);
  286. memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
  287. } else if (digest_size == SHA512_DIGEST_SIZE) {
  288. error = crypto_shash_init(desc) ?:
  289. crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
  290. crypto_shash_export(desc, (void *)&sha512_st);
  291. memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
  292. } else {
  293. error = -EINVAL;
  294. pr_err("Unknown digest size %d\n", digest_size);
  295. }
  296. return error;
  297. }
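/*
 * chcr_change_order - convert the digest state words in "buf" to big-endian
 * byte order: 64-bit words when ds == SHA512_DIGEST_SIZE, 32-bit words
 * otherwise.
 */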
  298. static void chcr_change_order(char *buf, int ds)
  299. {
  300. int i;
  301. if (ds == SHA512_DIGEST_SIZE) {
  302. for (i = 0; i < (ds / sizeof(u64)); i++)
  303. *((__be64 *)buf + i) =
  304. cpu_to_be64(*((u64 *)buf + i));
  305. } else {
  306. for (i = 0; i < (ds / sizeof(u32)); i++)
  307. *((__be32 *)buf + i) =
  308. cpu_to_be32(*((u32 *)buf + i));
  309. }
  310. }
  311. static inline int is_hmac(struct crypto_tfm *tfm)
  312. {
  313. struct crypto_alg *alg = tfm->__crt_alg;
  314. struct chcr_alg_template *chcr_crypto_alg =
  315. container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
  316. alg.hash);
  317. if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
  318. return 1;
  319. return 0;
  320. }
  321. static inline void dsgl_walk_init(struct dsgl_walk *walk,
  322. struct cpl_rx_phys_dsgl *dsgl)
  323. {
  324. walk->dsgl = dsgl;
  325. walk->nents = 0;
  326. walk->to = (struct phys_sge_pairs *)(dsgl + 1);
  327. }
  328. static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
  329. int pci_chan_id)
  330. {
  331. struct cpl_rx_phys_dsgl *phys_cpl;
  332. phys_cpl = walk->dsgl;
  333. phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
  334. | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
  335. phys_cpl->pcirlxorder_to_noofsgentr =
  336. htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
  337. CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
  338. CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
  339. CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
  340. CPL_RX_PHYS_DSGL_DCAID_V(0) |
  341. CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
  342. phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
  343. phys_cpl->rss_hdr_int.qid = htons(qid);
  344. phys_cpl->rss_hdr_int.hash_val = 0;
  345. phys_cpl->rss_hdr_int.channel = pci_chan_id;
  346. }
  347. static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
  348. size_t size,
  349. dma_addr_t *addr)
  350. {
  351. int j;
  352. if (!size)
  353. return;
  354. j = walk->nents;
  355. walk->to->len[j % 8] = htons(size);
  356. walk->to->addr[j % 8] = cpu_to_be64(*addr);
  357. j++;
  358. if ((j % 8) == 0)
  359. walk->to++;
  360. walk->nents = j;
  361. }
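/*
 * dsgl_walk_add_sg - append up to "slen" bytes of a scatterlist (after
 * skipping "skip" bytes) to the destination PHYS_DSGL, splitting each DMA
 * segment into entries of at most CHCR_DST_SG_SIZE bytes and remembering the
 * last scatterlist element and length used.
 */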
  362. static void dsgl_walk_add_sg(struct dsgl_walk *walk,
  363. struct scatterlist *sg,
  364. unsigned int slen,
  365. unsigned int skip)
  366. {
  367. int skip_len = 0;
  368. unsigned int left_size = slen, len = 0;
  369. unsigned int j = walk->nents;
  370. int offset, ent_len;
  371. if (!slen)
  372. return;
  373. while (sg && skip) {
  374. if (sg_dma_len(sg) <= skip) {
  375. skip -= sg_dma_len(sg);
  376. skip_len = 0;
  377. sg = sg_next(sg);
  378. } else {
  379. skip_len = skip;
  380. skip = 0;
  381. }
  382. }
  383. while (left_size && sg) {
  384. len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
  385. offset = 0;
  386. while (len) {
  387. ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
  388. walk->to->len[j % 8] = htons(ent_len);
  389. walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
  390. offset + skip_len);
  391. offset += ent_len;
  392. len -= ent_len;
  393. j++;
  394. if ((j % 8) == 0)
  395. walk->to++;
  396. }
  397. walk->last_sg = sg;
  398. walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
  399. skip_len) + skip_len;
  400. left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
  401. skip_len = 0;
  402. sg = sg_next(sg);
  403. }
  404. walk->nents = j;
  405. }
  406. static inline void ulptx_walk_init(struct ulptx_walk *walk,
  407. struct ulptx_sgl *ulp)
  408. {
  409. walk->sgl = ulp;
  410. walk->nents = 0;
  411. walk->pair_idx = 0;
  412. walk->pair = ulp->sge;
  413. walk->last_sg = NULL;
  414. walk->last_sg_len = 0;
  415. }
  416. static inline void ulptx_walk_end(struct ulptx_walk *walk)
  417. {
  418. walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
  419. ULPTX_NSGE_V(walk->nents));
  420. }
  421. static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
  422. size_t size,
  423. dma_addr_t *addr)
  424. {
  425. if (!size)
  426. return;
  427. if (walk->nents == 0) {
  428. walk->sgl->len0 = cpu_to_be32(size);
  429. walk->sgl->addr0 = cpu_to_be64(*addr);
  430. } else {
  431. walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
  432. walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
  433. walk->pair_idx = !walk->pair_idx;
  434. if (!walk->pair_idx)
  435. walk->pair++;
  436. }
  437. walk->nents++;
  438. }
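/*
 * ulptx_walk_add_sg - append up to "len" bytes of a scatterlist (after
 * skipping "skip" bytes) to the ULPTX source SGL. The first entry goes into
 * len0/addr0, subsequent entries are packed as address/length pairs, and
 * every entry is limited to CHCR_SRC_SG_SIZE bytes.
 */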
  439. static void ulptx_walk_add_sg(struct ulptx_walk *walk,
  440. struct scatterlist *sg,
  441. unsigned int len,
  442. unsigned int skip)
  443. {
  444. int small;
  445. int skip_len = 0;
  446. unsigned int sgmin;
  447. if (!len)
  448. return;
  449. while (sg && skip) {
  450. if (sg_dma_len(sg) <= skip) {
  451. skip -= sg_dma_len(sg);
  452. skip_len = 0;
  453. sg = sg_next(sg);
  454. } else {
  455. skip_len = skip;
  456. skip = 0;
  457. }
  458. }
  459. WARN(!sg, "SG should not be null here\n");
  460. if (sg && (walk->nents == 0)) {
  461. small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
  462. sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
  463. walk->sgl->len0 = cpu_to_be32(sgmin);
  464. walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
  465. walk->nents++;
  466. len -= sgmin;
  467. walk->last_sg = sg;
  468. walk->last_sg_len = sgmin + skip_len;
  469. skip_len += sgmin;
  470. if (sg_dma_len(sg) == skip_len) {
  471. sg = sg_next(sg);
  472. skip_len = 0;
  473. }
  474. }
  475. while (sg && len) {
  476. small = min(sg_dma_len(sg) - skip_len, len);
  477. sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
  478. walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
  479. walk->pair->addr[walk->pair_idx] =
  480. cpu_to_be64(sg_dma_address(sg) + skip_len);
  481. walk->pair_idx = !walk->pair_idx;
  482. walk->nents++;
  483. if (!walk->pair_idx)
  484. walk->pair++;
  485. len -= sgmin;
  486. skip_len += sgmin;
  487. walk->last_sg = sg;
  488. walk->last_sg_len = skip_len;
  489. if (sg_dma_len(sg) == skip_len) {
  490. sg = sg_next(sg);
  491. skip_len = 0;
  492. }
  493. }
  494. }
  495. static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
  496. {
  497. struct crypto_alg *alg = tfm->__crt_alg;
  498. struct chcr_alg_template *chcr_crypto_alg =
  499. container_of(alg, struct chcr_alg_template, alg.crypto);
  500. return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
  501. }
  502. static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
  503. {
  504. struct adapter *adap = netdev2adap(dev);
  505. struct sge_uld_txq_info *txq_info =
  506. adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
  507. struct sge_uld_txq *txq;
  508. int ret = 0;
  509. local_bh_disable();
  510. txq = &txq_info->uldtxq[idx];
  511. spin_lock(&txq->sendq.lock);
  512. if (txq->full)
  513. ret = -1;
  514. spin_unlock(&txq->sendq.lock);
  515. local_bh_enable();
  516. return ret;
  517. }
  518. static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
  519. struct _key_ctx *key_ctx)
  520. {
  521. if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
  522. memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
  523. } else {
  524. memcpy(key_ctx->key,
  525. ablkctx->key + (ablkctx->enckey_len >> 1),
  526. ablkctx->enckey_len >> 1);
  527. memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
  528. ablkctx->rrkey, ablkctx->enckey_len >> 1);
  529. }
  530. return 0;
  531. }
  532. static int chcr_hash_ent_in_wr(struct scatterlist *src,
  533. unsigned int minsg,
  534. unsigned int space,
  535. unsigned int srcskip)
  536. {
  537. int srclen = 0;
  538. int srcsg = minsg;
  539. int soffset = 0, sless;
  540. if (sg_dma_len(src) == srcskip) {
  541. src = sg_next(src);
  542. srcskip = 0;
  543. }
  544. while (src && space > (sgl_ent_len[srcsg + 1])) {
  545. sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
  546. CHCR_SRC_SG_SIZE);
  547. srclen += sless;
  548. soffset += sless;
  549. srcsg++;
  550. if (sg_dma_len(src) == (soffset + srcskip)) {
  551. src = sg_next(src);
  552. soffset = 0;
  553. srcskip = 0;
  554. }
  555. }
  556. return srclen;
  557. }
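/*
 * chcr_sg_ent_in_wr - determine how many bytes of the source and destination
 * scatterlists fit into one work request without exceeding "space" bytes,
 * using the sgl_ent_len/dsgl_ent_len tables to account for per-entry SGL
 * overhead. Returns the byte count usable for both lists.
 */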
  558. static int chcr_sg_ent_in_wr(struct scatterlist *src,
  559. struct scatterlist *dst,
  560. unsigned int minsg,
  561. unsigned int space,
  562. unsigned int srcskip,
  563. unsigned int dstskip)
  564. {
  565. int srclen = 0, dstlen = 0;
  566. int srcsg = minsg, dstsg = minsg;
  567. int offset = 0, soffset = 0, less, sless = 0;
  568. if (sg_dma_len(src) == srcskip) {
  569. src = sg_next(src);
  570. srcskip = 0;
  571. }
  572. if (sg_dma_len(dst) == dstskip) {
  573. dst = sg_next(dst);
  574. dstskip = 0;
  575. }
  576. while (src && dst &&
  577. space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
  578. sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
  579. CHCR_SRC_SG_SIZE);
  580. srclen += sless;
  581. srcsg++;
  582. offset = 0;
  583. while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
  584. space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
  585. if (srclen <= dstlen)
  586. break;
  587. less = min_t(unsigned int, sg_dma_len(dst) - offset -
  588. dstskip, CHCR_DST_SG_SIZE);
  589. dstlen += less;
  590. offset += less;
  591. if ((offset + dstskip) == sg_dma_len(dst)) {
  592. dst = sg_next(dst);
  593. offset = 0;
  594. }
  595. dstsg++;
  596. dstskip = 0;
  597. }
  598. soffset += sless;
  599. if ((soffset + srcskip) == sg_dma_len(src)) {
  600. src = sg_next(src);
  601. srcskip = 0;
  602. soffset = 0;
  603. }
  604. }
  605. return min(srclen, dstlen);
  606. }
  607. static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
  608. u32 flags,
  609. struct scatterlist *src,
  610. struct scatterlist *dst,
  611. unsigned int nbytes,
  612. u8 *iv,
  613. unsigned short op_type)
  614. {
  615. int err;
  616. SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
  617. skcipher_request_set_tfm(subreq, cipher);
  618. skcipher_request_set_callback(subreq, flags, NULL, NULL);
  619. skcipher_request_set_crypt(subreq, src, dst,
  620. nbytes, iv);
  621. err = op_type ? crypto_skcipher_decrypt(subreq) :
  622. crypto_skcipher_encrypt(subreq);
  623. skcipher_request_zero(subreq);
  624. return err;
  625. }
  626. static inline void create_wreq(struct chcr_context *ctx,
  627. struct chcr_wr *chcr_req,
  628. struct crypto_async_request *req,
  629. unsigned int imm,
  630. int hash_sz,
  631. unsigned int len16,
  632. unsigned int sc_len,
  633. unsigned int lcb)
  634. {
  635. struct uld_ctx *u_ctx = ULD_CTX(ctx);
  636. int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
  637. chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
  638. chcr_req->wreq.pld_size_hash_size =
  639. htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
  640. chcr_req->wreq.len16_pkd =
  641. htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
  642. chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
  643. chcr_req->wreq.rx_chid_to_rx_q_id =
  644. FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
  645. !!lcb, ctx->tx_qidx);
  646. chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
  647. qid);
  648. chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
  649. ((sizeof(chcr_req->wreq)) >> 4)));
  650. chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
  651. chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
  652. sizeof(chcr_req->key_ctx) + sc_len);
  653. }
  654. /**
  655. * create_cipher_wr - form the WR for cipher operations
  656. * @wrparam: cipher work-request parameters: the cipher request, the
  657. * number of bytes to process in this WR, and the ingress qid where
  658. * the response of this WR should be received. The operation type
  659. * (encryption or decryption) is taken from the request context.
  660. */
  661. static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
  662. {
  663. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
  664. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  665. struct sk_buff *skb = NULL;
  666. struct chcr_wr *chcr_req;
  667. struct cpl_rx_phys_dsgl *phys_cpl;
  668. struct ulptx_sgl *ulptx;
  669. struct chcr_blkcipher_req_ctx *reqctx =
  670. ablkcipher_request_ctx(wrparam->req);
  671. unsigned int temp = 0, transhdr_len, dst_size;
  672. int error;
  673. int nents;
  674. unsigned int kctx_len;
  675. gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  676. GFP_KERNEL : GFP_ATOMIC;
  677. struct adapter *adap = padap(c_ctx(tfm)->dev);
  678. nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
  679. reqctx->dst_ofst);
  680. dst_size = get_space_for_phys_dsgl(nents);
  681. kctx_len = roundup(ablkctx->enckey_len, 16);
  682. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  683. nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
  684. CHCR_SRC_SG_SIZE, reqctx->src_ofst);
  685. temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
  686. (sgl_len(nents) * 8);
  687. transhdr_len += temp;
  688. transhdr_len = roundup(transhdr_len, 16);
  689. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  690. if (!skb) {
  691. error = -ENOMEM;
  692. goto err;
  693. }
  694. chcr_req = __skb_put_zero(skb, transhdr_len);
  695. chcr_req->sec_cpl.op_ivinsrtofst =
  696. FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
  697. chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
  698. chcr_req->sec_cpl.aadstart_cipherstop_hi =
  699. FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
  700. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  701. FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
  702. chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
  703. ablkctx->ciph_mode,
  704. 0, 0, IV >> 1);
  705. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
  706. 0, 1, dst_size);
  707. chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
  708. if ((reqctx->op == CHCR_DECRYPT_OP) &&
  709. (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  710. CRYPTO_ALG_SUB_TYPE_CTR)) &&
  711. (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  712. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
  713. generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
  714. } else {
  715. if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
  716. (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
  717. memcpy(chcr_req->key_ctx.key, ablkctx->key,
  718. ablkctx->enckey_len);
  719. } else {
  720. memcpy(chcr_req->key_ctx.key, ablkctx->key +
  721. (ablkctx->enckey_len >> 1),
  722. ablkctx->enckey_len >> 1);
  723. memcpy(chcr_req->key_ctx.key +
  724. (ablkctx->enckey_len >> 1),
  725. ablkctx->key,
  726. ablkctx->enckey_len >> 1);
  727. }
  728. }
  729. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  730. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  731. chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
  732. chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
  733. atomic_inc(&adap->chcr_stats.cipher_rqst);
  734. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
  735. + (reqctx->imm ? (wrparam->bytes) : 0);
  736. create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
  737. transhdr_len, temp,
  738. ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
  739. reqctx->skb = skb;
  740. if (reqctx->op && (ablkctx->ciph_mode ==
  741. CHCR_SCMD_CIPHER_MODE_AES_CBC))
  742. sg_pcopy_to_buffer(wrparam->req->src,
  743. sg_nents(wrparam->req->src), wrparam->req->info, 16,
  744. reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
  745. return skb;
  746. err:
  747. return ERR_PTR(error);
  748. }
  749. static inline int chcr_keyctx_ck_size(unsigned int keylen)
  750. {
  751. int ck_size = 0;
  752. if (keylen == AES_KEYSIZE_128)
  753. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  754. else if (keylen == AES_KEYSIZE_192)
  755. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  756. else if (keylen == AES_KEYSIZE_256)
  757. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  758. else
  759. ck_size = 0;
  760. return ck_size;
  761. }
  762. static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
  763. const u8 *key,
  764. unsigned int keylen)
  765. {
  766. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  767. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  768. int err = 0;
  769. crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  770. crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
  771. CRYPTO_TFM_REQ_MASK);
  772. err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
  773. tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
  774. tfm->crt_flags |=
  775. crypto_skcipher_get_flags(ablkctx->sw_cipher) &
  776. CRYPTO_TFM_RES_MASK;
  777. return err;
  778. }
  779. static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
  780. const u8 *key,
  781. unsigned int keylen)
  782. {
  783. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  784. unsigned int ck_size, context_size;
  785. u16 alignment = 0;
  786. int err;
  787. err = chcr_cipher_fallback_setkey(cipher, key, keylen);
  788. if (err)
  789. goto badkey_err;
  790. ck_size = chcr_keyctx_ck_size(keylen);
  791. alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
  792. memcpy(ablkctx->key, key, keylen);
  793. ablkctx->enckey_len = keylen;
  794. get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
  795. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
  796. keylen + alignment) >> 4;
  797. ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
  798. 0, 0, context_size);
  799. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
  800. return 0;
  801. badkey_err:
  802. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  803. ablkctx->enckey_len = 0;
  804. return err;
  805. }
  806. static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
  807. const u8 *key,
  808. unsigned int keylen)
  809. {
  810. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  811. unsigned int ck_size, context_size;
  812. u16 alignment = 0;
  813. int err;
  814. err = chcr_cipher_fallback_setkey(cipher, key, keylen);
  815. if (err)
  816. goto badkey_err;
  817. ck_size = chcr_keyctx_ck_size(keylen);
  818. alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
  819. memcpy(ablkctx->key, key, keylen);
  820. ablkctx->enckey_len = keylen;
  821. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
  822. keylen + alignment) >> 4;
  823. ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
  824. 0, 0, context_size);
  825. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  826. return 0;
  827. badkey_err:
  828. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  829. ablkctx->enckey_len = 0;
  830. return err;
  831. }
  832. static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
  833. const u8 *key,
  834. unsigned int keylen)
  835. {
  836. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  837. unsigned int ck_size, context_size;
  838. u16 alignment = 0;
  839. int err;
  840. if (keylen < CTR_RFC3686_NONCE_SIZE)
  841. return -EINVAL;
  842. memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
  843. CTR_RFC3686_NONCE_SIZE);
  844. keylen -= CTR_RFC3686_NONCE_SIZE;
  845. err = chcr_cipher_fallback_setkey(cipher, key, keylen);
  846. if (err)
  847. goto badkey_err;
  848. ck_size = chcr_keyctx_ck_size(keylen);
  849. alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
  850. memcpy(ablkctx->key, key, keylen);
  851. ablkctx->enckey_len = keylen;
  852. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
  853. keylen + alignment) >> 4;
  854. ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
  855. 0, 0, context_size);
  856. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  857. return 0;
  858. badkey_err:
  859. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  860. ablkctx->enckey_len = 0;
  861. return err;
  862. }
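/*
 * ctr_add_iv - copy srciv to dstiv and add "add" to it, treating the IV as a
 * big-endian counter and propagating the carry across 32-bit words.
 */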
  863. static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
  864. {
  865. unsigned int size = AES_BLOCK_SIZE;
  866. __be32 *b = (__be32 *)(dstiv + size);
  867. u32 c, prev;
  868. memcpy(dstiv, srciv, AES_BLOCK_SIZE);
  869. for (; size >= 4; size -= 4) {
  870. prev = be32_to_cpu(*--b);
  871. c = prev + add;
  872. *b = cpu_to_be32(c);
  873. if (prev < c)
  874. break;
  875. add = 1;
  876. }
  877. }
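/*
 * adjust_ctr_overflow - cap "bytes" so that the low 32-bit word of the CTR
 * counter in "iv" does not wrap within this request.
 */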
  878. static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
  879. {
  880. __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
  881. u64 c;
  882. u32 temp = be32_to_cpu(*--b);
  883. temp = ~temp;
  884. c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
  885. if ((bytes / AES_BLOCK_SIZE) > c)
  886. bytes = c * AES_BLOCK_SIZE;
  887. return bytes;
  888. }
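/*
 * chcr_update_tweak - advance the XTS tweak past the blocks already
 * processed: encrypt the original IV with the second half of the key,
 * multiply by x in GF(2^128) once per processed block, and, unless this is
 * the final IV, decrypt the result back with the same key.
 */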
  889. static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
  890. u32 isfinal)
  891. {
  892. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  893. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  894. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  895. struct crypto_cipher *cipher;
  896. int ret, i;
  897. u8 *key;
  898. unsigned int keylen;
  899. int round = reqctx->last_req_len / AES_BLOCK_SIZE;
  900. int round8 = round / 8;
  901. cipher = ablkctx->aes_generic;
  902. memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
  903. keylen = ablkctx->enckey_len / 2;
  904. key = ablkctx->key + keylen;
  905. ret = crypto_cipher_setkey(cipher, key, keylen);
  906. if (ret)
  907. goto out;
  908. crypto_cipher_encrypt_one(cipher, iv, iv);
  909. for (i = 0; i < round8; i++)
  910. gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
  911. for (i = 0; i < (round % 8); i++)
  912. gf128mul_x_ble((le128 *)iv, (le128 *)iv);
  913. if (!isfinal)
  914. crypto_cipher_decrypt_one(cipher, iv, iv);
  915. out:
  916. return ret;
  917. }
  918. static int chcr_update_cipher_iv(struct ablkcipher_request *req,
  919. struct cpl_fw6_pld *fw6_pld, u8 *iv)
  920. {
  921. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  922. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  923. int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
  924. int ret = 0;
  925. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
  926. ctr_add_iv(iv, req->info, (reqctx->processed /
  927. AES_BLOCK_SIZE));
  928. else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
  929. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  930. CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
  931. AES_BLOCK_SIZE) + 1);
  932. else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
  933. ret = chcr_update_tweak(req, iv, 0);
  934. else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
  935. if (reqctx->op)
  936. /*Updated before sending last WR*/
  937. memcpy(iv, req->info, AES_BLOCK_SIZE);
  938. else
  939. memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
  940. }
  941. return ret;
  942. }
  943. /* A separate function is needed for the final IV because in RFC 3686 the
  944. * initial counter starts at 1 and the IV buffer is only 8 bytes, which
  945. * remains constant across subsequent update requests.
  946. */
  947. static int chcr_final_cipher_iv(struct ablkcipher_request *req,
  948. struct cpl_fw6_pld *fw6_pld, u8 *iv)
  949. {
  950. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  951. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  952. int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
  953. int ret = 0;
  954. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
  955. ctr_add_iv(iv, req->info, (reqctx->processed /
  956. AES_BLOCK_SIZE));
  957. else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
  958. ret = chcr_update_tweak(req, iv, 1);
  959. else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
  960. /*Already updated for Decrypt*/
  961. if (!reqctx->op)
  962. memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
  963. }
  964. return ret;
  965. }
  966. static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
  967. unsigned char *input, int err)
  968. {
  969. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  970. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  971. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  972. struct sk_buff *skb;
  973. struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
  974. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  975. struct cipher_wr_param wrparam;
  976. int bytes;
  977. if (err)
  978. goto unmap;
  979. if (req->nbytes == reqctx->processed) {
  980. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  981. req);
  982. err = chcr_final_cipher_iv(req, fw6_pld, req->info);
  983. goto complete;
  984. }
  985. if (!reqctx->imm) {
  986. bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
  987. CIP_SPACE_LEFT(ablkctx->enckey_len),
  988. reqctx->src_ofst, reqctx->dst_ofst);
  989. if ((bytes + reqctx->processed) >= req->nbytes)
  990. bytes = req->nbytes - reqctx->processed;
  991. else
  992. bytes = rounddown(bytes, 16);
  993. } else {
  994. /* CTR mode counter overflow */
  995. bytes = req->nbytes - reqctx->processed;
  996. }
  997. err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
  998. if (err)
  999. goto unmap;
  1000. if (unlikely(bytes == 0)) {
  1001. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  1002. req);
  1003. err = chcr_cipher_fallback(ablkctx->sw_cipher,
  1004. req->base.flags,
  1005. req->src,
  1006. req->dst,
  1007. req->nbytes,
  1008. req->info,
  1009. reqctx->op);
  1010. goto complete;
  1011. }
  1012. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1013. CRYPTO_ALG_SUB_TYPE_CTR)
  1014. bytes = adjust_ctr_overflow(reqctx->iv, bytes);
  1015. wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
  1016. wrparam.req = req;
  1017. wrparam.bytes = bytes;
  1018. skb = create_cipher_wr(&wrparam);
  1019. if (IS_ERR(skb)) {
  1020. pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
  1021. err = PTR_ERR(skb);
  1022. goto unmap;
  1023. }
  1024. skb->dev = u_ctx->lldi.ports[0];
  1025. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1026. chcr_send_wr(skb);
  1027. reqctx->last_req_len = bytes;
  1028. reqctx->processed += bytes;
  1029. return 0;
  1030. unmap:
  1031. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1032. complete:
  1033. req->base.complete(&req->base, err);
  1034. return err;
  1035. }
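/*
 * process_cipher - validate and DMA-map the request, decide between
 * immediate data and SGLs, set up the per-request IV (including the
 * RFC 3686 nonce/IV/counter layout), and build the first cipher work
 * request; falls back to the software cipher when no bytes fit in one WR.
 */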
  1036. static int process_cipher(struct ablkcipher_request *req,
  1037. unsigned short qid,
  1038. struct sk_buff **skb,
  1039. unsigned short op_type)
  1040. {
  1041. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1042. unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
  1043. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  1044. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  1045. struct cipher_wr_param wrparam;
  1046. int bytes, err = -EINVAL;
  1047. reqctx->processed = 0;
  1048. if (!req->info)
  1049. goto error;
  1050. if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
  1051. (req->nbytes == 0) ||
  1052. (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
  1053. pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
  1054. ablkctx->enckey_len, req->nbytes, ivsize);
  1055. goto error;
  1056. }
  1057. chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1058. if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
  1059. AES_MIN_KEY_SIZE +
  1060. sizeof(struct cpl_rx_phys_dsgl) +
  1061. /*Min dsgl size*/
  1062. 32))) {
  1063. /* Can be sent as immediate data */
  1064. unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
  1065. dnents = sg_nents_xlen(req->dst, req->nbytes,
  1066. CHCR_DST_SG_SIZE, 0);
  1067. phys_dsgl = get_space_for_phys_dsgl(dnents);
  1068. kctx_len = roundup(ablkctx->enckey_len, 16);
  1069. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
  1070. reqctx->imm = (transhdr_len + IV + req->nbytes) <=
  1071. SGE_MAX_WR_LEN;
  1072. bytes = IV + req->nbytes;
  1073. } else {
  1074. reqctx->imm = 0;
  1075. }
  1076. if (!reqctx->imm) {
  1077. bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
  1078. CIP_SPACE_LEFT(ablkctx->enckey_len),
  1079. 0, 0);
  1080. if ((bytes + reqctx->processed) >= req->nbytes)
  1081. bytes = req->nbytes - reqctx->processed;
  1082. else
  1083. bytes = rounddown(bytes, 16);
  1084. } else {
  1085. bytes = req->nbytes;
  1086. }
  1087. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1088. CRYPTO_ALG_SUB_TYPE_CTR) {
  1089. bytes = adjust_ctr_overflow(req->info, bytes);
  1090. }
  1091. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1092. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
  1093. memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
  1094. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
  1095. CTR_RFC3686_IV_SIZE);
  1096. /* initialize counter portion of counter block */
  1097. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  1098. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  1099. } else {
  1100. memcpy(reqctx->iv, req->info, IV);
  1101. }
  1102. if (unlikely(bytes == 0)) {
  1103. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  1104. req);
  1105. err = chcr_cipher_fallback(ablkctx->sw_cipher,
  1106. req->base.flags,
  1107. req->src,
  1108. req->dst,
  1109. req->nbytes,
  1110. reqctx->iv,
  1111. op_type);
  1112. goto error;
  1113. }
  1114. reqctx->op = op_type;
  1115. reqctx->srcsg = req->src;
  1116. reqctx->dstsg = req->dst;
  1117. reqctx->src_ofst = 0;
  1118. reqctx->dst_ofst = 0;
  1119. wrparam.qid = qid;
  1120. wrparam.req = req;
  1121. wrparam.bytes = bytes;
  1122. *skb = create_cipher_wr(&wrparam);
  1123. if (IS_ERR(*skb)) {
  1124. err = PTR_ERR(*skb);
  1125. goto unmap;
  1126. }
  1127. reqctx->processed = bytes;
  1128. reqctx->last_req_len = bytes;
  1129. return 0;
  1130. unmap:
  1131. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1132. error:
  1133. return err;
  1134. }
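/* chcr_aes_encrypt/chcr_aes_decrypt - ablkcipher entry points: check for TX
 * queue congestion, build the WR via process_cipher() and hand the skb to
 * the LLD. -EBUSY is returned when the queue was full but the request was
 * still accepted for backlog.
 */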
  1135. static int chcr_aes_encrypt(struct ablkcipher_request *req)
  1136. {
  1137. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1138. struct sk_buff *skb = NULL;
  1139. int err, isfull = 0;
  1140. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  1141. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1142. c_ctx(tfm)->tx_qidx))) {
  1143. isfull = 1;
  1144. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1145. return -ENOSPC;
  1146. }
  1147. err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
  1148. &skb, CHCR_ENCRYPT_OP);
  1149. if (err || !skb)
  1150. return err;
  1151. skb->dev = u_ctx->lldi.ports[0];
  1152. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1153. chcr_send_wr(skb);
  1154. return isfull ? -EBUSY : -EINPROGRESS;
  1155. }
  1156. static int chcr_aes_decrypt(struct ablkcipher_request *req)
  1157. {
  1158. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1159. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  1160. struct sk_buff *skb = NULL;
  1161. int err, isfull = 0;
  1162. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1163. c_ctx(tfm)->tx_qidx))) {
  1164. isfull = 1;
  1165. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1166. return -ENOSPC;
  1167. }
  1168. err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
  1169. &skb, CHCR_DECRYPT_OP);
  1170. if (err || !skb)
  1171. return err;
  1172. skb->dev = u_ctx->lldi.ports[0];
  1173. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1174. chcr_send_wr(skb);
  1175. return isfull ? -EBUSY : -EINPROGRESS;
  1176. }
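/* chcr_device_init - bind the transform context to a chcr device and derive
 * per-context RX/TX queue indices from a round-robin channel id and the
 * current CPU.
 */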
  1177. static int chcr_device_init(struct chcr_context *ctx)
  1178. {
  1179. struct uld_ctx *u_ctx = NULL;
  1180. struct adapter *adap;
  1181. unsigned int id;
  1182. int txq_perchan, txq_idx, ntxq;
  1183. int err = 0, rxq_perchan, rxq_idx;
  1184. id = smp_processor_id();
  1185. if (!ctx->dev) {
  1186. u_ctx = assign_chcr_device();
  1187. if (!u_ctx) {
  1188. pr_err("chcr device assignment fails\n");
  1189. goto out;
  1190. }
  1191. ctx->dev = u_ctx->dev;
  1192. adap = padap(ctx->dev);
  1193. ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
  1194. adap->vres.ncrypto_fc);
  1195. rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
  1196. txq_perchan = ntxq / u_ctx->lldi.nchan;
  1197. spin_lock(&ctx->dev->lock_chcr_dev);
  1198. ctx->tx_chan_id = ctx->dev->tx_channel_id;
  1199. ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
  1200. ctx->dev->rx_channel_id = 0;
  1201. spin_unlock(&ctx->dev->lock_chcr_dev);
  1202. rxq_idx = ctx->tx_chan_id * rxq_perchan;
  1203. rxq_idx += id % rxq_perchan;
  1204. txq_idx = ctx->tx_chan_id * txq_perchan;
  1205. txq_idx += id % txq_perchan;
  1206. ctx->rx_qidx = rxq_idx;
  1207. ctx->tx_qidx = txq_idx;
1208. /* Channel ID used by the SGE to forward the packet to the host.
1209. * The same value should be used by FW in the cpl_fw6_pld RSS_CH field.
1210. * The driver programs the PCI channel ID to be used by FW at queue
1211. * allocation time with the value "pi->tx_chan".
1212. */
  1213. ctx->pci_chan_id = txq_idx / txq_perchan;
  1214. }
  1215. out:
  1216. return err;
  1217. }
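/* cra_init for AES algorithms: allocate the software fallback skcipher (and,
 * for XTS, an aes-generic cipher used to compute the tweak) and set the
 * request context size.
 */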
  1218. static int chcr_cra_init(struct crypto_tfm *tfm)
  1219. {
  1220. struct crypto_alg *alg = tfm->__crt_alg;
  1221. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1222. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
  1223. ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
  1224. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  1225. if (IS_ERR(ablkctx->sw_cipher)) {
  1226. pr_err("failed to allocate fallback for %s\n", alg->cra_name);
  1227. return PTR_ERR(ablkctx->sw_cipher);
  1228. }
  1229. if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1230. /* To update the tweak */
  1231. ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
  1232. if (IS_ERR(ablkctx->aes_generic)) {
  1233. pr_err("failed to allocate aes cipher for tweak\n");
  1234. return PTR_ERR(ablkctx->aes_generic);
  1235. }
  1236. } else
  1237. ablkctx->aes_generic = NULL;
  1238. tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
  1239. return chcr_device_init(crypto_tfm_ctx(tfm));
  1240. }
  1241. static int chcr_rfc3686_init(struct crypto_tfm *tfm)
  1242. {
  1243. struct crypto_alg *alg = tfm->__crt_alg;
  1244. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1245. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1246. /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1247. * cannot be used as the fallback in chcr_handle_cipher_response.
1248. */
  1249. ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
  1250. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  1251. if (IS_ERR(ablkctx->sw_cipher)) {
  1252. pr_err("failed to allocate fallback for %s\n", alg->cra_name);
  1253. return PTR_ERR(ablkctx->sw_cipher);
  1254. }
  1255. tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
  1256. return chcr_device_init(crypto_tfm_ctx(tfm));
  1257. }
  1258. static void chcr_cra_exit(struct crypto_tfm *tfm)
  1259. {
  1260. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1261. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
  1262. crypto_free_skcipher(ablkctx->sw_cipher);
  1263. if (ablkctx->aes_generic)
  1264. crypto_free_cipher(ablkctx->aes_generic);
  1265. }
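/* get_alg_config - translate a digest size into the hardware auth mode, MAC
 * key size and partial-hash result size used in the key context.
 */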
  1266. static int get_alg_config(struct algo_param *params,
  1267. unsigned int auth_size)
  1268. {
  1269. switch (auth_size) {
  1270. case SHA1_DIGEST_SIZE:
  1271. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
  1272. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
  1273. params->result_size = SHA1_DIGEST_SIZE;
  1274. break;
  1275. case SHA224_DIGEST_SIZE:
  1276. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  1277. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
  1278. params->result_size = SHA256_DIGEST_SIZE;
  1279. break;
  1280. case SHA256_DIGEST_SIZE:
  1281. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  1282. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
  1283. params->result_size = SHA256_DIGEST_SIZE;
  1284. break;
  1285. case SHA384_DIGEST_SIZE:
  1286. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
  1287. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
  1288. params->result_size = SHA512_DIGEST_SIZE;
  1289. break;
  1290. case SHA512_DIGEST_SIZE:
  1291. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
  1292. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
  1293. params->result_size = SHA512_DIGEST_SIZE;
  1294. break;
  1295. default:
  1296. pr_err("chcr : ERROR, unsupported digest size\n");
  1297. return -EINVAL;
  1298. }
  1299. return 0;
  1300. }
  1301. static inline void chcr_free_shash(struct crypto_shash *base_hash)
  1302. {
  1303. crypto_free_shash(base_hash);
  1304. }
  1305. /**
  1306. * create_hash_wr - Create hash work request
1307. * @req: hash request
 * @param: hash work request parameters
  1308. */
  1309. static struct sk_buff *create_hash_wr(struct ahash_request *req,
  1310. struct hash_wr_param *param)
  1311. {
  1312. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1313. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1314. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
  1315. struct sk_buff *skb = NULL;
  1316. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1317. struct chcr_wr *chcr_req;
  1318. struct ulptx_sgl *ulptx;
  1319. unsigned int nents = 0, transhdr_len;
  1320. unsigned int temp = 0;
  1321. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  1322. GFP_ATOMIC;
  1323. struct adapter *adap = padap(h_ctx(tfm)->dev);
  1324. int error = 0;
  1325. transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
  1326. req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
  1327. param->sg_len) <= SGE_MAX_WR_LEN;
  1328. nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
  1329. CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
  1330. nents += param->bfr_len ? 1 : 0;
  1331. transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
  1332. param->sg_len, 16) : (sgl_len(nents) * 8);
  1333. transhdr_len = roundup(transhdr_len, 16);
  1334. skb = alloc_skb(transhdr_len, flags);
  1335. if (!skb)
  1336. return ERR_PTR(-ENOMEM);
  1337. chcr_req = __skb_put_zero(skb, transhdr_len);
  1338. chcr_req->sec_cpl.op_ivinsrtofst =
  1339. FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
  1340. chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
  1341. chcr_req->sec_cpl.aadstart_cipherstop_hi =
  1342. FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
  1343. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  1344. FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
  1345. chcr_req->sec_cpl.seqno_numivs =
  1346. FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
  1347. param->opad_needed, 0);
  1348. chcr_req->sec_cpl.ivgen_hdrlen =
  1349. FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
  1350. memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
  1351. param->alg_prm.result_size);
  1352. if (param->opad_needed)
  1353. memcpy(chcr_req->key_ctx.key +
  1354. ((param->alg_prm.result_size <= 32) ? 32 :
  1355. CHCR_HASH_MAX_DIGEST_SIZE),
  1356. hmacctx->opad, param->alg_prm.result_size);
  1357. chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
  1358. param->alg_prm.mk_size, 0,
  1359. param->opad_needed,
  1360. ((param->kctx_len +
  1361. sizeof(chcr_req->key_ctx)) >> 4));
  1362. chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
  1363. ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
  1364. DUMMY_BYTES);
  1365. if (param->bfr_len != 0) {
  1366. req_ctx->hctx_wr.dma_addr =
  1367. dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
  1368. param->bfr_len, DMA_TO_DEVICE);
  1369. if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1370. req_ctx->hctx_wr.dma_addr)) {
  1371. error = -ENOMEM;
  1372. goto err;
  1373. }
  1374. req_ctx->hctx_wr.dma_len = param->bfr_len;
  1375. } else {
  1376. req_ctx->hctx_wr.dma_addr = 0;
  1377. }
  1378. chcr_add_hash_src_ent(req, ulptx, param);
1379. /* Request up to max WR size */
  1380. temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
  1381. (param->sg_len + param->bfr_len) : 0);
  1382. atomic_inc(&adap->chcr_stats.digest_rqst);
  1383. create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
  1384. param->hash_size, transhdr_len,
  1385. temp, 0);
  1386. req_ctx->hctx_wr.skb = skb;
  1387. return skb;
  1388. err:
  1389. kfree_skb(skb);
  1390. return ERR_PTR(error);
  1391. }
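/* chcr_ahash_update - buffer sub-block data in reqbfr; once at least one
 * full block is available, DMA map the source and issue a "more" hash WR for
 * the block-aligned portion, carrying the remainder over to the next update.
 */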
  1392. static int chcr_ahash_update(struct ahash_request *req)
  1393. {
  1394. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1395. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1396. struct uld_ctx *u_ctx = NULL;
  1397. struct sk_buff *skb;
  1398. u8 remainder = 0, bs;
  1399. unsigned int nbytes = req->nbytes;
  1400. struct hash_wr_param params;
  1401. int error, isfull = 0;
  1402. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1403. u_ctx = ULD_CTX(h_ctx(rtfm));
  1404. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1405. h_ctx(rtfm)->tx_qidx))) {
  1406. isfull = 1;
  1407. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1408. return -ENOSPC;
  1409. }
  1410. if (nbytes + req_ctx->reqlen >= bs) {
  1411. remainder = (nbytes + req_ctx->reqlen) % bs;
  1412. nbytes = nbytes + req_ctx->reqlen - remainder;
  1413. } else {
  1414. sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
  1415. + req_ctx->reqlen, nbytes, 0);
  1416. req_ctx->reqlen += nbytes;
  1417. return 0;
  1418. }
  1419. chcr_init_hctx_per_wr(req_ctx);
  1420. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1421. if (error)
  1422. return -ENOMEM;
  1423. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1424. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1425. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1426. HASH_SPACE_LEFT(params.kctx_len), 0);
  1427. if (params.sg_len > req->nbytes)
  1428. params.sg_len = req->nbytes;
  1429. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
  1430. req_ctx->reqlen;
  1431. params.opad_needed = 0;
  1432. params.more = 1;
  1433. params.last = 0;
  1434. params.bfr_len = req_ctx->reqlen;
  1435. params.scmd1 = 0;
  1436. req_ctx->hctx_wr.srcsg = req->src;
  1437. params.hash_size = params.alg_prm.result_size;
  1438. req_ctx->data_len += params.sg_len + params.bfr_len;
  1439. skb = create_hash_wr(req, &params);
  1440. if (IS_ERR(skb)) {
  1441. error = PTR_ERR(skb);
  1442. goto unmap;
  1443. }
  1444. req_ctx->hctx_wr.processed += params.sg_len;
  1445. if (remainder) {
  1446. /* Swap buffers */
  1447. swap(req_ctx->reqbfr, req_ctx->skbfr);
  1448. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  1449. req_ctx->reqbfr, remainder, req->nbytes -
  1450. remainder);
  1451. }
  1452. req_ctx->reqlen = remainder;
  1453. skb->dev = u_ctx->lldi.ports[0];
  1454. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1455. chcr_send_wr(skb);
  1456. return isfull ? -EBUSY : -EINPROGRESS;
  1457. unmap:
  1458. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1459. return error;
  1460. }
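/* create_last_hash_block - build the final padded block: 0x80 padding
 * followed by the 64-bit message length in bits (scmd1 << 3) at the end of
 * the block.
 */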
  1461. static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
  1462. {
  1463. memset(bfr_ptr, 0, bs);
  1464. *bfr_ptr = 0x80;
  1465. if (bs == 64)
  1466. *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
  1467. else
  1468. *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
  1469. }
  1470. static int chcr_ahash_final(struct ahash_request *req)
  1471. {
  1472. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1473. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1474. struct hash_wr_param params;
  1475. struct sk_buff *skb;
  1476. struct uld_ctx *u_ctx = NULL;
  1477. u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1478. chcr_init_hctx_per_wr(req_ctx);
  1479. u_ctx = ULD_CTX(h_ctx(rtfm));
  1480. if (is_hmac(crypto_ahash_tfm(rtfm)))
  1481. params.opad_needed = 1;
  1482. else
  1483. params.opad_needed = 0;
  1484. params.sg_len = 0;
  1485. req_ctx->hctx_wr.isfinal = 1;
  1486. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1487. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1488. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1489. params.opad_needed = 1;
  1490. params.kctx_len *= 2;
  1491. } else {
  1492. params.opad_needed = 0;
  1493. }
  1494. req_ctx->hctx_wr.result = 1;
  1495. params.bfr_len = req_ctx->reqlen;
  1496. req_ctx->data_len += params.bfr_len + params.sg_len;
  1497. req_ctx->hctx_wr.srcsg = req->src;
  1498. if (req_ctx->reqlen == 0) {
  1499. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1500. params.last = 0;
  1501. params.more = 1;
  1502. params.scmd1 = 0;
  1503. params.bfr_len = bs;
  1504. } else {
  1505. params.scmd1 = req_ctx->data_len;
  1506. params.last = 1;
  1507. params.more = 0;
  1508. }
  1509. params.hash_size = crypto_ahash_digestsize(rtfm);
  1510. skb = create_hash_wr(req, &params);
  1511. if (IS_ERR(skb))
  1512. return PTR_ERR(skb);
  1513. req_ctx->reqlen = 0;
  1514. skb->dev = u_ctx->lldi.ports[0];
  1515. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1516. chcr_send_wr(skb);
  1517. return -EINPROGRESS;
  1518. }
  1519. static int chcr_ahash_finup(struct ahash_request *req)
  1520. {
  1521. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1522. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1523. struct uld_ctx *u_ctx = NULL;
  1524. struct sk_buff *skb;
  1525. struct hash_wr_param params;
  1526. u8 bs;
  1527. int error, isfull = 0;
  1528. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1529. u_ctx = ULD_CTX(h_ctx(rtfm));
  1530. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1531. h_ctx(rtfm)->tx_qidx))) {
  1532. isfull = 1;
  1533. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1534. return -ENOSPC;
  1535. }
  1536. chcr_init_hctx_per_wr(req_ctx);
  1537. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1538. if (error)
  1539. return -ENOMEM;
  1540. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1541. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1542. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1543. params.kctx_len *= 2;
  1544. params.opad_needed = 1;
  1545. } else {
  1546. params.opad_needed = 0;
  1547. }
  1548. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1549. HASH_SPACE_LEFT(params.kctx_len), 0);
  1550. if (params.sg_len < req->nbytes) {
  1551. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1552. params.kctx_len /= 2;
  1553. params.opad_needed = 0;
  1554. }
  1555. params.last = 0;
  1556. params.more = 1;
  1557. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
  1558. - req_ctx->reqlen;
  1559. params.hash_size = params.alg_prm.result_size;
  1560. params.scmd1 = 0;
  1561. } else {
  1562. params.last = 1;
  1563. params.more = 0;
  1564. params.sg_len = req->nbytes;
  1565. params.hash_size = crypto_ahash_digestsize(rtfm);
  1566. params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
  1567. params.sg_len;
  1568. }
  1569. params.bfr_len = req_ctx->reqlen;
  1570. req_ctx->data_len += params.bfr_len + params.sg_len;
  1571. req_ctx->hctx_wr.result = 1;
  1572. req_ctx->hctx_wr.srcsg = req->src;
  1573. if ((req_ctx->reqlen + req->nbytes) == 0) {
  1574. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1575. params.last = 0;
  1576. params.more = 1;
  1577. params.scmd1 = 0;
  1578. params.bfr_len = bs;
  1579. }
  1580. skb = create_hash_wr(req, &params);
  1581. if (IS_ERR(skb)) {
  1582. error = PTR_ERR(skb);
  1583. goto unmap;
  1584. }
  1585. req_ctx->reqlen = 0;
  1586. req_ctx->hctx_wr.processed += params.sg_len;
  1587. skb->dev = u_ctx->lldi.ports[0];
  1588. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1589. chcr_send_wr(skb);
  1590. return isfull ? -EBUSY : -EINPROGRESS;
  1591. unmap:
  1592. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1593. return error;
  1594. }
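/* chcr_ahash_digest - one-shot hash: reinitialise the request context and
 * either send the whole input in a single final WR or, when it does not fit,
 * start a multi-WR sequence that chcr_ahash_continue() completes.
 */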
  1595. static int chcr_ahash_digest(struct ahash_request *req)
  1596. {
  1597. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1598. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1599. struct uld_ctx *u_ctx = NULL;
  1600. struct sk_buff *skb;
  1601. struct hash_wr_param params;
  1602. u8 bs;
  1603. int error, isfull = 0;
  1604. rtfm->init(req);
  1605. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1606. u_ctx = ULD_CTX(h_ctx(rtfm));
  1607. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1608. h_ctx(rtfm)->tx_qidx))) {
  1609. isfull = 1;
  1610. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1611. return -ENOSPC;
  1612. }
  1613. chcr_init_hctx_per_wr(req_ctx);
  1614. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1615. if (error)
  1616. return -ENOMEM;
  1617. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1618. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1619. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1620. params.kctx_len *= 2;
  1621. params.opad_needed = 1;
  1622. } else {
  1623. params.opad_needed = 0;
  1624. }
  1625. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1626. HASH_SPACE_LEFT(params.kctx_len), 0);
  1627. if (params.sg_len < req->nbytes) {
  1628. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1629. params.kctx_len /= 2;
  1630. params.opad_needed = 0;
  1631. }
  1632. params.last = 0;
  1633. params.more = 1;
  1634. params.scmd1 = 0;
  1635. params.sg_len = rounddown(params.sg_len, bs);
  1636. params.hash_size = params.alg_prm.result_size;
  1637. } else {
  1638. params.sg_len = req->nbytes;
  1639. params.hash_size = crypto_ahash_digestsize(rtfm);
  1640. params.last = 1;
  1641. params.more = 0;
  1642. params.scmd1 = req->nbytes + req_ctx->data_len;
  1643. }
  1644. params.bfr_len = 0;
  1645. req_ctx->hctx_wr.result = 1;
  1646. req_ctx->hctx_wr.srcsg = req->src;
  1647. req_ctx->data_len += params.bfr_len + params.sg_len;
  1648. if (req->nbytes == 0) {
  1649. create_last_hash_block(req_ctx->reqbfr, bs, 0);
  1650. params.more = 1;
  1651. params.bfr_len = bs;
  1652. }
  1653. skb = create_hash_wr(req, &params);
  1654. if (IS_ERR(skb)) {
  1655. error = PTR_ERR(skb);
  1656. goto unmap;
  1657. }
  1658. req_ctx->hctx_wr.processed += params.sg_len;
  1659. skb->dev = u_ctx->lldi.ports[0];
  1660. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1661. chcr_send_wr(skb);
  1662. return isfull ? -EBUSY : -EINPROGRESS;
  1663. unmap:
  1664. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1665. return error;
  1666. }
  1667. static int chcr_ahash_continue(struct ahash_request *req)
  1668. {
  1669. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1670. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1671. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1672. struct uld_ctx *u_ctx = NULL;
  1673. struct sk_buff *skb;
  1674. struct hash_wr_param params;
  1675. u8 bs;
  1676. int error;
  1677. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1678. u_ctx = ULD_CTX(h_ctx(rtfm));
  1679. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1680. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1681. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1682. params.kctx_len *= 2;
  1683. params.opad_needed = 1;
  1684. } else {
  1685. params.opad_needed = 0;
  1686. }
  1687. params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
  1688. HASH_SPACE_LEFT(params.kctx_len),
  1689. hctx_wr->src_ofst);
  1690. if ((params.sg_len + hctx_wr->processed) > req->nbytes)
  1691. params.sg_len = req->nbytes - hctx_wr->processed;
  1692. if (!hctx_wr->result ||
  1693. ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
  1694. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1695. params.kctx_len /= 2;
  1696. params.opad_needed = 0;
  1697. }
  1698. params.last = 0;
  1699. params.more = 1;
  1700. params.sg_len = rounddown(params.sg_len, bs);
  1701. params.hash_size = params.alg_prm.result_size;
  1702. params.scmd1 = 0;
  1703. } else {
  1704. params.last = 1;
  1705. params.more = 0;
  1706. params.hash_size = crypto_ahash_digestsize(rtfm);
  1707. params.scmd1 = reqctx->data_len + params.sg_len;
  1708. }
  1709. params.bfr_len = 0;
  1710. reqctx->data_len += params.sg_len;
  1711. skb = create_hash_wr(req, &params);
  1712. if (IS_ERR(skb)) {
  1713. error = PTR_ERR(skb);
  1714. goto err;
  1715. }
  1716. hctx_wr->processed += params.sg_len;
  1717. skb->dev = u_ctx->lldi.ports[0];
  1718. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1719. chcr_send_wr(skb);
  1720. return 0;
  1721. err:
  1722. return error;
  1723. }
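/* chcr_handle_ahash_resp - completion handler for hash WRs: copy out the
 * final digest or the partial hash, unmap DMA buffers, and issue the next WR
 * via chcr_ahash_continue() when more data remains.
 */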
  1724. static inline void chcr_handle_ahash_resp(struct ahash_request *req,
  1725. unsigned char *input,
  1726. int err)
  1727. {
  1728. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1729. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1730. int digestsize, updated_digestsize;
  1731. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1732. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1733. if (input == NULL)
  1734. goto out;
  1735. digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
  1736. updated_digestsize = digestsize;
  1737. if (digestsize == SHA224_DIGEST_SIZE)
  1738. updated_digestsize = SHA256_DIGEST_SIZE;
  1739. else if (digestsize == SHA384_DIGEST_SIZE)
  1740. updated_digestsize = SHA512_DIGEST_SIZE;
  1741. if (hctx_wr->dma_addr) {
  1742. dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
  1743. hctx_wr->dma_len, DMA_TO_DEVICE);
  1744. hctx_wr->dma_addr = 0;
  1745. }
  1746. if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
  1747. req->nbytes)) {
  1748. if (hctx_wr->result == 1) {
  1749. hctx_wr->result = 0;
  1750. memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
  1751. digestsize);
  1752. } else {
  1753. memcpy(reqctx->partial_hash,
  1754. input + sizeof(struct cpl_fw6_pld),
  1755. updated_digestsize);
  1756. }
  1757. goto unmap;
  1758. }
  1759. memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
  1760. updated_digestsize);
  1761. err = chcr_ahash_continue(req);
  1762. if (err)
  1763. goto unmap;
  1764. return;
  1765. unmap:
  1766. if (hctx_wr->is_sg_map)
  1767. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1768. out:
  1769. req->base.complete(&req->base, err);
  1770. }
  1771. /*
1772. * chcr_handle_resp - handle a completed crypto request: dispatch to the
1773. * AEAD, cipher or hash handler, which unmaps the request's DMA buffers.
 * @req: crypto request
 * @input: response data from the hardware
 * @err: completion status
  1774. */
  1775. int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
  1776. int err)
  1777. {
  1778. struct crypto_tfm *tfm = req->tfm;
  1779. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1780. struct adapter *adap = padap(ctx->dev);
  1781. switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
  1782. case CRYPTO_ALG_TYPE_AEAD:
  1783. chcr_handle_aead_resp(aead_request_cast(req), input, err);
  1784. break;
  1785. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  1786. err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
  1787. input, err);
  1788. break;
  1789. case CRYPTO_ALG_TYPE_AHASH:
  1790. chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
  1791. }
  1792. atomic_inc(&adap->chcr_stats.complete);
  1793. return err;
  1794. }
  1795. static int chcr_ahash_export(struct ahash_request *areq, void *out)
  1796. {
  1797. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1798. struct chcr_ahash_req_ctx *state = out;
  1799. state->reqlen = req_ctx->reqlen;
  1800. state->data_len = req_ctx->data_len;
  1801. memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
  1802. memcpy(state->partial_hash, req_ctx->partial_hash,
  1803. CHCR_HASH_MAX_DIGEST_SIZE);
  1804. chcr_init_hctx_per_wr(state);
  1805. return 0;
  1806. }
  1807. static int chcr_ahash_import(struct ahash_request *areq, const void *in)
  1808. {
  1809. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1810. struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
  1811. req_ctx->reqlen = state->reqlen;
  1812. req_ctx->data_len = state->data_len;
  1813. req_ctx->reqbfr = req_ctx->bfr1;
  1814. req_ctx->skbfr = req_ctx->bfr2;
  1815. memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
  1816. memcpy(req_ctx->partial_hash, state->partial_hash,
  1817. CHCR_HASH_MAX_DIGEST_SIZE);
  1818. chcr_init_hctx_per_wr(req_ctx);
  1819. return 0;
  1820. }
  1821. static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
  1822. unsigned int keylen)
  1823. {
  1824. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
  1825. unsigned int digestsize = crypto_ahash_digestsize(tfm);
  1826. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
  1827. unsigned int i, err = 0, updated_digestsize;
  1828. SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1829. /* Use the key to calculate the ipad and opad. The ipad will be sent with
1830. * the first request's data and the opad with the final hash result. They
1831. * are stored in hmacctx->ipad and hmacctx->opad respectively.
  1832. */
  1833. shash->tfm = hmacctx->base_hash;
  1834. shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
  1835. if (keylen > bs) {
  1836. err = crypto_shash_digest(shash, key, keylen,
  1837. hmacctx->ipad);
  1838. if (err)
  1839. goto out;
  1840. keylen = digestsize;
  1841. } else {
  1842. memcpy(hmacctx->ipad, key, keylen);
  1843. }
  1844. memset(hmacctx->ipad + keylen, 0, bs - keylen);
  1845. memcpy(hmacctx->opad, hmacctx->ipad, bs);
  1846. for (i = 0; i < bs / sizeof(int); i++) {
  1847. *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
  1848. *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
  1849. }
  1850. updated_digestsize = digestsize;
  1851. if (digestsize == SHA224_DIGEST_SIZE)
  1852. updated_digestsize = SHA256_DIGEST_SIZE;
  1853. else if (digestsize == SHA384_DIGEST_SIZE)
  1854. updated_digestsize = SHA512_DIGEST_SIZE;
  1855. err = chcr_compute_partial_hash(shash, hmacctx->ipad,
  1856. hmacctx->ipad, digestsize);
  1857. if (err)
  1858. goto out;
  1859. chcr_change_order(hmacctx->ipad, updated_digestsize);
  1860. err = chcr_compute_partial_hash(shash, hmacctx->opad,
  1861. hmacctx->opad, digestsize);
  1862. if (err)
  1863. goto out;
  1864. chcr_change_order(hmacctx->opad, updated_digestsize);
  1865. out:
  1866. return err;
  1867. }
  1868. static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  1869. unsigned int key_len)
  1870. {
  1871. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  1872. unsigned short context_size = 0;
  1873. int err;
  1874. err = chcr_cipher_fallback_setkey(cipher, key, key_len);
  1875. if (err)
  1876. goto badkey_err;
  1877. memcpy(ablkctx->key, key, key_len);
  1878. ablkctx->enckey_len = key_len;
  1879. get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
  1880. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
  1881. ablkctx->key_ctx_hdr =
  1882. FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
  1883. CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
  1884. CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
  1885. CHCR_KEYCTX_NO_KEY, 1,
  1886. 0, context_size);
  1887. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
  1888. return 0;
  1889. badkey_err:
  1890. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1891. ablkctx->enckey_len = 0;
  1892. return err;
  1893. }
  1894. static int chcr_sha_init(struct ahash_request *areq)
  1895. {
  1896. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1897. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  1898. int digestsize = crypto_ahash_digestsize(tfm);
  1899. req_ctx->data_len = 0;
  1900. req_ctx->reqlen = 0;
  1901. req_ctx->reqbfr = req_ctx->bfr1;
  1902. req_ctx->skbfr = req_ctx->bfr2;
  1903. copy_hash_init_values(req_ctx->partial_hash, digestsize);
  1904. return 0;
  1905. }
  1906. static int chcr_sha_cra_init(struct crypto_tfm *tfm)
  1907. {
  1908. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1909. sizeof(struct chcr_ahash_req_ctx));
  1910. return chcr_device_init(crypto_tfm_ctx(tfm));
  1911. }
  1912. static int chcr_hmac_init(struct ahash_request *areq)
  1913. {
  1914. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1915. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
  1916. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
  1917. unsigned int digestsize = crypto_ahash_digestsize(rtfm);
  1918. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1919. chcr_sha_init(areq);
  1920. req_ctx->data_len = bs;
  1921. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1922. if (digestsize == SHA224_DIGEST_SIZE)
  1923. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1924. SHA256_DIGEST_SIZE);
  1925. else if (digestsize == SHA384_DIGEST_SIZE)
  1926. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1927. SHA512_DIGEST_SIZE);
  1928. else
  1929. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1930. digestsize);
  1931. }
  1932. return 0;
  1933. }
  1934. static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
  1935. {
  1936. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1937. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1938. unsigned int digestsize =
  1939. crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
  1940. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1941. sizeof(struct chcr_ahash_req_ctx));
  1942. hmacctx->base_hash = chcr_alloc_shash(digestsize);
  1943. if (IS_ERR(hmacctx->base_hash))
  1944. return PTR_ERR(hmacctx->base_hash);
  1945. return chcr_device_init(crypto_tfm_ctx(tfm));
  1946. }
  1947. static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
  1948. {
  1949. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1950. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1951. if (hmacctx->base_hash) {
  1952. chcr_free_shash(hmacctx->base_hash);
  1953. hmacctx->base_hash = NULL;
  1954. }
  1955. }
  1956. inline void chcr_aead_common_exit(struct aead_request *req)
  1957. {
  1958. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1959. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1960. struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
  1961. chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
  1962. }
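/* chcr_aead_common_init - common AEAD request setup: validate key and crypt
 * lengths, set up the scratch pad used for the CCM B0 block, DMA map the
 * request and count the source SG entries for AAD and payload.
 */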
  1963. static int chcr_aead_common_init(struct aead_request *req)
  1964. {
  1965. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1966. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  1967. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1968. unsigned int authsize = crypto_aead_authsize(tfm);
  1969. int error = -EINVAL;
  1970. /* validate key size */
  1971. if (aeadctx->enckey_len == 0)
  1972. goto err;
  1973. if (reqctx->op && req->cryptlen < authsize)
  1974. goto err;
  1975. if (reqctx->b0_len)
  1976. reqctx->scratch_pad = reqctx->iv + IV;
  1977. else
  1978. reqctx->scratch_pad = NULL;
  1979. error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
  1980. reqctx->op);
  1981. if (error) {
  1982. error = -ENOMEM;
  1983. goto err;
  1984. }
  1985. reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
  1986. CHCR_SRC_SG_SIZE, 0);
  1987. reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
  1988. CHCR_SRC_SG_SIZE, req->assoclen);
  1989. return 0;
  1990. err:
  1991. return error;
  1992. }
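/* The hardware path is skipped (and the software AEAD used instead) when the
 * payload is empty, the destination needs too many DSGL entries, the AAD
 * exceeds the hardware limit or the WR would exceed SGE_MAX_WR_LEN.
 */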
  1993. static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
  1994. int aadmax, int wrlen,
  1995. unsigned short op_type)
  1996. {
  1997. unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
  1998. if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
  1999. dst_nents > MAX_DSGL_ENT ||
  2000. (req->assoclen > aadmax) ||
  2001. (wrlen > SGE_MAX_WR_LEN))
  2002. return 1;
  2003. return 0;
  2004. }
  2005. static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
  2006. {
  2007. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2008. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2009. struct aead_request *subreq = aead_request_ctx(req);
  2010. aead_request_set_tfm(subreq, aeadctx->sw_cipher);
  2011. aead_request_set_callback(subreq, req->base.flags,
  2012. req->base.complete, req->base.data);
  2013. aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
  2014. req->iv);
  2015. aead_request_set_ad(subreq, req->assoclen);
  2016. return op_type ? crypto_aead_decrypt(subreq) :
  2017. crypto_aead_encrypt(subreq);
  2018. }
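/* create_authenc_wr - build the WR for AES-CBC/CTR with HMAC (and the NULL
 * cipher variants): fill the SEC CPL, copy the cipher key (or the reverse
 * round key for decryption) and the precomputed ipad/opad, then attach the
 * source and destination SG lists.
 */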
  2019. static struct sk_buff *create_authenc_wr(struct aead_request *req,
  2020. unsigned short qid,
  2021. int size)
  2022. {
  2023. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2024. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2025. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  2026. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2027. struct sk_buff *skb = NULL;
  2028. struct chcr_wr *chcr_req;
  2029. struct cpl_rx_phys_dsgl *phys_cpl;
  2030. struct ulptx_sgl *ulptx;
  2031. unsigned int transhdr_len;
  2032. unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
  2033. unsigned int kctx_len = 0, dnents;
  2034. unsigned int assoclen = req->assoclen;
  2035. unsigned int authsize = crypto_aead_authsize(tfm);
  2036. int error = -EINVAL;
  2037. int null = 0;
  2038. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2039. GFP_ATOMIC;
  2040. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2041. if (req->cryptlen == 0)
  2042. return NULL;
  2043. reqctx->b0_len = 0;
  2044. error = chcr_aead_common_init(req);
  2045. if (error)
  2046. return ERR_PTR(error);
  2047. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
  2048. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2049. null = 1;
  2050. assoclen = 0;
  2051. reqctx->aad_nents = 0;
  2052. }
  2053. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2054. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2055. (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
  2056. req->assoclen);
  2057. dnents += MIN_AUTH_SG; // For IV
  2058. dst_size = get_space_for_phys_dsgl(dnents);
  2059. kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
  2060. - sizeof(chcr_req->key_ctx);
  2061. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2062. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
  2063. SGE_MAX_WR_LEN;
  2064. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
  2065. : (sgl_len(reqctx->src_nents + reqctx->aad_nents
  2066. + MIN_GCM_SG) * 8);
  2067. transhdr_len += temp;
  2068. transhdr_len = roundup(transhdr_len, 16);
  2069. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2070. transhdr_len, reqctx->op)) {
  2071. atomic_inc(&adap->chcr_stats.fallback);
  2072. chcr_aead_common_exit(req);
  2073. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2074. }
  2075. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2076. if (!skb) {
  2077. error = -ENOMEM;
  2078. goto err;
  2079. }
  2080. chcr_req = __skb_put_zero(skb, transhdr_len);
  2081. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
  2082. /*
2083. * Input order is AAD, IV and payload, where the IV is included as part
2084. * of the authdata. All other fields should be filled according to the
2085. * hardware spec.
  2086. */
  2087. chcr_req->sec_cpl.op_ivinsrtofst =
  2088. FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
  2089. assoclen + 1);
  2090. chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
  2091. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2092. assoclen ? 1 : 0, assoclen,
  2093. assoclen + IV + 1,
  2094. (temp & 0x1F0) >> 4);
  2095. chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
  2096. temp & 0xF,
  2097. null ? 0 : assoclen + IV + 1,
  2098. temp, temp);
  2099. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
  2100. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
  2101. temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  2102. else
  2103. temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
  2104. chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
  2105. (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
  2106. temp,
  2107. actx->auth_mode, aeadctx->hmac_ctrl,
  2108. IV >> 1);
  2109. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2110. 0, 0, dst_size);
  2111. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2112. if (reqctx->op == CHCR_ENCRYPT_OP ||
  2113. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2114. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
  2115. memcpy(chcr_req->key_ctx.key, aeadctx->key,
  2116. aeadctx->enckey_len);
  2117. else
  2118. memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
  2119. aeadctx->enckey_len);
  2120. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2121. actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
  2122. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2123. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2124. memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
  2125. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
  2126. CTR_RFC3686_IV_SIZE);
  2127. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  2128. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  2129. } else {
  2130. memcpy(reqctx->iv, req->iv, IV);
  2131. }
  2132. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2133. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2134. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2135. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2136. atomic_inc(&adap->chcr_stats.cipher_rqst);
  2137. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2138. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2139. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2140. transhdr_len, temp, 0);
  2141. reqctx->skb = skb;
  2142. return skb;
  2143. err:
  2144. chcr_aead_common_exit(req);
  2145. return ERR_PTR(error);
  2146. }
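/* chcr_aead_dma_map/chcr_aead_dma_unmap - map the IV (plus the CCM B0
 * scratch area, if any) and the source/destination scatterlists. In-place
 * requests use a single bidirectional mapping.
 */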
  2147. int chcr_aead_dma_map(struct device *dev,
  2148. struct aead_request *req,
  2149. unsigned short op_type)
  2150. {
  2151. int error;
  2152. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2153. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2154. unsigned int authsize = crypto_aead_authsize(tfm);
  2155. int dst_size;
  2156. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2157. -authsize : authsize);
  2158. if (!req->cryptlen || !dst_size)
  2159. return 0;
  2160. reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
  2161. DMA_BIDIRECTIONAL);
  2162. if (dma_mapping_error(dev, reqctx->iv_dma))
  2163. return -ENOMEM;
  2164. if (reqctx->b0_len)
  2165. reqctx->b0_dma = reqctx->iv_dma + IV;
  2166. else
  2167. reqctx->b0_dma = 0;
  2168. if (req->src == req->dst) {
  2169. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2170. DMA_BIDIRECTIONAL);
  2171. if (!error)
  2172. goto err;
  2173. } else {
  2174. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2175. DMA_TO_DEVICE);
  2176. if (!error)
  2177. goto err;
  2178. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2179. DMA_FROM_DEVICE);
  2180. if (!error) {
  2181. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2182. DMA_TO_DEVICE);
  2183. goto err;
  2184. }
  2185. }
  2186. return 0;
  2187. err:
2188. dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len, DMA_BIDIRECTIONAL);
  2189. return -ENOMEM;
  2190. }
  2191. void chcr_aead_dma_unmap(struct device *dev,
  2192. struct aead_request *req,
  2193. unsigned short op_type)
  2194. {
  2195. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2196. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2197. unsigned int authsize = crypto_aead_authsize(tfm);
  2198. int dst_size;
  2199. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2200. -authsize : authsize);
  2201. if (!req->cryptlen || !dst_size)
  2202. return;
  2203. dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
  2204. DMA_BIDIRECTIONAL);
  2205. if (req->src == req->dst) {
  2206. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2207. DMA_BIDIRECTIONAL);
  2208. } else {
  2209. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2210. DMA_TO_DEVICE);
  2211. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2212. DMA_FROM_DEVICE);
  2213. }
  2214. }
  2215. void chcr_add_aead_src_ent(struct aead_request *req,
  2216. struct ulptx_sgl *ulptx,
  2217. unsigned int assoclen)
  2218. {
  2219. struct ulptx_walk ulp_walk;
  2220. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2221. if (reqctx->imm) {
  2222. u8 *buf = (u8 *)ulptx;
  2223. if (reqctx->b0_len) {
  2224. memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
  2225. buf += reqctx->b0_len;
  2226. }
  2227. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2228. buf, assoclen, 0);
  2229. buf += assoclen;
  2230. memcpy(buf, reqctx->iv, IV);
  2231. buf += IV;
  2232. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2233. buf, req->cryptlen, req->assoclen);
  2234. } else {
  2235. ulptx_walk_init(&ulp_walk, ulptx);
  2236. if (reqctx->b0_len)
  2237. ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
  2238. &reqctx->b0_dma);
  2239. ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
  2240. ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
  2241. ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
  2242. req->assoclen);
  2243. ulptx_walk_end(&ulp_walk);
  2244. }
  2245. }
  2246. void chcr_add_aead_dst_ent(struct aead_request *req,
  2247. struct cpl_rx_phys_dsgl *phys_cpl,
  2248. unsigned int assoclen,
  2249. unsigned short qid)
  2250. {
  2251. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2252. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2253. struct dsgl_walk dsgl_walk;
  2254. unsigned int authsize = crypto_aead_authsize(tfm);
  2255. struct chcr_context *ctx = a_ctx(tfm);
  2256. u32 temp;
  2257. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2258. if (reqctx->b0_len)
  2259. dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
  2260. dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
  2261. dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
  2262. temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
  2263. dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
  2264. dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
  2265. }
  2266. void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
  2267. void *ulptx,
  2268. struct cipher_wr_param *wrparam)
  2269. {
  2270. struct ulptx_walk ulp_walk;
  2271. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2272. u8 *buf = ulptx;
  2273. memcpy(buf, reqctx->iv, IV);
  2274. buf += IV;
  2275. if (reqctx->imm) {
  2276. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2277. buf, wrparam->bytes, reqctx->processed);
  2278. } else {
  2279. ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
  2280. ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
  2281. reqctx->src_ofst);
  2282. reqctx->srcsg = ulp_walk.last_sg;
  2283. reqctx->src_ofst = ulp_walk.last_sg_len;
  2284. ulptx_walk_end(&ulp_walk);
  2285. }
  2286. }
  2287. void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
  2288. struct cpl_rx_phys_dsgl *phys_cpl,
  2289. struct cipher_wr_param *wrparam,
  2290. unsigned short qid)
  2291. {
  2292. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2293. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
  2294. struct chcr_context *ctx = c_ctx(tfm);
  2295. struct dsgl_walk dsgl_walk;
  2296. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2297. dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
  2298. reqctx->dst_ofst);
  2299. reqctx->dstsg = dsgl_walk.last_sg;
  2300. reqctx->dst_ofst = dsgl_walk.last_sg_len;
  2301. dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
  2302. }
  2303. void chcr_add_hash_src_ent(struct ahash_request *req,
  2304. struct ulptx_sgl *ulptx,
  2305. struct hash_wr_param *param)
  2306. {
  2307. struct ulptx_walk ulp_walk;
  2308. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  2309. if (reqctx->hctx_wr.imm) {
  2310. u8 *buf = (u8 *)ulptx;
  2311. if (param->bfr_len) {
  2312. memcpy(buf, reqctx->reqbfr, param->bfr_len);
  2313. buf += param->bfr_len;
  2314. }
  2315. sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
  2316. sg_nents(reqctx->hctx_wr.srcsg), buf,
  2317. param->sg_len, 0);
  2318. } else {
  2319. ulptx_walk_init(&ulp_walk, ulptx);
  2320. if (param->bfr_len)
  2321. ulptx_walk_add_page(&ulp_walk, param->bfr_len,
  2322. &reqctx->hctx_wr.dma_addr);
  2323. ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
  2324. param->sg_len, reqctx->hctx_wr.src_ofst);
  2325. reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
  2326. reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
  2327. ulptx_walk_end(&ulp_walk);
  2328. }
  2329. }
  2330. int chcr_hash_dma_map(struct device *dev,
  2331. struct ahash_request *req)
  2332. {
  2333. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2334. int error = 0;
  2335. if (!req->nbytes)
  2336. return 0;
  2337. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2338. DMA_TO_DEVICE);
  2339. if (!error)
  2340. return -ENOMEM;
  2341. req_ctx->hctx_wr.is_sg_map = 1;
  2342. return 0;
  2343. }
  2344. void chcr_hash_dma_unmap(struct device *dev,
  2345. struct ahash_request *req)
  2346. {
  2347. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2348. if (!req->nbytes)
  2349. return;
  2350. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2351. DMA_TO_DEVICE);
  2352. req_ctx->hctx_wr.is_sg_map = 0;
  2353. }
  2354. int chcr_cipher_dma_map(struct device *dev,
  2355. struct ablkcipher_request *req)
  2356. {
  2357. int error;
  2358. if (req->src == req->dst) {
  2359. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2360. DMA_BIDIRECTIONAL);
  2361. if (!error)
  2362. goto err;
  2363. } else {
  2364. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2365. DMA_TO_DEVICE);
  2366. if (!error)
  2367. goto err;
  2368. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2369. DMA_FROM_DEVICE);
  2370. if (!error) {
  2371. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2372. DMA_TO_DEVICE);
  2373. goto err;
  2374. }
  2375. }
  2376. return 0;
  2377. err:
  2378. return -ENOMEM;
  2379. }
  2380. void chcr_cipher_dma_unmap(struct device *dev,
  2381. struct ablkcipher_request *req)
  2382. {
  2383. if (req->src == req->dst) {
  2384. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2385. DMA_BIDIRECTIONAL);
  2386. } else {
  2387. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2388. DMA_TO_DEVICE);
  2389. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2390. DMA_FROM_DEVICE);
  2391. }
  2392. }
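/* set_msg_len - write the CCM message length into the last csize bytes of
 * the B0 block in big-endian form; fail with -EOVERFLOW if it does not fit.
 */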
  2393. static int set_msg_len(u8 *block, unsigned int msglen, int csize)
  2394. {
  2395. __be32 data;
  2396. memset(block, 0, csize);
  2397. block += csize;
  2398. if (csize >= 4)
  2399. csize = 4;
  2400. else if (msglen > (unsigned int)(1 << (8 * csize)))
  2401. return -EOVERFLOW;
  2402. data = cpu_to_be32(msglen);
  2403. memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
  2404. return 0;
  2405. }
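/* generate_b0 - build the CCM B0 block in the scratch pad from the IV, the
 * authentication tag size and the message length, as defined by RFC 3610.
 */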
  2406. static void generate_b0(struct aead_request *req,
  2407. struct chcr_aead_ctx *aeadctx,
  2408. unsigned short op_type)
  2409. {
  2410. unsigned int l, lp, m;
  2411. int rc;
  2412. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2413. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2414. u8 *b0 = reqctx->scratch_pad;
  2415. m = crypto_aead_authsize(aead);
  2416. memcpy(b0, reqctx->iv, 16);
  2417. lp = b0[0];
  2418. l = lp + 1;
  2419. /* set m, bits 3-5 */
  2420. *b0 |= (8 * ((m - 2) / 2));
  2421. /* set adata, bit 6, if associated data is used */
  2422. if (req->assoclen)
  2423. *b0 |= 64;
  2424. rc = set_msg_len(b0 + 16 - l,
  2425. (op_type == CHCR_DECRYPT_OP) ?
  2426. req->cryptlen - m : req->cryptlen, l);
  2427. }
  2428. static inline int crypto_ccm_check_iv(const u8 *iv)
  2429. {
  2430. /* 2 <= L <= 8, so 1 <= L' <= 7. */
  2431. if (iv[0] < 1 || iv[0] > 7)
  2432. return -EINVAL;
  2433. return 0;
  2434. }
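/* ccm_format_packet - construct the per-request IV (the RFC4309 variant
 * prepends the 3-byte salt and sets the flags byte to 3) and store the AAD
 * length right after the B0 block in the scratch pad.
 */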
  2435. static int ccm_format_packet(struct aead_request *req,
  2436. struct chcr_aead_ctx *aeadctx,
  2437. unsigned int sub_type,
  2438. unsigned short op_type,
  2439. unsigned int assoclen)
  2440. {
  2441. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2442. int rc = 0;
  2443. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2444. reqctx->iv[0] = 3;
  2445. memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
  2446. memcpy(reqctx->iv + 4, req->iv, 8);
  2447. memset(reqctx->iv + 12, 0, 4);
  2448. } else {
  2449. memcpy(reqctx->iv, req->iv, 16);
  2450. }
  2451. if (assoclen)
  2452. *((unsigned short *)(reqctx->scratch_pad + 16)) =
  2453. htons(assoclen);
  2454. generate_b0(req, aeadctx, op_type);
2455. /* zero the counter portion of the IV */
  2456. memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
  2457. return rc;
  2458. }
  2459. static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
  2460. unsigned int dst_size,
  2461. struct aead_request *req,
  2462. unsigned short op_type)
  2463. {
  2464. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2465. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2466. unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
  2467. unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
  2468. unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
  2469. unsigned int ccm_xtra;
  2470. unsigned char tag_offset = 0, auth_offset = 0;
  2471. unsigned int assoclen;
  2472. if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
  2473. assoclen = req->assoclen - 8;
  2474. else
  2475. assoclen = req->assoclen;
  2476. ccm_xtra = CCM_B0_SIZE +
  2477. ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
  2478. auth_offset = req->cryptlen ?
  2479. (assoclen + IV + 1 + ccm_xtra) : 0;
  2480. if (op_type == CHCR_DECRYPT_OP) {
  2481. if (crypto_aead_authsize(tfm) != req->cryptlen)
  2482. tag_offset = crypto_aead_authsize(tfm);
  2483. else
  2484. auth_offset = 0;
  2485. }
  2486. sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
  2487. 2, assoclen + 1 + ccm_xtra);
  2488. sec_cpl->pldlen =
  2489. htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2490. /* For CCM there will always be a B0 block, so AAD start will always be 1 */
  2491. sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2492. 1, assoclen + ccm_xtra, assoclen
  2493. + IV + 1 + ccm_xtra, 0);
  2494. sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
  2495. auth_offset, tag_offset,
  2496. (op_type == CHCR_ENCRYPT_OP) ? 0 :
  2497. crypto_aead_authsize(tfm));
  2498. sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
  2499. (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
  2500. cipher_mode, mac_mode,
  2501. aeadctx->hmac_ctrl, IV >> 1);
  2502. sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
  2503. 0, dst_size);
  2504. }
  2505. static int aead_ccm_validate_input(unsigned short op_type,
  2506. struct aead_request *req,
  2507. struct chcr_aead_ctx *aeadctx,
  2508. unsigned int sub_type)
  2509. {
  2510. if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2511. if (crypto_ccm_check_iv(req->iv)) {
  2512. pr_err("CCM: IV check fails\n");
  2513. return -EINVAL;
  2514. }
  2515. } else {
  2516. if (req->assoclen != 16 && req->assoclen != 20) {
  2517. pr_err("RFC4309: Invalid AAD length %d\n",
  2518. req->assoclen);
  2519. return -EINVAL;
  2520. }
  2521. }
  2522. return 0;
  2523. }
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen
			+ (reqctx->op ? -authsize : authsize),
			CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_CCM_SG; /* For IV and B0 */
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
			 MIN_CCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = (struct chcr_wr *)__skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}

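/*
 * create_gcm_wr - build a firmware work request for GCM/RFC4106.
 *
 * The key context carries the AES key followed by the precomputed GHASH
 * subkey H (see chcr_gcm_setkey()). The 16-byte hardware IV is assembled
 * as salt | IV | 0x00000001 for RFC4106, or IV | 0x00000001 for plain GCM,
 * matching the initial counter block used by GCM.
 */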
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;
	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_GCM_SG; /* For IV */
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
		(sgl_len(reqctx->src_nents +
			 reqctx->aad_nents + MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	/* Offset of tag from end */
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->dev->rx_channel_id, 2,
					(assoclen + 1));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16 byte iv: S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}

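/*
 * Transform init/exit: every AEAD tfm also allocates a software fallback
 * cipher of the same name, so requests the hardware cannot handle (for
 * example oversized AAD or work requests) can be redirected to it. The
 * request size is the larger of the driver's own request context and what
 * the fallback needs, so either path can reuse the same request memory.
 */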
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

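/*
 * The setauthsize handlers translate the requested ICV length into the
 * hardware HMAC_CTRL truncation mode and record whether the tag can be
 * verified in hardware (VERIFY_HW) or must be checked in software
 * (VERIFY_SW) because no matching truncation mode exists.
 */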
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The IPsec authsize for SHA1 is 12, not 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

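/*
 * chcr_ccm_common_setkey - program the AES key for CCM/RFC4309.
 *
 * CCM uses the same AES key for CBC-MAC and CTR, so the key context header
 * advertises cipher and MAC keys of the same width and the context is sized
 * for two copies of the (16-byte aligned) key; the second copy is written
 * when the work request is built.
 */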
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}

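/*
 * chcr_gcm_setkey - program the AES key for GCM/RFC4106 and precompute the
 * GHASH hash subkey H = AES_K(0^128) with a software AES cipher. H is kept
 * in the context and copied after the AES key when the GCM work request is
 * built. For RFC4106 the trailing 4 key bytes are kept aside as the salt
 * that prefixes the per-request IV.
 */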
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %u\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the hash subkey H = CIPH(K, 0^128); it goes into the
	 * key context right after the AES key.
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}

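/*
 * chcr_authenc_setkey - split an authenc() key into cipher and MAC keys.
 *
 * The encryption key is copied into the key context (with a reversed round
 * key for CBC decryption), while the authentication key is only used here
 * to precompute H(ipad) and H(opad) partial hashes, which are what goes
 * into the key context instead of the raw HMAC key.
 */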
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it holds both the authentication and the cipher key */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. The authkey is used here to generate
	 * h(ipad) and h(opad), so it is not needed again. authkeylen equals
	 * the hash digest size.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Hashing of the auth key failed\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digests to network byte order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);
		memzero_explicit(&keys, sizeof(keys));

		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}

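/*
 * chcr_aead_digest_null_setkey - setkey for the authenc(digest_null,...)
 * modes: only the cipher key is programmed and the auth mode is NOP, so no
 * ipad/opad precomputation is needed.
 */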
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it holds both the authentication and the cipher key */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %u\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

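/*
 * chcr_aead_op - common submission path for all AEAD requests: build the
 * work request with the supplied constructor and post it to the transmit
 * queue. Returns -EINPROGRESS on success, -ENOSPC when the queue is full
 * and the request may not be backlogged, or -EBUSY when it was queued even
 * though the queue is full.
 */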
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

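/*
 * Encrypt/decrypt entry points: pick the work-request constructor that
 * matches the algorithm subtype. On decrypt, when the tag length has no
 * hardware truncation mode (mayverify == VERIFY_SW), the full digest is
 * requested from the hardware so the tag can be checked outside it.
 */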
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

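/*
 * driver_algs - the algorithm templates registered with the crypto API:
 * AES block cipher modes, plain and HMAC hashes, and the AEAD modes (GCM,
 * CCM, their RFC4106/RFC4309 variants and the authenc combinations).
 * Common callbacks are filled in at registration time by chcr_register_alg().
 */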
  3255. static struct chcr_alg_template driver_algs[] = {
  3256. /* AES-CBC */
  3257. {
  3258. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
  3259. .is_registered = 0,
  3260. .alg.crypto = {
  3261. .cra_name = "cbc(aes)",
  3262. .cra_driver_name = "cbc-aes-chcr",
  3263. .cra_blocksize = AES_BLOCK_SIZE,
  3264. .cra_init = chcr_cra_init,
  3265. .cra_exit = chcr_cra_exit,
  3266. .cra_u.ablkcipher = {
  3267. .min_keysize = AES_MIN_KEY_SIZE,
  3268. .max_keysize = AES_MAX_KEY_SIZE,
  3269. .ivsize = AES_BLOCK_SIZE,
  3270. .setkey = chcr_aes_cbc_setkey,
  3271. .encrypt = chcr_aes_encrypt,
  3272. .decrypt = chcr_aes_decrypt,
  3273. }
  3274. }
  3275. },
  3276. {
  3277. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
  3278. .is_registered = 0,
  3279. .alg.crypto = {
  3280. .cra_name = "xts(aes)",
  3281. .cra_driver_name = "xts-aes-chcr",
  3282. .cra_blocksize = AES_BLOCK_SIZE,
  3283. .cra_init = chcr_cra_init,
  3284. .cra_exit = NULL,
  3285. .cra_u .ablkcipher = {
  3286. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  3287. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  3288. .ivsize = AES_BLOCK_SIZE,
  3289. .setkey = chcr_aes_xts_setkey,
  3290. .encrypt = chcr_aes_encrypt,
  3291. .decrypt = chcr_aes_decrypt,
  3292. }
  3293. }
  3294. },
  3295. {
  3296. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
  3297. .is_registered = 0,
  3298. .alg.crypto = {
  3299. .cra_name = "ctr(aes)",
  3300. .cra_driver_name = "ctr-aes-chcr",
  3301. .cra_blocksize = 1,
  3302. .cra_init = chcr_cra_init,
  3303. .cra_exit = chcr_cra_exit,
  3304. .cra_u.ablkcipher = {
  3305. .min_keysize = AES_MIN_KEY_SIZE,
  3306. .max_keysize = AES_MAX_KEY_SIZE,
  3307. .ivsize = AES_BLOCK_SIZE,
  3308. .setkey = chcr_aes_ctr_setkey,
  3309. .encrypt = chcr_aes_encrypt,
  3310. .decrypt = chcr_aes_decrypt,
  3311. }
  3312. }
  3313. },
  3314. {
  3315. .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
  3316. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
  3317. .is_registered = 0,
  3318. .alg.crypto = {
  3319. .cra_name = "rfc3686(ctr(aes))",
  3320. .cra_driver_name = "rfc3686-ctr-aes-chcr",
  3321. .cra_blocksize = 1,
  3322. .cra_init = chcr_rfc3686_init,
  3323. .cra_exit = chcr_cra_exit,
  3324. .cra_u.ablkcipher = {
  3325. .min_keysize = AES_MIN_KEY_SIZE +
  3326. CTR_RFC3686_NONCE_SIZE,
  3327. .max_keysize = AES_MAX_KEY_SIZE +
  3328. CTR_RFC3686_NONCE_SIZE,
  3329. .ivsize = CTR_RFC3686_IV_SIZE,
  3330. .setkey = chcr_aes_rfc3686_setkey,
  3331. .encrypt = chcr_aes_encrypt,
  3332. .decrypt = chcr_aes_decrypt,
  3333. .geniv = "seqiv",
  3334. }
  3335. }
  3336. },
  3337. /* SHA */
  3338. {
  3339. .type = CRYPTO_ALG_TYPE_AHASH,
  3340. .is_registered = 0,
  3341. .alg.hash = {
  3342. .halg.digestsize = SHA1_DIGEST_SIZE,
  3343. .halg.base = {
  3344. .cra_name = "sha1",
  3345. .cra_driver_name = "sha1-chcr",
  3346. .cra_blocksize = SHA1_BLOCK_SIZE,
  3347. }
  3348. }
  3349. },
  3350. {
  3351. .type = CRYPTO_ALG_TYPE_AHASH,
  3352. .is_registered = 0,
  3353. .alg.hash = {
  3354. .halg.digestsize = SHA256_DIGEST_SIZE,
  3355. .halg.base = {
  3356. .cra_name = "sha256",
  3357. .cra_driver_name = "sha256-chcr",
  3358. .cra_blocksize = SHA256_BLOCK_SIZE,
  3359. }
  3360. }
  3361. },
  3362. {
  3363. .type = CRYPTO_ALG_TYPE_AHASH,
  3364. .is_registered = 0,
  3365. .alg.hash = {
  3366. .halg.digestsize = SHA224_DIGEST_SIZE,
  3367. .halg.base = {
  3368. .cra_name = "sha224",
  3369. .cra_driver_name = "sha224-chcr",
  3370. .cra_blocksize = SHA224_BLOCK_SIZE,
  3371. }
  3372. }
  3373. },
  3374. {
  3375. .type = CRYPTO_ALG_TYPE_AHASH,
  3376. .is_registered = 0,
  3377. .alg.hash = {
  3378. .halg.digestsize = SHA384_DIGEST_SIZE,
  3379. .halg.base = {
  3380. .cra_name = "sha384",
  3381. .cra_driver_name = "sha384-chcr",
  3382. .cra_blocksize = SHA384_BLOCK_SIZE,
  3383. }
  3384. }
  3385. },
  3386. {
  3387. .type = CRYPTO_ALG_TYPE_AHASH,
  3388. .is_registered = 0,
  3389. .alg.hash = {
  3390. .halg.digestsize = SHA512_DIGEST_SIZE,
  3391. .halg.base = {
  3392. .cra_name = "sha512",
  3393. .cra_driver_name = "sha512-chcr",
  3394. .cra_blocksize = SHA512_BLOCK_SIZE,
  3395. }
  3396. }
  3397. },
  3398. /* HMAC */
  3399. {
  3400. .type = CRYPTO_ALG_TYPE_HMAC,
  3401. .is_registered = 0,
  3402. .alg.hash = {
  3403. .halg.digestsize = SHA1_DIGEST_SIZE,
  3404. .halg.base = {
  3405. .cra_name = "hmac(sha1)",
  3406. .cra_driver_name = "hmac-sha1-chcr",
  3407. .cra_blocksize = SHA1_BLOCK_SIZE,
  3408. }
  3409. }
  3410. },
  3411. {
  3412. .type = CRYPTO_ALG_TYPE_HMAC,
  3413. .is_registered = 0,
  3414. .alg.hash = {
  3415. .halg.digestsize = SHA224_DIGEST_SIZE,
  3416. .halg.base = {
  3417. .cra_name = "hmac(sha224)",
  3418. .cra_driver_name = "hmac-sha224-chcr",
  3419. .cra_blocksize = SHA224_BLOCK_SIZE,
  3420. }
  3421. }
  3422. },
  3423. {
  3424. .type = CRYPTO_ALG_TYPE_HMAC,
  3425. .is_registered = 0,
  3426. .alg.hash = {
  3427. .halg.digestsize = SHA256_DIGEST_SIZE,
  3428. .halg.base = {
  3429. .cra_name = "hmac(sha256)",
  3430. .cra_driver_name = "hmac-sha256-chcr",
  3431. .cra_blocksize = SHA256_BLOCK_SIZE,
  3432. }
  3433. }
  3434. },
  3435. {
  3436. .type = CRYPTO_ALG_TYPE_HMAC,
  3437. .is_registered = 0,
  3438. .alg.hash = {
  3439. .halg.digestsize = SHA384_DIGEST_SIZE,
  3440. .halg.base = {
  3441. .cra_name = "hmac(sha384)",
  3442. .cra_driver_name = "hmac-sha384-chcr",
  3443. .cra_blocksize = SHA384_BLOCK_SIZE,
  3444. }
  3445. }
  3446. },
  3447. {
  3448. .type = CRYPTO_ALG_TYPE_HMAC,
  3449. .is_registered = 0,
  3450. .alg.hash = {
  3451. .halg.digestsize = SHA512_DIGEST_SIZE,
  3452. .halg.base = {
  3453. .cra_name = "hmac(sha512)",
  3454. .cra_driver_name = "hmac-sha512-chcr",
  3455. .cra_blocksize = SHA512_BLOCK_SIZE,
  3456. }
  3457. }
  3458. },
  3459. /* Add AEAD Algorithms */
  3460. {
  3461. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
  3462. .is_registered = 0,
  3463. .alg.aead = {
  3464. .base = {
  3465. .cra_name = "gcm(aes)",
  3466. .cra_driver_name = "gcm-aes-chcr",
  3467. .cra_blocksize = 1,
  3468. .cra_priority = CHCR_AEAD_PRIORITY,
  3469. .cra_ctxsize = sizeof(struct chcr_context) +
  3470. sizeof(struct chcr_aead_ctx) +
  3471. sizeof(struct chcr_gcm_ctx),
  3472. },
  3473. .ivsize = GCM_AES_IV_SIZE,
  3474. .maxauthsize = GHASH_DIGEST_SIZE,
  3475. .setkey = chcr_gcm_setkey,
  3476. .setauthsize = chcr_gcm_setauthsize,
  3477. }
  3478. },
  3479. {
  3480. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
  3481. .is_registered = 0,
  3482. .alg.aead = {
  3483. .base = {
  3484. .cra_name = "rfc4106(gcm(aes))",
  3485. .cra_driver_name = "rfc4106-gcm-aes-chcr",
  3486. .cra_blocksize = 1,
  3487. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3488. .cra_ctxsize = sizeof(struct chcr_context) +
  3489. sizeof(struct chcr_aead_ctx) +
  3490. sizeof(struct chcr_gcm_ctx),
  3491. },
  3492. .ivsize = GCM_RFC4106_IV_SIZE,
  3493. .maxauthsize = GHASH_DIGEST_SIZE,
  3494. .setkey = chcr_gcm_setkey,
  3495. .setauthsize = chcr_4106_4309_setauthsize,
  3496. }
  3497. },
  3498. {
  3499. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
  3500. .is_registered = 0,
  3501. .alg.aead = {
  3502. .base = {
  3503. .cra_name = "ccm(aes)",
  3504. .cra_driver_name = "ccm-aes-chcr",
  3505. .cra_blocksize = 1,
  3506. .cra_priority = CHCR_AEAD_PRIORITY,
  3507. .cra_ctxsize = sizeof(struct chcr_context) +
  3508. sizeof(struct chcr_aead_ctx),
  3509. },
  3510. .ivsize = AES_BLOCK_SIZE,
  3511. .maxauthsize = GHASH_DIGEST_SIZE,
  3512. .setkey = chcr_aead_ccm_setkey,
  3513. .setauthsize = chcr_ccm_setauthsize,
  3514. }
  3515. },
  3516. {
  3517. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
  3518. .is_registered = 0,
  3519. .alg.aead = {
  3520. .base = {
  3521. .cra_name = "rfc4309(ccm(aes))",
  3522. .cra_driver_name = "rfc4309-ccm-aes-chcr",
  3523. .cra_blocksize = 1,
  3524. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3525. .cra_ctxsize = sizeof(struct chcr_context) +
  3526. sizeof(struct chcr_aead_ctx),
  3527. },
  3528. .ivsize = 8,
  3529. .maxauthsize = GHASH_DIGEST_SIZE,
  3530. .setkey = chcr_aead_rfc4309_setkey,
  3531. .setauthsize = chcr_4106_4309_setauthsize,
  3532. }
  3533. },
  3534. {
  3535. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3536. .is_registered = 0,
  3537. .alg.aead = {
  3538. .base = {
  3539. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  3540. .cra_driver_name =
  3541. "authenc-hmac-sha1-cbc-aes-chcr",
  3542. .cra_blocksize = AES_BLOCK_SIZE,
  3543. .cra_priority = CHCR_AEAD_PRIORITY,
  3544. .cra_ctxsize = sizeof(struct chcr_context) +
  3545. sizeof(struct chcr_aead_ctx) +
  3546. sizeof(struct chcr_authenc_ctx),
  3547. },
  3548. .ivsize = AES_BLOCK_SIZE,
  3549. .maxauthsize = SHA1_DIGEST_SIZE,
  3550. .setkey = chcr_authenc_setkey,
  3551. .setauthsize = chcr_authenc_setauthsize,
  3552. }
  3553. },
  3554. {
  3555. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3556. .is_registered = 0,
  3557. .alg.aead = {
  3558. .base = {
  3559. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  3560. .cra_driver_name =
  3561. "authenc-hmac-sha256-cbc-aes-chcr",
  3562. .cra_blocksize = AES_BLOCK_SIZE,
  3563. .cra_priority = CHCR_AEAD_PRIORITY,
  3564. .cra_ctxsize = sizeof(struct chcr_context) +
  3565. sizeof(struct chcr_aead_ctx) +
  3566. sizeof(struct chcr_authenc_ctx),
  3567. },
  3568. .ivsize = AES_BLOCK_SIZE,
  3569. .maxauthsize = SHA256_DIGEST_SIZE,
  3570. .setkey = chcr_authenc_setkey,
  3571. .setauthsize = chcr_authenc_setauthsize,
  3572. }
  3573. },
  3574. {
  3575. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3576. .is_registered = 0,
  3577. .alg.aead = {
  3578. .base = {
  3579. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  3580. .cra_driver_name =
  3581. "authenc-hmac-sha224-cbc-aes-chcr",
  3582. .cra_blocksize = AES_BLOCK_SIZE,
  3583. .cra_priority = CHCR_AEAD_PRIORITY,
  3584. .cra_ctxsize = sizeof(struct chcr_context) +
  3585. sizeof(struct chcr_aead_ctx) +
  3586. sizeof(struct chcr_authenc_ctx),
  3587. },
  3588. .ivsize = AES_BLOCK_SIZE,
  3589. .maxauthsize = SHA224_DIGEST_SIZE,
  3590. .setkey = chcr_authenc_setkey,
  3591. .setauthsize = chcr_authenc_setauthsize,
  3592. }
  3593. },
  3594. {
  3595. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3596. .is_registered = 0,
  3597. .alg.aead = {
  3598. .base = {
  3599. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  3600. .cra_driver_name =
  3601. "authenc-hmac-sha384-cbc-aes-chcr",
  3602. .cra_blocksize = AES_BLOCK_SIZE,
  3603. .cra_priority = CHCR_AEAD_PRIORITY,
  3604. .cra_ctxsize = sizeof(struct chcr_context) +
  3605. sizeof(struct chcr_aead_ctx) +
  3606. sizeof(struct chcr_authenc_ctx),
  3607. },
  3608. .ivsize = AES_BLOCK_SIZE,
  3609. .maxauthsize = SHA384_DIGEST_SIZE,
  3610. .setkey = chcr_authenc_setkey,
  3611. .setauthsize = chcr_authenc_setauthsize,
  3612. }
  3613. },
  3614. {
  3615. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3616. .is_registered = 0,
  3617. .alg.aead = {
  3618. .base = {
  3619. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  3620. .cra_driver_name =
  3621. "authenc-hmac-sha512-cbc-aes-chcr",
  3622. .cra_blocksize = AES_BLOCK_SIZE,
  3623. .cra_priority = CHCR_AEAD_PRIORITY,
  3624. .cra_ctxsize = sizeof(struct chcr_context) +
  3625. sizeof(struct chcr_aead_ctx) +
  3626. sizeof(struct chcr_authenc_ctx),
  3627. },
  3628. .ivsize = AES_BLOCK_SIZE,
  3629. .maxauthsize = SHA512_DIGEST_SIZE,
  3630. .setkey = chcr_authenc_setkey,
  3631. .setauthsize = chcr_authenc_setauthsize,
  3632. }
  3633. },
  3634. {
  3635. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
  3636. .is_registered = 0,
  3637. .alg.aead = {
  3638. .base = {
  3639. .cra_name = "authenc(digest_null,cbc(aes))",
  3640. .cra_driver_name =
  3641. "authenc-digest_null-cbc-aes-chcr",
  3642. .cra_blocksize = AES_BLOCK_SIZE,
  3643. .cra_priority = CHCR_AEAD_PRIORITY,
  3644. .cra_ctxsize = sizeof(struct chcr_context) +
  3645. sizeof(struct chcr_aead_ctx) +
  3646. sizeof(struct chcr_authenc_ctx),
  3647. },
  3648. .ivsize = AES_BLOCK_SIZE,
  3649. .maxauthsize = 0,
  3650. .setkey = chcr_aead_digest_null_setkey,
  3651. .setauthsize = chcr_authenc_null_setauthsize,
  3652. }
  3653. },
  3654. {
  3655. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3656. .is_registered = 0,
  3657. .alg.aead = {
  3658. .base = {
  3659. .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
  3660. .cra_driver_name =
  3661. "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
  3662. .cra_blocksize = 1,
  3663. .cra_priority = CHCR_AEAD_PRIORITY,
  3664. .cra_ctxsize = sizeof(struct chcr_context) +
  3665. sizeof(struct chcr_aead_ctx) +
  3666. sizeof(struct chcr_authenc_ctx),
  3667. },
  3668. .ivsize = CTR_RFC3686_IV_SIZE,
  3669. .maxauthsize = SHA1_DIGEST_SIZE,
  3670. .setkey = chcr_authenc_setkey,
  3671. .setauthsize = chcr_authenc_setauthsize,
  3672. }
  3673. },
  3674. {
  3675. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3676. .is_registered = 0,
  3677. .alg.aead = {
  3678. .base = {
  3679. .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
  3680. .cra_driver_name =
  3681. "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
  3682. .cra_blocksize = 1,
  3683. .cra_priority = CHCR_AEAD_PRIORITY,
  3684. .cra_ctxsize = sizeof(struct chcr_context) +
  3685. sizeof(struct chcr_aead_ctx) +
  3686. sizeof(struct chcr_authenc_ctx),
  3687. },
  3688. .ivsize = CTR_RFC3686_IV_SIZE,
  3689. .maxauthsize = SHA256_DIGEST_SIZE,
  3690. .setkey = chcr_authenc_setkey,
  3691. .setauthsize = chcr_authenc_setauthsize,
  3692. }
  3693. },
  3694. {
  3695. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3696. .is_registered = 0,
  3697. .alg.aead = {
  3698. .base = {
  3699. .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
  3700. .cra_driver_name =
  3701. "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
  3702. .cra_blocksize = 1,
  3703. .cra_priority = CHCR_AEAD_PRIORITY,
  3704. .cra_ctxsize = sizeof(struct chcr_context) +
  3705. sizeof(struct chcr_aead_ctx) +
  3706. sizeof(struct chcr_authenc_ctx),
  3707. },
  3708. .ivsize = CTR_RFC3686_IV_SIZE,
  3709. .maxauthsize = SHA224_DIGEST_SIZE,
  3710. .setkey = chcr_authenc_setkey,
  3711. .setauthsize = chcr_authenc_setauthsize,
  3712. }
  3713. },
  3714. {
  3715. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3716. .is_registered = 0,
  3717. .alg.aead = {
  3718. .base = {
  3719. .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
  3720. .cra_driver_name =
  3721. "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
  3722. .cra_blocksize = 1,
  3723. .cra_priority = CHCR_AEAD_PRIORITY,
  3724. .cra_ctxsize = sizeof(struct chcr_context) +
  3725. sizeof(struct chcr_aead_ctx) +
  3726. sizeof(struct chcr_authenc_ctx),
  3727. },
  3728. .ivsize = CTR_RFC3686_IV_SIZE,
  3729. .maxauthsize = SHA384_DIGEST_SIZE,
  3730. .setkey = chcr_authenc_setkey,
  3731. .setauthsize = chcr_authenc_setauthsize,
  3732. }
  3733. },
  3734. {
  3735. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3736. .is_registered = 0,
  3737. .alg.aead = {
  3738. .base = {
  3739. .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
  3740. .cra_driver_name =
  3741. "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
  3742. .cra_blocksize = 1,
  3743. .cra_priority = CHCR_AEAD_PRIORITY,
  3744. .cra_ctxsize = sizeof(struct chcr_context) +
  3745. sizeof(struct chcr_aead_ctx) +
  3746. sizeof(struct chcr_authenc_ctx),
  3747. },
  3748. .ivsize = CTR_RFC3686_IV_SIZE,
  3749. .maxauthsize = SHA512_DIGEST_SIZE,
  3750. .setkey = chcr_authenc_setkey,
  3751. .setauthsize = chcr_authenc_setauthsize,
  3752. }
  3753. },
  3754. {
  3755. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
  3756. .is_registered = 0,
  3757. .alg.aead = {
  3758. .base = {
  3759. .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
  3760. .cra_driver_name =
  3761. "authenc-digest_null-rfc3686-ctr-aes-chcr",
  3762. .cra_blocksize = 1,
  3763. .cra_priority = CHCR_AEAD_PRIORITY,
  3764. .cra_ctxsize = sizeof(struct chcr_context) +
  3765. sizeof(struct chcr_aead_ctx) +
  3766. sizeof(struct chcr_authenc_ctx),
  3767. },
  3768. .ivsize = CTR_RFC3686_IV_SIZE,
  3769. .maxauthsize = 0,
  3770. .setkey = chcr_aead_digest_null_setkey,
  3771. .setauthsize = chcr_authenc_null_setauthsize,
  3772. }
  3773. },
  3774. };

/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 * chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once when the first device comes up. After this,
 * the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once when the last device goes down. After this,
 * the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}