- /*
- * This file is part of the Chelsio T6 Crypto driver for Linux.
- *
- * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Written and Maintained by:
- * Manoj Malviya (manojmalviya@chelsio.com)
- * Atul Gupta (atul.gupta@chelsio.com)
- * Jitendra Lulla (jlulla@chelsio.com)
- * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
- * Harsh Jain (harsh@chelsio.com)
- */
- #define pr_fmt(fmt) "chcr:" fmt
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/crypto.h>
- #include <linux/cryptohash.h>
- #include <linux/skbuff.h>
- #include <linux/rtnetlink.h>
- #include <linux/highmem.h>
- #include <linux/scatterlist.h>
- #include <crypto/aes.h>
- #include <crypto/algapi.h>
- #include <crypto/hash.h>
- #include <crypto/gcm.h>
- #include <crypto/sha.h>
- #include <crypto/authenc.h>
- #include <crypto/ctr.h>
- #include <crypto/gf128mul.h>
- #include <crypto/internal/aead.h>
- #include <crypto/null.h>
- #include <crypto/internal/skcipher.h>
- #include <crypto/aead.h>
- #include <crypto/scatterwalk.h>
- #include <crypto/internal/hash.h>
- #include "t4fw_api.h"
- #include "t4_msg.h"
- #include "chcr_core.h"
- #include "chcr_algo.h"
- #include "chcr_crypto.h"
- #define IV AES_BLOCK_SIZE
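- /* Space in bytes consumed inside a WR by a source (ULPTX) SGL and a
-  * destination (PHYS) DSGL with n entries, indexed by n; used below to
-  * work out how much payload fits in one work request.
-  */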
- static unsigned int sgl_ent_len[] = {
- 0, 0, 16, 24, 40, 48, 64, 72, 88,
- 96, 112, 120, 136, 144, 160, 168, 184,
- 192, 208, 216, 232, 240, 256, 264, 280,
- 288, 304, 312, 328, 336, 352, 360, 376
- };
- static unsigned int dsgl_ent_len[] = {
- 0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320
- };
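- /* AES key-schedule round constants (rcon), kept in the high byte. */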
- static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
- };
- static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
- static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
- {
- return ctx->crypto_ctx->aeadctx;
- }
- static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
- {
- return ctx->crypto_ctx->ablkctx;
- }
- static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
- {
- return ctx->crypto_ctx->hmacctx;
- }
- static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
- {
- return gctx->ctx->gcm;
- }
- static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
- {
- return gctx->ctx->authenc;
- }
- static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
- {
- return ctx->dev->u_ctx;
- }
- static inline int is_ofld_imm(const struct sk_buff *skb)
- {
- return (skb->len <= SGE_MAX_WR_LEN);
- }
- static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
- {
- memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
- }
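- /* Count the SG entries needed to cover @reqlen bytes of @sg when each
-  * entry maps at most @entlen bytes, after skipping the first @skip bytes.
-  */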
- static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
- unsigned int entlen,
- unsigned int skip)
- {
- int nents = 0;
- unsigned int less;
- unsigned int skip_len = 0;
- while (sg && skip) {
- if (sg_dma_len(sg) <= skip) {
- skip -= sg_dma_len(sg);
- skip_len = 0;
- sg = sg_next(sg);
- } else {
- skip_len = skip;
- skip = 0;
- }
- }
- while (sg && reqlen) {
- less = min(reqlen, sg_dma_len(sg) - skip_len);
- nents += DIV_ROUND_UP(less, entlen);
- reqlen -= less;
- skip_len = 0;
- sg = sg_next(sg);
- }
- return nents;
- }
- static inline int get_aead_subtype(struct crypto_aead *aead)
- {
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
- }
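- /* Software tag verification for AEAD decryption: compare the tag computed
-  * by the hardware with the tag in the request and flag -EBADMSG on mismatch.
-  */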
- void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
- {
- u8 temp[SHA512_DIGEST_SIZE];
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- int authsize = crypto_aead_authsize(tfm);
- struct cpl_fw6_pld *fw6_pld;
- int cmp = 0;
- fw6_pld = (struct cpl_fw6_pld *)input;
- if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
- (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
- cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
- } else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
- authsize, req->assoclen +
- req->cryptlen - authsize);
- cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
- }
- if (cmp)
- *err = -EBADMSG;
- else
- *err = 0;
- }
- static inline void chcr_handle_aead_resp(struct aead_request *req,
- unsigned char *input,
- int err)
- {
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- chcr_aead_common_exit(req);
- if (reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(req, input, &err);
- reqctx->verify = VERIFY_HW;
- }
- req->base.complete(&req->base, err);
- }
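- /* Expand the AES key schedule and copy out the last round keys in reverse
-  * order, the form the hardware expects for decryption.
-  */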
- static void get_aes_decrypt_key(unsigned char *dec_key,
- const unsigned char *key,
- unsigned int keylength)
- {
- u32 temp;
- u32 w_ring[MAX_NK];
- int i, j, k;
- u8 nr, nk;
- switch (keylength) {
- case AES_KEYLENGTH_128BIT:
- nk = KEYLENGTH_4BYTES;
- nr = NUMBER_OF_ROUNDS_10;
- break;
- case AES_KEYLENGTH_192BIT:
- nk = KEYLENGTH_6BYTES;
- nr = NUMBER_OF_ROUNDS_12;
- break;
- case AES_KEYLENGTH_256BIT:
- nk = KEYLENGTH_8BYTES;
- nr = NUMBER_OF_ROUNDS_14;
- break;
- default:
- return;
- }
- for (i = 0; i < nk; i++)
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
- i = 0;
- temp = w_ring[nk - 1];
- while (i + nk < (nr + 1) * 4) {
- if (!(i % nk)) {
- /* RotWord(temp) */
- temp = (temp << 8) | (temp >> 24);
- temp = aes_ks_subword(temp);
- temp ^= round_constant[i / nk];
- } else if (nk == 8 && (i % 4 == 0)) {
- temp = aes_ks_subword(temp);
- }
- w_ring[i % nk] ^= temp;
- temp = w_ring[i % nk];
- i++;
- }
- i--;
- for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
- j--;
- if (j < 0)
- j += nk;
- }
- }
- static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
- {
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
- switch (ds) {
- case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1", 0, 0);
- break;
- case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224", 0, 0);
- break;
- case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256", 0, 0);
- break;
- case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384", 0, 0);
- break;
- case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512", 0, 0);
- break;
- }
- return base_hash;
- }
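- /* Hash a single block (the HMAC ipad/opad) and export the raw intermediate
-  * state; this seeds the hardware's partial-hash key context.
-  */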
- static int chcr_compute_partial_hash(struct shash_desc *desc,
- char *iopad, char *result_hash,
- int digest_size)
- {
- struct sha1_state sha1_st;
- struct sha256_state sha256_st;
- struct sha512_state sha512_st;
- int error;
- if (digest_size == SHA1_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha1_st);
- memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
- } else if (digest_size == SHA224_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
- } else if (digest_size == SHA256_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
- } else if (digest_size == SHA384_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
- } else if (digest_size == SHA512_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
- } else {
- error = -EINVAL;
- pr_err("Unknown digest size %d\n", digest_size);
- }
- return error;
- }
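- /* Byte-swap the exported hash state into big-endian order: 64-bit words
-  * when the result size is SHA512_DIGEST_SIZE, 32-bit words otherwise.
-  */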
- static void chcr_change_order(char *buf, int ds)
- {
- int i;
- if (ds == SHA512_DIGEST_SIZE) {
- for (i = 0; i < (ds / sizeof(u64)); i++)
- *((__be64 *)buf + i) =
- cpu_to_be64(*((u64 *)buf + i));
- } else {
- for (i = 0; i < (ds / sizeof(u32)); i++)
- *((__be32 *)buf + i) =
- cpu_to_be32(*((u32 *)buf + i));
- }
- }
- static inline int is_hmac(struct crypto_tfm *tfm)
- {
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
- alg.hash);
- if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
- return 1;
- return 0;
- }
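- /* dsgl_walk_*: helpers for building the CPL_RX_PHYS_DSGL destination
-  * gather list, eight address/length pairs per phys_sge_pairs block.
-  */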
- static inline void dsgl_walk_init(struct dsgl_walk *walk,
- struct cpl_rx_phys_dsgl *dsgl)
- {
- walk->dsgl = dsgl;
- walk->nents = 0;
- walk->to = (struct phys_sge_pairs *)(dsgl + 1);
- }
- static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
- int pci_chan_id)
- {
- struct cpl_rx_phys_dsgl *phys_cpl;
- phys_cpl = walk->dsgl;
- phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
- | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
- phys_cpl->pcirlxorder_to_noofsgentr =
- htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
- CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
- CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
- CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
- CPL_RX_PHYS_DSGL_DCAID_V(0) |
- CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
- phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
- phys_cpl->rss_hdr_int.qid = htons(qid);
- phys_cpl->rss_hdr_int.hash_val = 0;
- phys_cpl->rss_hdr_int.channel = pci_chan_id;
- }
- static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
- size_t size,
- dma_addr_t *addr)
- {
- int j;
- if (!size)
- return;
- j = walk->nents;
- walk->to->len[j % 8] = htons(size);
- walk->to->addr[j % 8] = cpu_to_be64(*addr);
- j++;
- if ((j % 8) == 0)
- walk->to++;
- walk->nents = j;
- }
- static void dsgl_walk_add_sg(struct dsgl_walk *walk,
- struct scatterlist *sg,
- unsigned int slen,
- unsigned int skip)
- {
- int skip_len = 0;
- unsigned int left_size = slen, len = 0;
- unsigned int j = walk->nents;
- int offset, ent_len;
- if (!slen)
- return;
- while (sg && skip) {
- if (sg_dma_len(sg) <= skip) {
- skip -= sg_dma_len(sg);
- skip_len = 0;
- sg = sg_next(sg);
- } else {
- skip_len = skip;
- skip = 0;
- }
- }
- while (left_size && sg) {
- len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
- offset = 0;
- while (len) {
- ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
- walk->to->len[j % 8] = htons(ent_len);
- walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
- offset + skip_len);
- offset += ent_len;
- len -= ent_len;
- j++;
- if ((j % 8) == 0)
- walk->to++;
- }
- walk->last_sg = sg;
- walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
- skip_len) + skip_len;
- left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
- skip_len = 0;
- sg = sg_next(sg);
- }
- walk->nents = j;
- }
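- /* ulptx_walk_*: helpers for building the ULPTX source gather list; the
-  * first entry lives in the SGL header (len0/addr0), later ones in pairs.
-  */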
- static inline void ulptx_walk_init(struct ulptx_walk *walk,
- struct ulptx_sgl *ulp)
- {
- walk->sgl = ulp;
- walk->nents = 0;
- walk->pair_idx = 0;
- walk->pair = ulp->sge;
- walk->last_sg = NULL;
- walk->last_sg_len = 0;
- }
- static inline void ulptx_walk_end(struct ulptx_walk *walk)
- {
- walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_NSGE_V(walk->nents));
- }
- static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
- size_t size,
- dma_addr_t *addr)
- {
- if (!size)
- return;
- if (walk->nents == 0) {
- walk->sgl->len0 = cpu_to_be32(size);
- walk->sgl->addr0 = cpu_to_be64(*addr);
- } else {
- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
- walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
- walk->pair_idx = !walk->pair_idx;
- if (!walk->pair_idx)
- walk->pair++;
- }
- walk->nents++;
- }
- static void ulptx_walk_add_sg(struct ulptx_walk *walk,
- struct scatterlist *sg,
- unsigned int len,
- unsigned int skip)
- {
- int small;
- int skip_len = 0;
- unsigned int sgmin;
- if (!len)
- return;
- while (sg && skip) {
- if (sg_dma_len(sg) <= skip) {
- skip -= sg_dma_len(sg);
- skip_len = 0;
- sg = sg_next(sg);
- } else {
- skip_len = skip;
- skip = 0;
- }
- }
- WARN(!sg, "SG should not be null here\n");
- if (sg && (walk->nents == 0)) {
- small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
- sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
- walk->sgl->len0 = cpu_to_be32(sgmin);
- walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
- walk->nents++;
- len -= sgmin;
- walk->last_sg = sg;
- walk->last_sg_len = sgmin + skip_len;
- skip_len += sgmin;
- if (sg_dma_len(sg) == skip_len) {
- sg = sg_next(sg);
- skip_len = 0;
- }
- }
- while (sg && len) {
- small = min(sg_dma_len(sg) - skip_len, len);
- sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
- walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
- walk->pair->addr[walk->pair_idx] =
- cpu_to_be64(sg_dma_address(sg) + skip_len);
- walk->pair_idx = !walk->pair_idx;
- walk->nents++;
- if (!walk->pair_idx)
- walk->pair++;
- len -= sgmin;
- skip_len += sgmin;
- walk->last_sg = sg;
- walk->last_sg_len = skip_len;
- if (sg_dma_len(sg) == skip_len) {
- sg = sg_next(sg);
- skip_len = 0;
- }
- }
- }
- static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
- {
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.crypto);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
- }
- static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
- {
- struct adapter *adap = netdev2adap(dev);
- struct sge_uld_txq_info *txq_info =
- adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
- struct sge_uld_txq *txq;
- int ret = 0;
- local_bh_disable();
- txq = &txq_info->uldtxq[idx];
- spin_lock(&txq->sendq.lock);
- if (txq->full)
- ret = -1;
- spin_unlock(&txq->sendq.lock);
- local_bh_enable();
- return ret;
- }
- static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
- struct _key_ctx *key_ctx)
- {
- if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
- } else {
- memcpy(key_ctx->key,
- ablkctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->rrkey, ablkctx->enckey_len >> 1);
- }
- return 0;
- }
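- /* Return how many source bytes fit in the remaining WR @space, walking
-  * @src in CHCR_SRC_SG_SIZE chunks and charging sgl_ent_len[] per entry.
-  */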
- static int chcr_hash_ent_in_wr(struct scatterlist *src,
- unsigned int minsg,
- unsigned int space,
- unsigned int srcskip)
- {
- int srclen = 0;
- int srcsg = minsg;
- int soffset = 0, sless;
- if (sg_dma_len(src) == srcskip) {
- src = sg_next(src);
- srcskip = 0;
- }
- while (src && space > (sgl_ent_len[srcsg + 1])) {
- sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
- CHCR_SRC_SG_SIZE);
- srclen += sless;
- soffset += sless;
- srcsg++;
- if (sg_dma_len(src) == (soffset + srcskip)) {
- src = sg_next(src);
- soffset = 0;
- srcskip = 0;
- }
- }
- return srclen;
- }
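- /* As above, but for a cipher WR with both source and destination lists;
-  * returns the byte count that fits in @space on both sides.
-  */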
- static int chcr_sg_ent_in_wr(struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int minsg,
- unsigned int space,
- unsigned int srcskip,
- unsigned int dstskip)
- {
- int srclen = 0, dstlen = 0;
- int srcsg = minsg, dstsg = minsg;
- int offset = 0, soffset = 0, less, sless = 0;
- if (sg_dma_len(src) == srcskip) {
- src = sg_next(src);
- srcskip = 0;
- }
- if (sg_dma_len(dst) == dstskip) {
- dst = sg_next(dst);
- dstskip = 0;
- }
- while (src && dst &&
- space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
- CHCR_SRC_SG_SIZE);
- srclen += sless;
- srcsg++;
- offset = 0;
- while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
- space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
- if (srclen <= dstlen)
- break;
- less = min_t(unsigned int, sg_dma_len(dst) - offset -
- dstskip, CHCR_DST_SG_SIZE);
- dstlen += less;
- offset += less;
- if ((offset + dstskip) == sg_dma_len(dst)) {
- dst = sg_next(dst);
- offset = 0;
- }
- dstsg++;
- dstskip = 0;
- }
- soffset += sless;
- if ((soffset + srcskip) == sg_dma_len(src)) {
- src = sg_next(src);
- srcskip = 0;
- soffset = 0;
- }
- }
- return min(srclen, dstlen);
- }
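- /* Run the request synchronously through the software fallback cipher. */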
- static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
- u32 flags,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int nbytes,
- u8 *iv,
- unsigned short op_type)
- {
- int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
- skcipher_request_set_tfm(subreq, cipher);
- skcipher_request_set_callback(subreq, flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, src, dst,
- nbytes, iv);
- err = op_type ? crypto_skcipher_decrypt(subreq) :
- crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
- }
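- /* Fill the common FW_CRYPTO_LOOKASIDE work-request header along with the
-  * ULPTX and immediate-data sub-commands shared by cipher and hash WRs.
-  */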
- static inline void create_wreq(struct chcr_context *ctx,
- struct chcr_wr *chcr_req,
- struct crypto_async_request *req,
- unsigned int imm,
- int hash_sz,
- unsigned int len16,
- unsigned int sc_len,
- unsigned int lcb)
- {
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
- chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
- chcr_req->wreq.pld_size_hash_size =
- htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- chcr_req->wreq.len16_pkd =
- htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
- chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
- chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
- !!lcb, ctx->tx_qidx);
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
- qid);
- chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
- ((sizeof(chcr_req->wreq)) >> 4)));
- chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
- chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
- sizeof(chcr_req->key_ctx) + sc_len);
- }
- /**
-  * create_cipher_wr - form the WR for cipher operations
-  * @wrparam: bundles the cipher request, the ingress qid on which the
-  *	response for this WR should be received, and the number of bytes
-  *	to process.
-  */
- static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct sk_buff *skb = NULL;
- struct chcr_wr *chcr_req;
- struct cpl_rx_phys_dsgl *phys_cpl;
- struct ulptx_sgl *ulptx;
- struct chcr_blkcipher_req_ctx *reqctx =
- ablkcipher_request_ctx(wrparam->req);
- unsigned int temp = 0, transhdr_len, dst_size;
- int error;
- int nents;
- unsigned int kctx_len;
- gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(c_ctx(tfm)->dev);
- nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
- reqctx->dst_ofst);
- dst_size = get_space_for_phys_dsgl(nents);
- kctx_len = roundup(ablkctx->enckey_len, 16);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
- CHCR_SRC_SG_SIZE, reqctx->src_ofst);
- temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
- (sgl_len(nents) * 8);
- transhdr_len += temp;
- transhdr_len = roundup(transhdr_len, 16);
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
- if (!skb) {
- error = -ENOMEM;
- goto err;
- }
- chcr_req = __skb_put_zero(skb, transhdr_len);
- chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
- chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
- chcr_req->sec_cpl.aadstart_cipherstop_hi =
- FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
- chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
- ablkctx->ciph_mode,
- 0, 0, IV >> 1);
- chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
- 0, 1, dst_size);
- chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
- if ((reqctx->op == CHCR_DECRYPT_OP) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR)) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
- generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
- } else {
- if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
- (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
- memcpy(chcr_req->key_ctx.key, ablkctx->key,
- ablkctx->enckey_len);
- } else {
- memcpy(chcr_req->key_ctx.key, ablkctx->key +
- (ablkctx->enckey_len >> 1),
- ablkctx->enckey_len >> 1);
- memcpy(chcr_req->key_ctx.key +
- (ablkctx->enckey_len >> 1),
- ablkctx->key,
- ablkctx->enckey_len >> 1);
- }
- }
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
- chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
- atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
- + (reqctx->imm ? (wrparam->bytes) : 0);
- create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
- transhdr_len, temp,
- ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
- reqctx->skb = skb;
- if (reqctx->op && (ablkctx->ciph_mode ==
- CHCR_SCMD_CIPHER_MODE_AES_CBC))
- sg_pcopy_to_buffer(wrparam->req->src,
- sg_nents(wrparam->req->src), wrparam->req->info, 16,
- reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
- return skb;
- err:
- return ERR_PTR(error);
- }
- static inline int chcr_keyctx_ck_size(unsigned int keylen)
- {
- int ck_size = 0;
- if (keylen == AES_KEYSIZE_128)
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- else if (keylen == AES_KEYSIZE_192)
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
- else if (keylen == AES_KEYSIZE_256)
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
- else
- ck_size = 0;
- return ck_size;
- }
- static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key,
- unsigned int keylen)
- {
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- int err = 0;
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
- return err;
- }
- static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key,
- unsigned int keylen)
- {
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- unsigned int ck_size, context_size;
- u16 alignment = 0;
- int err;
- err = chcr_cipher_fallback_setkey(cipher, key, keylen);
- if (err)
- goto badkey_err;
- ck_size = chcr_keyctx_ck_size(keylen);
- alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
- get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
- keylen + alignment) >> 4;
- ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
- 0, 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
- return 0;
- badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- ablkctx->enckey_len = 0;
- return err;
- }
- static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key,
- unsigned int keylen)
- {
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- unsigned int ck_size, context_size;
- u16 alignment = 0;
- int err;
- err = chcr_cipher_fallback_setkey(cipher, key, keylen);
- if (err)
- goto badkey_err;
- ck_size = chcr_keyctx_ck_size(keylen);
- alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
- keylen + alignment) >> 4;
- ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
- 0, 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
- return 0;
- badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- ablkctx->enckey_len = 0;
- return err;
- }
- static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key,
- unsigned int keylen)
- {
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- unsigned int ck_size, context_size;
- u16 alignment = 0;
- int err;
- if (keylen < CTR_RFC3686_NONCE_SIZE)
- return -EINVAL;
- memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
- CTR_RFC3686_NONCE_SIZE);
- keylen -= CTR_RFC3686_NONCE_SIZE;
- err = chcr_cipher_fallback_setkey(cipher, key, keylen);
- if (err)
- goto badkey_err;
- ck_size = chcr_keyctx_ck_size(keylen);
- alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
- keylen + alignment) >> 4;
- ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
- 0, 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
- return 0;
- badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- ablkctx->enckey_len = 0;
- return err;
- }
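- /* Add @add to the big-endian 128-bit counter in @srciv, propagating any
-  * carry, and write the result to @dstiv.
-  */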
- static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
- {
- unsigned int size = AES_BLOCK_SIZE;
- __be32 *b = (__be32 *)(dstiv + size);
- u32 c, prev;
- memcpy(dstiv, srciv, AES_BLOCK_SIZE);
- for (; size >= 4; size -= 4) {
- prev = be32_to_cpu(*--b);
- c = prev + add;
- *b = cpu_to_be32(c);
- if (prev < c)
- break;
- add = 1;
- }
- }
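- /* Clamp @bytes so the low 32-bit CTR counter cannot wrap within one WR. */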
- static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
- {
- __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
- u64 c;
- u32 temp = be32_to_cpu(*--b);
- temp = ~temp;
- c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
- if ((bytes / AES_BLOCK_SIZE) > c)
- bytes = c * AES_BLOCK_SIZE;
- return bytes;
- }
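- /* Recompute the XTS tweak for the next chunk: encrypt the IV with the
-  * second half of the key, advance it by one GF(2^128) doubling per AES
-  * block processed, and (for non-final chunks) decrypt it back so the
-  * hardware can re-derive it.
-  */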
- static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
- u32 isfinal)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct crypto_cipher *cipher;
- int ret, i;
- u8 *key;
- unsigned int keylen;
- int round = reqctx->last_req_len / AES_BLOCK_SIZE;
- int round8 = round / 8;
- cipher = ablkctx->aes_generic;
- memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
- keylen = ablkctx->enckey_len / 2;
- key = ablkctx->key + keylen;
- ret = crypto_cipher_setkey(cipher, key, keylen);
- if (ret)
- goto out;
- crypto_cipher_encrypt_one(cipher, iv, iv);
- for (i = 0; i < round8; i++)
- gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
- for (i = 0; i < (round % 8); i++)
- gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- if (!isfinal)
- crypto_cipher_decrypt_one(cipher, iv, iv);
- out:
- return ret;
- }
- static int chcr_update_cipher_iv(struct ablkcipher_request *req,
- struct cpl_fw6_pld *fw6_pld, u8 *iv)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
- int ret = 0;
- if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
- else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
- CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
- AES_BLOCK_SIZE) + 1);
- else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv, 0);
- else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
- if (reqctx->op)
- /* Updated before sending the last WR */
- memcpy(iv, req->info, AES_BLOCK_SIZE);
- else
- memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
- }
- return ret;
- }
- /* A separate function is needed for the final IV because in RFC3686 the
-  * initial counter starts at 1 and the IV buffer is only 8 bytes, which
-  * stays constant across subsequent update requests.
-  */
- static int chcr_final_cipher_iv(struct ablkcipher_request *req,
- struct cpl_fw6_pld *fw6_pld, u8 *iv)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
- int ret = 0;
- if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
- else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv, 1);
- else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
- /* Already updated for decrypt */
- if (!reqctx->op)
- memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
- }
- return ret;
- }
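- /* Completion handler for cipher WRs: update the IV and, if data remains,
-  * issue the next WR; otherwise finalise the IV and complete the request.
-  */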
- static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct sk_buff *skb;
- struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct cipher_wr_param wrparam;
- int bytes;
- if (err)
- goto unmap;
- if (req->nbytes == reqctx->processed) {
- chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
- req);
- err = chcr_final_cipher_iv(req, fw6_pld, req->info);
- goto complete;
- }
- if (!reqctx->imm) {
- bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
- CIP_SPACE_LEFT(ablkctx->enckey_len),
- reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = rounddown(bytes, 16);
- } else {
- /* CTR mode counter overflow */
- bytes = req->nbytes - reqctx->processed;
- }
- err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
- if (err)
- goto unmap;
- if (unlikely(bytes == 0)) {
- chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
- req);
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
- req->base.flags,
- req->src,
- req->dst,
- req->nbytes,
- req->info,
- reqctx->op);
- goto complete;
- }
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR)
- bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
- wrparam.req = req;
- wrparam.bytes = bytes;
- skb = create_cipher_wr(&wrparam);
- if (IS_ERR(skb)) {
- pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
- err = PTR_ERR(skb);
- goto unmap;
- }
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- reqctx->last_req_len = bytes;
- reqctx->processed += bytes;
- return 0;
- unmap:
- chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
- complete:
- req->base.complete(&req->base, err);
- return err;
- }
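- /* Validate the request, map it for DMA, set up the IV, choose between
-  * immediate and SGL transfer, and build the first cipher WR.
-  */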
- static int process_cipher(struct ablkcipher_request *req,
- unsigned short qid,
- struct sk_buff **skb,
- unsigned short op_type)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct cipher_wr_param wrparam;
- int bytes, err = -EINVAL;
- reqctx->processed = 0;
- if (!req->info)
- goto error;
- if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes == 0) ||
- (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
- pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
- ablkctx->enckey_len, req->nbytes, ivsize);
- goto error;
- }
- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
- if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
- AES_MIN_KEY_SIZE +
- sizeof(struct cpl_rx_phys_dsgl) +
- /* min dsgl size */
- 32))) {
- /* Can be sent as immediate */
- unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
- dnents = sg_nents_xlen(req->dst, req->nbytes,
- CHCR_DST_SG_SIZE, 0);
- phys_dsgl = get_space_for_phys_dsgl(dnents);
- kctx_len = roundup(ablkctx->enckey_len, 16);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- reqctx->imm = (transhdr_len + IV + req->nbytes) <=
- SGE_MAX_WR_LEN;
- bytes = IV + req->nbytes;
- } else {
- reqctx->imm = 0;
- }
- if (!reqctx->imm) {
- bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
- CIP_SPACE_LEFT(ablkctx->enckey_len),
- 0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = rounddown(bytes, 16);
- } else {
- bytes = req->nbytes;
- }
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
- bytes = adjust_ctr_overflow(req->info, bytes);
- }
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
- memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
- CTR_RFC3686_IV_SIZE);
- /* initialize counter portion of counter block */
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
- CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
- } else {
- memcpy(reqctx->iv, req->info, IV);
- }
- if (unlikely(bytes == 0)) {
- chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
- req);
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
- req->base.flags,
- req->src,
- req->dst,
- req->nbytes,
- reqctx->iv,
- op_type);
- goto error;
- }
- reqctx->op = op_type;
- reqctx->srcsg = req->src;
- reqctx->dstsg = req->dst;
- reqctx->src_ofst = 0;
- reqctx->dst_ofst = 0;
- wrparam.qid = qid;
- wrparam.req = req;
- wrparam.bytes = bytes;
- *skb = create_cipher_wr(&wrparam);
- if (IS_ERR(*skb)) {
- err = PTR_ERR(*skb);
- goto unmap;
- }
- reqctx->processed = bytes;
- reqctx->last_req_len = bytes;
- return 0;
- unmap:
- chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
- error:
- return err;
- }
- static int chcr_aes_encrypt(struct ablkcipher_request *req)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct sk_buff *skb = NULL;
- int err, isfull = 0;
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
- &skb, CHCR_ENCRYPT_OP);
- if (err || !skb)
- return err;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
- }
- static int chcr_aes_decrypt(struct ablkcipher_request *req)
- {
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
- struct sk_buff *skb = NULL;
- int err, isfull = 0;
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
- &skb, CHCR_DECRYPT_OP);
- if (err || !skb)
- return err;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
- }
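- /* Bind the tfm context to a crypto device and pick its TX/RX queue
-  * indices, alternating channels across contexts.
-  */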
- static int chcr_device_init(struct chcr_context *ctx)
- {
- struct uld_ctx *u_ctx = NULL;
- struct adapter *adap;
- unsigned int id;
- int txq_perchan, txq_idx, ntxq;
- int err = 0, rxq_perchan, rxq_idx;
- id = smp_processor_id();
- if (!ctx->dev) {
- u_ctx = assign_chcr_device();
- if (!u_ctx) {
- pr_err("chcr device assignment fails\n");
- goto out;
- }
- ctx->dev = u_ctx->dev;
- adap = padap(ctx->dev);
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
- adap->vres.ncrypto_fc);
- rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
- txq_perchan = ntxq / u_ctx->lldi.nchan;
- spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
- ctx->dev->rx_channel_id = 0;
- spin_unlock(&ctx->dev->lock_chcr_dev);
- rxq_idx = ctx->tx_chan_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->tx_chan_id * txq_perchan;
- txq_idx += id % txq_perchan;
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
- /* Channel ID used by the SGE to forward packets to the host.
-  * The FW must use the same value in the cpl_fw6_pld RSS_CH field.
-  * At queue-allocation time the driver programs the PCI channel ID
-  * to be used by the FW with the value "pi->tx_chan".
-  */
- ctx->pci_chan_id = txq_idx / txq_perchan;
- }
- out:
- return err;
- }
- static int chcr_cra_init(struct crypto_tfm *tfm)
- {
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
- return PTR_ERR(ablkctx->sw_cipher);
- }
- if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
- /* To update the tweak */
- ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
- if (IS_ERR(ablkctx->aes_generic)) {
- pr_err("failed to allocate aes cipher for tweak\n");
- return PTR_ERR(ablkctx->aes_generic);
- }
- } else
- ablkctx->aes_generic = NULL;
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
- }
- static int chcr_rfc3686_init(struct crypto_tfm *tfm)
- {
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- /* RFC3686 initialises the IV counter to 1, so rfc3686(ctr(aes))
-  * cannot be used as the fallback in chcr_handle_cipher_resp().
-  */
- ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
- return PTR_ERR(ablkctx->sw_cipher);
- }
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
- }
- static void chcr_cra_exit(struct crypto_tfm *tfm)
- {
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_skcipher(ablkctx->sw_cipher);
- if (ablkctx->aes_generic)
- crypto_free_cipher(ablkctx->aes_generic);
- }
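- /* Map a digest size to the hardware MAC key size, auth mode and partial
-  * hash result size.
-  */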
- static int get_alg_config(struct algo_param *params,
- unsigned int auth_size)
- {
- switch (auth_size) {
- case SHA1_DIGEST_SIZE:
- params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
- params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
- params->result_size = SHA1_DIGEST_SIZE;
- break;
- case SHA224_DIGEST_SIZE:
- params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
- params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
- params->result_size = SHA256_DIGEST_SIZE;
- break;
- case SHA256_DIGEST_SIZE:
- params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
- params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
- params->result_size = SHA256_DIGEST_SIZE;
- break;
- case SHA384_DIGEST_SIZE:
- params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
- params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
- params->result_size = SHA512_DIGEST_SIZE;
- break;
- case SHA512_DIGEST_SIZE:
- params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
- params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
- params->result_size = SHA512_DIGEST_SIZE;
- break;
- default:
- pr_err("chcr : ERROR, unsupported digest size\n");
- return -EINVAL;
- }
- return 0;
- }
- static inline void chcr_free_shash(struct crypto_shash *base_hash)
- {
- crypto_free_shash(base_hash);
- }
- /**
- * create_hash_wr - Create hash work request
- * @req: hash request
- * @param: work request parameters
- */
- static struct sk_buff *create_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
- struct sk_buff *skb = NULL;
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
- struct chcr_wr *chcr_req;
- struct ulptx_sgl *ulptx;
- unsigned int nents = 0, transhdr_len;
- unsigned int temp = 0;
- gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- struct adapter *adap = padap(h_ctx(tfm)->dev);
- int error = 0;
- transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
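- /* The payload is sent inline ("immediate") in the work request when
- * it fits within SGE_MAX_WR_LEN; otherwise it is referenced through
- * a ULPTX scatter-gather list.
- */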
- req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
- param->sg_len) <= SGE_MAX_WR_LEN;
- nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
- CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
- nents += param->bfr_len ? 1 : 0;
- transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
- param->sg_len, 16) : (sgl_len(nents) * 8);
- transhdr_len = roundup(transhdr_len, 16);
- skb = alloc_skb(transhdr_len, flags);
- if (!skb)
- return ERR_PTR(-ENOMEM);
- chcr_req = __skb_put_zero(skb, transhdr_len);
- chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
- chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- chcr_req->sec_cpl.aadstart_cipherstop_hi =
- FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- chcr_req->sec_cpl.seqno_numivs =
- FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0);
- chcr_req->sec_cpl.ivgen_hdrlen =
- FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
- param->alg_prm.result_size);
- if (param->opad_needed)
- memcpy(chcr_req->key_ctx.key +
- ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
- hmacctx->opad, param->alg_prm.result_size);
- chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
- param->alg_prm.mk_size, 0,
- param->opad_needed,
- ((param->kctx_len +
- sizeof(chcr_req->key_ctx)) >> 4));
- chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
- ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
- DUMMY_BYTES);
- if (param->bfr_len != 0) {
- req_ctx->hctx_wr.dma_addr =
- dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
- param->bfr_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
- req_ctx->hctx_wr.dma_addr)) {
- error = -ENOMEM;
- goto err;
- }
- req_ctx->hctx_wr.dma_len = param->bfr_len;
- } else {
- req_ctx->hctx_wr.dma_addr = 0;
- }
- chcr_add_hash_src_ent(req, ulptx, param);
- /* Request up to max WR size */
- temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
- (param->sg_len + param->bfr_len) : 0);
- atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
- param->hash_size, transhdr_len,
- temp, 0);
- req_ctx->hctx_wr.skb = skb;
- return skb;
- err:
- kfree_skb(skb);
- return ERR_PTR(error);
- }
- static int chcr_ahash_update(struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
- struct sk_buff *skb;
- u8 remainder = 0, bs;
- unsigned int nbytes = req->nbytes;
- struct hash_wr_param params;
- int error, isfull = 0;
- bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
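- /* Hardware processes only block-aligned lengths: accumulate input
- * until at least one full block is available and carry the remainder
- * over in reqbfr for the next update.
- */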
- if (nbytes + req_ctx->reqlen >= bs) {
- remainder = (nbytes + req_ctx->reqlen) % bs;
- nbytes = nbytes + req_ctx->reqlen - remainder;
- } else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
- + req_ctx->reqlen, nbytes, 0);
- req_ctx->reqlen += nbytes;
- return 0;
- }
- chcr_init_hctx_per_wr(req_ctx);
- error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- params.kctx_len = roundup(params.alg_prm.result_size, 16);
- params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
- HASH_SPACE_LEFT(params.kctx_len), 0);
- if (params.sg_len > req->nbytes)
- params.sg_len = req->nbytes;
- params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
- req_ctx->reqlen;
- params.opad_needed = 0;
- params.more = 1;
- params.last = 0;
- params.bfr_len = req_ctx->reqlen;
- params.scmd1 = 0;
- req_ctx->hctx_wr.srcsg = req->src;
- params.hash_size = params.alg_prm.result_size;
- req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_hash_wr(req, &params);
- if (IS_ERR(skb)) {
- error = PTR_ERR(skb);
- goto unmap;
- }
- req_ctx->hctx_wr.processed += params.sg_len;
- if (remainder) {
- /* Swap buffers */
- swap(req_ctx->reqbfr, req_ctx->skbfr);
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->reqbfr, remainder, req->nbytes -
- remainder);
- }
- req_ctx->reqlen = remainder;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
- chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
- unmap:
- chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
- return error;
- }
- static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
- {
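- /* Build the final MD-style padding block: a 0x80 marker byte followed
- * by zeros, with the total message length in bits stored big-endian in
- * the last 8 bytes (offset 56 for 64-byte blocks, 120 for 128-byte
- * blocks).
- */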
- memset(bfr_ptr, 0, bs);
- *bfr_ptr = 0x80;
- if (bs == 64)
- *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
- else
- *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
- }
- static int chcr_ahash_final(struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct hash_wr_param params;
- struct sk_buff *skb;
- struct uld_ctx *u_ctx = NULL;
- u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- chcr_init_hctx_per_wr(req_ctx);
- u_ctx = ULD_CTX(h_ctx(rtfm));
- if (is_hmac(crypto_ahash_tfm(rtfm)))
- params.opad_needed = 1;
- else
- params.opad_needed = 0;
- params.sg_len = 0;
- req_ctx->hctx_wr.isfinal = 1;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- params.kctx_len = roundup(params.alg_prm.result_size, 16);
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.opad_needed = 1;
- params.kctx_len *= 2;
- } else {
- params.opad_needed = 0;
- }
- req_ctx->hctx_wr.result = 1;
- params.bfr_len = req_ctx->reqlen;
- req_ctx->data_len += params.bfr_len + params.sg_len;
- req_ctx->hctx_wr.srcsg = req->src;
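- /* With nothing buffered, synthesize the padding block ourselves and
- * send it with "more" set; otherwise the buffered bytes go out as the
- * true last block with scmd1 carrying the total length.
- */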
- if (req_ctx->reqlen == 0) {
- create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
- params.last = 0;
- params.more = 1;
- params.scmd1 = 0;
- params.bfr_len = bs;
- } else {
- params.scmd1 = req_ctx->data_len;
- params.last = 1;
- params.more = 0;
- }
- params.hash_size = crypto_ahash_digestsize(rtfm);
- skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
- req_ctx->reqlen = 0;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
- }
- static int chcr_ahash_finup(struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
- struct sk_buff *skb;
- struct hash_wr_param params;
- u8 bs;
- int error, isfull = 0;
- bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
- chcr_init_hctx_per_wr(req_ctx);
- error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- params.kctx_len = roundup(params.alg_prm.result_size, 16);
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.kctx_len *= 2;
- params.opad_needed = 1;
- } else {
- params.opad_needed = 0;
- }
- params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
- HASH_SPACE_LEFT(params.kctx_len), 0);
- if (params.sg_len < req->nbytes) {
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.kctx_len /= 2;
- params.opad_needed = 0;
- }
- params.last = 0;
- params.more = 1;
- params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
- - req_ctx->reqlen;
- params.hash_size = params.alg_prm.result_size;
- params.scmd1 = 0;
- } else {
- params.last = 1;
- params.more = 0;
- params.sg_len = req->nbytes;
- params.hash_size = crypto_ahash_digestsize(rtfm);
- params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
- params.sg_len;
- }
- params.bfr_len = req_ctx->reqlen;
- req_ctx->data_len += params.bfr_len + params.sg_len;
- req_ctx->hctx_wr.result = 1;
- req_ctx->hctx_wr.srcsg = req->src;
- if ((req_ctx->reqlen + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
- params.last = 0;
- params.more = 1;
- params.scmd1 = 0;
- params.bfr_len = bs;
- }
- skb = create_hash_wr(req, &params);
- if (IS_ERR(skb)) {
- error = PTR_ERR(skb);
- goto unmap;
- }
- req_ctx->reqlen = 0;
- req_ctx->hctx_wr.processed += params.sg_len;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
- chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
- unmap:
- chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
- return error;
- }
- static int chcr_ahash_digest(struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
- struct sk_buff *skb;
- struct hash_wr_param params;
- u8 bs;
- int error, isfull = 0;
- rtfm->init(req);
- bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
- chcr_init_hctx_per_wr(req_ctx);
- error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- params.kctx_len = roundup(params.alg_prm.result_size, 16);
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.kctx_len *= 2;
- params.opad_needed = 1;
- } else {
- params.opad_needed = 0;
- }
- params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
- HASH_SPACE_LEFT(params.kctx_len), 0);
- if (params.sg_len < req->nbytes) {
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.kctx_len /= 2;
- params.opad_needed = 0;
- }
- params.last = 0;
- params.more = 1;
- params.scmd1 = 0;
- params.sg_len = rounddown(params.sg_len, bs);
- params.hash_size = params.alg_prm.result_size;
- } else {
- params.sg_len = req->nbytes;
- params.hash_size = crypto_ahash_digestsize(rtfm);
- params.last = 1;
- params.more = 0;
- params.scmd1 = req->nbytes + req_ctx->data_len;
- }
- params.bfr_len = 0;
- req_ctx->hctx_wr.result = 1;
- req_ctx->hctx_wr.srcsg = req->src;
- req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req->nbytes == 0) {
- create_last_hash_block(req_ctx->reqbfr, bs, 0);
- params.more = 1;
- params.bfr_len = bs;
- }
- skb = create_hash_wr(req, &params);
- if (IS_ERR(skb)) {
- error = PTR_ERR(skb);
- goto unmap;
- }
- req_ctx->hctx_wr.processed += params.sg_len;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
- chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
- unmap:
- chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
- return error;
- }
- static int chcr_ahash_continue(struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
- struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
- struct sk_buff *skb;
- struct hash_wr_param params;
- u8 bs;
- int error;
- bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- params.kctx_len = roundup(params.alg_prm.result_size, 16);
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.kctx_len *= 2;
- params.opad_needed = 1;
- } else {
- params.opad_needed = 0;
- }
- params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
- HASH_SPACE_LEFT(params.kctx_len),
- hctx_wr->src_ofst);
- if ((params.sg_len + hctx_wr->processed) > req->nbytes)
- params.sg_len = req->nbytes - hctx_wr->processed;
- if (!hctx_wr->result ||
- ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- params.kctx_len /= 2;
- params.opad_needed = 0;
- }
- params.last = 0;
- params.more = 1;
- params.sg_len = rounddown(params.sg_len, bs);
- params.hash_size = params.alg_prm.result_size;
- params.scmd1 = 0;
- } else {
- params.last = 1;
- params.more = 0;
- params.hash_size = crypto_ahash_digestsize(rtfm);
- params.scmd1 = reqctx->data_len + params.sg_len;
- }
- params.bfr_len = 0;
- reqctx->data_len += params.sg_len;
- skb = create_hash_wr(req, &params);
- if (IS_ERR(skb)) {
- error = PTR_ERR(skb);
- goto err;
- }
- hctx_wr->processed += params.sg_len;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
- chcr_send_wr(skb);
- return 0;
- err:
- return error;
- }
- static inline void chcr_handle_ahash_resp(struct ahash_request *req,
- unsigned char *input,
- int err)
- {
- struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
- int digestsize, updated_digestsize;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
- if (input == NULL)
- goto out;
- digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- updated_digestsize = digestsize;
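- /* SHA-224/384 are truncations of SHA-256/512; the hardware returns the
- * full internal state, so partial hashes use the larger size.
- */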
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- if (hctx_wr->dma_addr) {
- dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
- hctx_wr->dma_len, DMA_TO_DEVICE);
- hctx_wr->dma_addr = 0;
- }
- if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
- req->nbytes)) {
- if (hctx_wr->result == 1) {
- hctx_wr->result = 0;
- memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
- digestsize);
- } else {
- memcpy(reqctx->partial_hash,
- input + sizeof(struct cpl_fw6_pld),
- updated_digestsize);
- }
- goto unmap;
- }
- memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
- updated_digestsize);
- err = chcr_ahash_continue(req);
- if (err)
- goto unmap;
- return;
- unmap:
- if (hctx_wr->is_sg_map)
- chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
- out:
- req->base.complete(&req->base, err);
- }
- /*
- * chcr_handle_resp - Handle completion of a crypto request and unmap
- * the DMA buffers associated with it
- * @req: crypto request
- */
- int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int err)
- {
- struct crypto_tfm *tfm = req->tfm;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct adapter *adap = padap(ctx->dev);
- switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
- input, err);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
- }
- atomic_inc(&adap->chcr_stats.complete);
- return err;
- }
- static int chcr_ahash_export(struct ahash_request *areq, void *out)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct chcr_ahash_req_ctx *state = out;
- state->reqlen = req_ctx->reqlen;
- state->data_len = req_ctx->data_len;
- memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
- memcpy(state->partial_hash, req_ctx->partial_hash,
- CHCR_HASH_MAX_DIGEST_SIZE);
- chcr_init_hctx_per_wr(state);
- return 0;
- }
- static int chcr_ahash_import(struct ahash_request *areq, const void *in)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->reqlen = state->reqlen;
- req_ctx->data_len = state->data_len;
- req_ctx->reqbfr = req_ctx->bfr1;
- req_ctx->skbfr = req_ctx->bfr2;
- memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
- memcpy(req_ctx->partial_hash, state->partial_hash,
- CHCR_HASH_MAX_DIGEST_SIZE);
- chcr_init_hctx_per_wr(req_ctx);
- return 0;
- }
- static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
- {
- struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int i, err = 0, updated_digestsize;
- SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
- /* Use the key to calculate the ipad and opad. The ipad is sent with
- * the first request's data and the opad with the final hash result;
- * they are stored in hmacctx->ipad and hmacctx->opad respectively.
- */
- shash->tfm = hmacctx->base_hash;
- shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
- if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- hmacctx->ipad);
- if (err)
- goto out;
- keylen = digestsize;
- } else {
- memcpy(hmacctx->ipad, key, keylen);
- }
- memset(hmacctx->ipad + keylen, 0, bs - keylen);
- memcpy(hmacctx->opad, hmacctx->ipad, bs);
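- /* XOR the block-padded key with the standard HMAC ipad/opad constants,
- * one word at a time.
- */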
- for (i = 0; i < bs / sizeof(int); i++) {
- *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
- *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
- }
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(shash, hmacctx->ipad,
- hmacctx->ipad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(shash, hmacctx->opad,
- hmacctx->opad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->opad, updated_digestsize);
- out:
- return err;
- }
- static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int key_len)
- {
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- unsigned short context_size = 0;
- int err;
- err = chcr_cipher_fallback_setkey(cipher, key, key_len);
- if (err)
- goto badkey_err;
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- return 0;
- badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- ablkctx->enckey_len = 0;
- return err;
- }
- static int chcr_sha_init(struct ahash_request *areq)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- int digestsize = crypto_ahash_digestsize(tfm);
- req_ctx->data_len = 0;
- req_ctx->reqlen = 0;
- req_ctx->reqbfr = req_ctx->bfr1;
- req_ctx->skbfr = req_ctx->bfr2;
- copy_hash_init_values(req_ctx->partial_hash, digestsize);
- return 0;
- }
- static int chcr_sha_cra_init(struct crypto_tfm *tfm)
- {
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct chcr_ahash_req_ctx));
- return chcr_device_init(crypto_tfm_ctx(tfm));
- }
- static int chcr_hmac_init(struct ahash_request *areq)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
- struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
- unsigned int digestsize = crypto_ahash_digestsize(rtfm);
- unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- chcr_sha_init(areq);
- req_ctx->data_len = bs;
- if (is_hmac(crypto_ahash_tfm(rtfm))) {
- if (digestsize == SHA224_DIGEST_SIZE)
- memcpy(req_ctx->partial_hash, hmacctx->ipad,
- SHA256_DIGEST_SIZE);
- else if (digestsize == SHA384_DIGEST_SIZE)
- memcpy(req_ctx->partial_hash, hmacctx->ipad,
- SHA512_DIGEST_SIZE);
- else
- memcpy(req_ctx->partial_hash, hmacctx->ipad,
- digestsize);
- }
- return 0;
- }
- static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
- {
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- unsigned int digestsize =
- crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct chcr_ahash_req_ctx));
- hmacctx->base_hash = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->base_hash))
- return PTR_ERR(hmacctx->base_hash);
- return chcr_device_init(crypto_tfm_ctx(tfm));
- }
- static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
- {
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->base_hash) {
- chcr_free_shash(hmacctx->base_hash);
- hmacctx->base_hash = NULL;
- }
- }
- inline void chcr_aead_common_exit(struct aead_request *req)
- {
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- }
- static int chcr_aead_common_init(struct aead_request *req)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL;
- /* validate key size */
- if (aeadctx->enckey_len == 0)
- goto err;
- if (reqctx->op && req->cryptlen < authsize)
- goto err;
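- /* CCM requests reserve scratch space immediately after the IV for the
- * B0 block and the encoded AAD length.
- */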
- if (reqctx->b0_len)
- reqctx->scratch_pad = reqctx->iv + IV;
- else
- reqctx->scratch_pad = NULL;
- error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
- reqctx->op);
- if (error) {
- error = -ENOMEM;
- goto err;
- }
- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
- CHCR_SRC_SG_SIZE, 0);
- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
- CHCR_SRC_SG_SIZE, req->assoclen);
- return 0;
- err:
- return error;
- }
- static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
- int aadmax, int wrlen,
- unsigned short op_type)
- {
- unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
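- /* Fall back to software when there is no payload or when the
- * destination SGL, AAD length or total WR length exceeds what the
- * hardware accepts.
- */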
- if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
- dst_nents > MAX_DSGL_ENT ||
- (req->assoclen > aadmax) ||
- (wrlen > SGE_MAX_WR_LEN))
- return 1;
- return 0;
- }
- static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct aead_request *subreq = aead_request_ctx(req);
- aead_request_set_tfm(subreq, aeadctx->sw_cipher);
- aead_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
- aead_request_set_ad(subreq, req->assoclen);
- return op_type ? crypto_aead_decrypt(subreq) :
- crypto_aead_encrypt(subreq);
- }
- static struct sk_buff *create_authenc_wr(struct aead_request *req,
- unsigned short qid,
- int size)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct sk_buff *skb = NULL;
- struct chcr_wr *chcr_req;
- struct cpl_rx_phys_dsgl *phys_cpl;
- struct ulptx_sgl *ulptx;
- unsigned int transhdr_len;
- unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
- unsigned int kctx_len = 0, dnents;
- unsigned int assoclen = req->assoclen;
- unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL;
- int null = 0;
- gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
- if (req->cryptlen == 0)
- return NULL;
- reqctx->b0_len = 0;
- error = chcr_aead_common_init(req);
- if (error)
- return ERR_PTR(error);
- if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- null = 1;
- assoclen = 0;
- reqctx->aad_nents = 0;
- }
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
- dnents += MIN_AUTH_SG; // For IV
- dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- - sizeof(chcr_req->key_ctx);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
- SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
- + MIN_GCM_SG) * 8);
- transhdr_len += temp;
- transhdr_len = roundup(transhdr_len, 16);
- if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
- transhdr_len, reqctx->op)) {
- atomic_inc(&adap->chcr_stats.fallback);
- chcr_aead_common_exit(req);
- return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
- }
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
- if (!skb) {
- error = -ENOMEM;
- goto err;
- }
- chcr_req = __skb_put_zero(skb, transhdr_len);
- temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
- /*
- * Input order is AAD, IV and payload, where the IV is included as
- * part of the auth data. All other fields are filled according
- * to the hardware spec.
- */
- chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
- assoclen + 1);
- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
- chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1,
- (temp & 0x1F0) >> 4);
- chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
- temp & 0xF,
- null ? 0 : assoclen + IV + 1,
- temp, temp);
- if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
- temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
- else
- temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
- chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
- (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
- temp,
- actx->auth_mode, aeadctx->hmac_ctrl,
- IV >> 1);
- chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 0, dst_size);
- chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (reqctx->op == CHCR_ENCRYPT_OP ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
- memcpy(chcr_req->key_ctx.key, aeadctx->key,
- aeadctx->enckey_len);
- else
- memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
- aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
- actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
- if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
- CTR_RFC3686_IV_SIZE);
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
- CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
- } else {
- memcpy(reqctx->iv, req->iv, IV);
- }
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
- atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
- create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
- transhdr_len, temp, 0);
- reqctx->skb = skb;
- return skb;
- err:
- chcr_aead_common_exit(req);
- return ERR_PTR(error);
- }
- int chcr_aead_dma_map(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
- {
- int error;
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int authsize = crypto_aead_authsize(tfm);
- int dst_size;
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
- if (!req->cryptlen || !dst_size)
- return 0;
- reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, reqctx->iv_dma))
- return -ENOMEM;
- if (reqctx->b0_len)
- reqctx->b0_dma = reqctx->iv_dma + IV;
- else
- reqctx->b0_dma = 0;
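- /* An in-place request needs a single bidirectional mapping; otherwise
- * map src for device reads and dst for device writes separately.
- */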
- if (req->src == req->dst) {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
- if (!error)
- goto err;
- } else {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- if (!error)
- goto err;
- error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
- DMA_FROM_DEVICE);
- if (!error) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- goto err;
- }
- }
- return 0;
- err:
- /* undo the full IV + B0 mapping done above */
- dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
- DMA_BIDIRECTIONAL);
- return -ENOMEM;
- }
- void chcr_aead_dma_unmap(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
- {
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int authsize = crypto_aead_authsize(tfm);
- int dst_size;
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
- if (!req->cryptlen || !dst_size)
- return;
- dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
- DMA_BIDIRECTIONAL);
- if (req->src == req->dst) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
- } else {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
- DMA_FROM_DEVICE);
- }
- }
- void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen)
- {
- struct ulptx_walk ulp_walk;
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- if (reqctx->imm) {
- u8 *buf = (u8 *)ulptx;
- if (reqctx->b0_len) {
- memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
- buf += reqctx->b0_len;
- }
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, assoclen, 0);
- buf += assoclen;
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, req->cryptlen, req->assoclen);
- } else {
- ulptx_walk_init(&ulp_walk, ulptx);
- if (reqctx->b0_len)
- ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
- &reqctx->b0_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
- req->assoclen);
- ulptx_walk_end(&ulp_walk);
- }
- }
- void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short qid)
- {
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct dsgl_walk dsgl_walk;
- unsigned int authsize = crypto_aead_authsize(tfm);
- struct chcr_context *ctx = a_ctx(tfm);
- u32 temp;
- dsgl_walk_init(&dsgl_walk, phys_cpl);
- if (reqctx->b0_len)
- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
- }
- void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- void *ulptx,
- struct cipher_wr_param *wrparam)
- {
- struct ulptx_walk ulp_walk;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- u8 *buf = ulptx;
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
- if (reqctx->imm) {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, wrparam->bytes, reqctx->processed);
- } else {
- ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
- ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
- reqctx->src_ofst);
- reqctx->srcsg = ulp_walk.last_sg;
- reqctx->src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
- }
- }
- void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid)
- {
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
- struct chcr_context *ctx = c_ctx(tfm);
- struct dsgl_walk dsgl_walk;
- dsgl_walk_init(&dsgl_walk, phys_cpl);
- dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
- reqctx->dst_ofst);
- reqctx->dstsg = dsgl_walk.last_sg;
- reqctx->dst_ofst = dsgl_walk.last_sg_len;
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
- }
- void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param)
- {
- struct ulptx_walk ulp_walk;
- struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- if (reqctx->hctx_wr.imm) {
- u8 *buf = (u8 *)ulptx;
- if (param->bfr_len) {
- memcpy(buf, reqctx->reqbfr, param->bfr_len);
- buf += param->bfr_len;
- }
- sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
- sg_nents(reqctx->hctx_wr.srcsg), buf,
- param->sg_len, 0);
- } else {
- ulptx_walk_init(&ulp_walk, ulptx);
- if (param->bfr_len)
- ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->hctx_wr.dma_addr);
- ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
- param->sg_len, reqctx->hctx_wr.src_ofst);
- reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
- reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
- }
- }
- int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- int error = 0;
- if (!req->nbytes)
- return 0;
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- if (!error)
- return -ENOMEM;
- req_ctx->hctx_wr.is_sg_map = 1;
- return 0;
- }
- void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req)
- {
- struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
- if (!req->nbytes)
- return;
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- req_ctx->hctx_wr.is_sg_map = 0;
- }
- int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
- {
- int error;
- if (req->src == req->dst) {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
- if (!error)
- goto err;
- } else {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- if (!error)
- goto err;
- error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
- DMA_FROM_DEVICE);
- if (!error) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- goto err;
- }
- }
- return 0;
- err:
- return -ENOMEM;
- }
- void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
- {
- if (req->src == req->dst) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
- } else {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
- DMA_FROM_DEVICE);
- }
- }
- static int set_msg_len(u8 *block, unsigned int msglen, int csize)
- {
- __be32 data;
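- /* Encode msglen big-endian into the trailing csize bytes of the CCM
- * length field, rejecting messages too large for a csize-byte field.
- */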
- memset(block, 0, csize);
- block += csize;
- if (csize >= 4)
- csize = 4;
- else if (msglen > (unsigned int)(1 << (8 * csize)))
- return -EOVERFLOW;
- data = cpu_to_be32(msglen);
- memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
- return 0;
- }
- static void generate_b0(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
- unsigned short op_type)
- {
- unsigned int l, lp, m;
- int rc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- u8 *b0 = reqctx->scratch_pad;
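- /* B0 layout per RFC 3610: a flags octet (encoding L', the adata bit
- * and the tag length), the nonce, and the message length field.
- */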
- m = crypto_aead_authsize(aead);
- memcpy(b0, reqctx->iv, 16);
- lp = b0[0];
- l = lp + 1;
- /* set m, bits 3-5 */
- *b0 |= (8 * ((m - 2) / 2));
- /* set adata, bit 6, if associated data is used */
- if (req->assoclen)
- *b0 |= 64;
- rc = set_msg_len(b0 + 16 - l,
- (op_type == CHCR_DECRYPT_OP) ?
- req->cryptlen - m : req->cryptlen, l);
- }
- static inline int crypto_ccm_check_iv(const u8 *iv)
- {
- /* 2 <= L <= 8, so 1 <= L' <= 7. */
- if (iv[0] < 1 || iv[0] > 7)
- return -EINVAL;
- return 0;
- }
- static int ccm_format_packet(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
- unsigned int sub_type,
- unsigned short op_type,
- unsigned int assoclen)
- {
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- int rc = 0;
- if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
- reqctx->iv[0] = 3;
- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
- memcpy(reqctx->iv + 4, req->iv, 8);
- memset(reqctx->iv + 12, 0, 4);
- } else {
- memcpy(reqctx->iv, req->iv, 16);
- }
- if (assoclen)
- *((unsigned short *)(reqctx->scratch_pad + 16)) =
- htons(assoclen);
- generate_b0(req, aeadctx, op_type);
- /* zero the ctr value */
- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
- return rc;
- }
- static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
- unsigned int dst_size,
- struct aead_request *req,
- unsigned short op_type)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
- unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
- unsigned int ccm_xtra;
- unsigned char tag_offset = 0, auth_offset = 0;
- unsigned int assoclen;
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
- assoclen = req->assoclen - 8;
- else
- assoclen = req->assoclen;
- ccm_xtra = CCM_B0_SIZE +
- ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
- auth_offset = req->cryptlen ?
- (assoclen + IV + 1 + ccm_xtra) : 0;
- if (op_type == CHCR_DECRYPT_OP) {
- if (crypto_aead_authsize(tfm) != req->cryptlen)
- tag_offset = crypto_aead_authsize(tfm);
- else
- auth_offset = 0;
- }
- sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, assoclen + 1 + ccm_xtra);
- sec_cpl->pldlen =
- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
- /* For CCM there will always be a B0 block, so AAD start is always 1 */
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- 1, assoclen + ccm_xtra, assoclen
- + IV + 1 + ccm_xtra, 0);
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
- auth_offset, tag_offset,
- (op_type == CHCR_ENCRYPT_OP) ? 0 :
- crypto_aead_authsize(tfm));
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
- (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
- cipher_mode, mac_mode,
- aeadctx->hmac_ctrl, IV >> 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
- 0, dst_size);
- }
- static int aead_ccm_validate_input(unsigned short op_type,
- struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
- unsigned int sub_type)
- {
- if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
- if (crypto_ccm_check_iv(req->iv)) {
- pr_err("CCM: IV check fails\n");
- return -EINVAL;
- }
- } else {
- if (req->assoclen != 16 && req->assoclen != 20) {
- pr_err("RFC4309: Invalid AAD length %d\n",
- req->assoclen);
- return -EINVAL;
- }
- }
- return 0;
- }
- static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
- unsigned short qid,
- int size)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct sk_buff *skb = NULL;
- struct chcr_wr *chcr_req;
- struct cpl_rx_phys_dsgl *phys_cpl;
- struct ulptx_sgl *ulptx;
- unsigned int transhdr_len;
- unsigned int dst_size = 0, kctx_len, dnents, temp;
- unsigned int sub_type, assoclen = req->assoclen;
- unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL;
- gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
- sub_type = get_aead_subtype(tfm);
- if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
- assoclen -= 8;
- reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
- error = chcr_aead_common_init(req);
- if (error)
- return ERR_PTR(error);
- error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
- if (error)
- goto err;
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_CCM_SG; // For IV and B0
- dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
- reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
- reqctx->b0_len, 16) :
- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
- MIN_CCM_SG) * 8);
- transhdr_len += temp;
- transhdr_len = roundup(transhdr_len, 16);
- if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
- reqctx->b0_len, transhdr_len, reqctx->op)) {
- atomic_inc(&adap->chcr_stats.fallback);
- chcr_aead_common_exit(req);
- return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
- }
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
- if (!skb) {
- error = -ENOMEM;
- goto err;
- }
- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
- fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
- chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
- aeadctx->key, aeadctx->enckey_len);
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
- if (error)
- goto dstmap_fail;
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
- atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
- reqctx->b0_len) : 0);
- create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
- transhdr_len, temp, 0);
- reqctx->skb = skb;
- return skb;
- dstmap_fail:
- kfree_skb(skb);
- err:
- chcr_aead_common_exit(req);
- return ERR_PTR(error);
- }
- static struct sk_buff *create_gcm_wr(struct aead_request *req,
- unsigned short qid,
- int size)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct sk_buff *skb = NULL;
- struct chcr_wr *chcr_req;
- struct cpl_rx_phys_dsgl *phys_cpl;
- struct ulptx_sgl *ulptx;
- unsigned int transhdr_len, dnents = 0;
- unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
- unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL;
- gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
- assoclen = req->assoclen - 8;
- reqctx->b0_len = 0;
- error = chcr_aead_common_init(req);
- if (error)
- return ERR_PTR(error);
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_GCM_SG; // For IV
- dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
- SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
- (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
- transhdr_len += temp;
- transhdr_len = roundup(transhdr_len, 16);
- if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
- transhdr_len, reqctx->op)) {
- atomic_inc(&adap->chcr_stats.fallback);
- chcr_aead_common_exit(req);
- return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
- }
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
- if (!skb) {
- error = -ENOMEM;
- goto err;
- }
- chcr_req = __skb_put_zero(skb, transhdr_len);
- // Offset of tag from end
- temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
- chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->dev->rx_channel_id, 2,
- (assoclen + 1));
- chcr_req->sec_cpl.pldlen =
- htonl(assoclen + IV + req->cryptlen);
- chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
- temp, temp);
- chcr_req->sec_cpl.seqno_numivs =
- FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
- CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_GCM,
- CHCR_SCMD_AUTH_MODE_GHASH,
- aeadctx->hmac_ctrl, IV >> 1);
- chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 0, dst_size);
- chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
- GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
- /* prepare a 16 byte iv */
- /* S A L T | IV | 0x00000001 */
- if (get_aead_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
- memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
- } else {
- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
- }
- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
- atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
- create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
- transhdr_len, temp, reqctx->verify);
- reqctx->skb = skb;
- return skb;
- err:
- chcr_aead_common_exit(req);
- return ERR_PTR(error);
- }
- static int chcr_aead_cra_init(struct crypto_aead *tfm)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct aead_alg *alg = crypto_aead_alg(tfm);
- aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(aeadctx->sw_cipher))
- return PTR_ERR(aeadctx->sw_cipher);
- crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
- sizeof(struct aead_request) +
- crypto_aead_reqsize(aeadctx->sw_cipher)));
- return chcr_device_init(a_ctx(tfm));
- }
- static void chcr_aead_cra_exit(struct crypto_aead *tfm)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- crypto_free_aead(aeadctx->sw_cipher);
- }
- static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
- aeadctx->mayverify = VERIFY_HW;
- return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
- }
- static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- u32 maxauth = crypto_aead_maxauthsize(tfm);
- /* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
- * does not hold for SHA1, so the authsize == 12 check must come before
- * authsize == (maxauth >> 1).
- */
- if (authsize == ICV_4) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
- aeadctx->mayverify = VERIFY_HW;
- } else if (authsize == ICV_6) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
- aeadctx->mayverify = VERIFY_HW;
- } else if (authsize == ICV_10) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
- aeadctx->mayverify = VERIFY_HW;
- } else if (authsize == ICV_12) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
- } else if (authsize == ICV_14) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
- } else if (authsize == (maxauth >> 1)) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
- aeadctx->mayverify = VERIFY_HW;
- } else if (authsize == maxauth) {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
- aeadctx->mayverify = VERIFY_HW;
- } else {
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
- aeadctx->mayverify = VERIFY_SW;
- }
- return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
- }
- static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- switch (authsize) {
- case ICV_4:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_8:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_14:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_16:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_13:
- case ICV_15:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
- aeadctx->mayverify = VERIFY_SW;
- break;
- default:
- return -EINVAL;
- }
- return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
- }
- static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- switch (authsize) {
- case ICV_8:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_16:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
- aeadctx->mayverify = VERIFY_HW;
- break;
- default:
- return -EINVAL;
- }
- return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
- }
- static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- switch (authsize) {
- case ICV_4:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_6:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_8:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_10:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_14:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
- break;
- case ICV_16:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
- aeadctx->mayverify = VERIFY_HW;
- break;
- default:
- return -EINVAL;
- }
- return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
- }
- static int chcr_ccm_common_setkey(struct crypto_aead *aead,
- const u8 *key,
- unsigned int keylen)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
- unsigned char ck_size, mk_size;
- int key_ctx_size = 0;
- key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
- } else if (keylen == AES_KEYSIZE_192) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
- } else if (keylen == AES_KEYSIZE_256) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
- } else {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- aeadctx->enckey_len = 0;
- return -EINVAL;
- }
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
- key_ctx_size >> 4);
- memcpy(aeadctx->key, key, keylen);
- aeadctx->enckey_len = keylen;
- return 0;
- }
- static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
- const u8 *key,
- unsigned int keylen)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
- int error;
- crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
- error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
- if (error)
- return error;
- return chcr_ccm_common_setkey(aead, key, keylen);
- }
- static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
- int error;
- if (keylen < 3) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- aeadctx->enckey_len = 0;
- return -EINVAL;
- }
- crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
- error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
- if (error)
- return error;
- keylen -= 3;
- memcpy(aeadctx->salt, key + keylen, 3);
- return chcr_ccm_common_setkey(aead, key, keylen);
- }
- static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
- struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
- struct crypto_cipher *cipher;
- unsigned int ck_size;
- int ret = 0, key_ctx_size = 0;
- aeadctx->enckey_len = 0;
- crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
- & CRYPTO_TFM_REQ_MASK);
- ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
- if (ret)
- goto out;
- if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
- keylen > 3) {
- keylen -= 4; /* nonce/salt is present in the last 4 bytes */
- memcpy(aeadctx->salt, key + keylen, 4);
- }
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else if (keylen == AES_KEYSIZE_192) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
- } else if (keylen == AES_KEYSIZE_256) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
- } else {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- pr_err("GCM: Invalid key length %d\n", keylen);
- ret = -EINVAL;
- goto out;
- }
- memcpy(aeadctx->key, key, keylen);
- aeadctx->enckey_len = keylen;
- key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
- AEAD_H_SIZE;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
- 0, 0,
- key_ctx_size >> 4);
- /* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
- * key context.
- */
- cipher = crypto_alloc_cipher("aes-generic", 0, 0);
- if (IS_ERR(cipher)) {
- aeadctx->enckey_len = 0;
- ret = -ENOMEM;
- goto out;
- }
- ret = crypto_cipher_setkey(cipher, key, keylen);
- if (ret) {
- aeadctx->enckey_len = 0;
- goto out1;
- }
- memset(gctx->ghash_h, 0, AEAD_H_SIZE);
- crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
- out1:
- crypto_free_cipher(cipher);
- out:
- return ret;
- }
- static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- unsigned int keylen)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
- struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
- /* the key blob contains both the auth and cipher keys */
- struct crypto_authenc_keys keys;
- unsigned int bs, subtype;
- unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
- int err = 0, i, key_ctx_len = 0;
- unsigned char ck_size = 0;
- unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
- struct algo_param param;
- int align;
- u8 *o_ptr = NULL;
- crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
- & CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
- if (err)
- goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
- }
- if (get_alg_config(&param, max_authsize)) {
- pr_err("chcr : Unsupported digest size\n");
- goto out;
- }
- subtype = get_aead_subtype(authenc);
- if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
- goto out;
- memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
- - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
- keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
- }
- if (keys.enckeylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else if (keys.enckeylen == AES_KEYSIZE_192) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
- } else if (keys.enckeylen == AES_KEYSIZE_256) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
- } else {
- pr_err("chcr : Unsupported cipher key\n");
- goto out;
- }
- /* Copy only the encryption key. The authkey is used here to
- * generate h(ipad) and h(opad), so it is not needed again.
- * authkeylen holds the size of the hash digest.
- */
- memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
- aeadctx->enckey_len = keys.enckeylen;
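- /* For the CBC modes a decrypt-direction (round-reversed) AES key
- * is also derived for the hardware; enckey_len << 3 converts the
- * key length from bytes to bits.
- */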
- if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
- subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
- }
- base_hash = chcr_alloc_shash(max_authsize);
- if (IS_ERR(base_hash)) {
- pr_err("chcr : Base driver cannot be loaded\n");
- aeadctx->enckey_len = 0;
- memzero_explicit(&keys, sizeof(keys));
- return -EINVAL;
- }
- {
- SHASH_DESC_ON_STACK(shash, base_hash);
- shash->tfm = base_hash;
- shash->flags = crypto_shash_get_flags(base_hash);
- bs = crypto_shash_blocksize(base_hash);
- align = KEYCTX_ALIGN_PAD(max_authsize);
- o_ptr = actx->h_iopad + param.result_size + align;
- if (keys.authkeylen > bs) {
- err = crypto_shash_digest(shash, keys.authkey,
- keys.authkeylen,
- o_ptr);
- if (err) {
- pr_err("chcr : Base driver cannot be loaded\n");
- goto out;
- }
- keys.authkeylen = max_authsize;
- } else
- memcpy(o_ptr, keys.authkey, keys.authkeylen);
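- /* Background: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
- * Hashing one block of K ^ ipad and K ^ opad below produces the
- * partial hash states the hardware resumes from, so the key is
- * not rehashed on every request.
- */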
- /* Compute the ipad-digest*/
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= IPAD_DATA;
- if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
- max_authsize))
- goto out;
- /* Compute the opad-digest */
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= OPAD_DATA;
- if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
- goto out;
- /* convert the ipad and opad digest to network order */
- chcr_change_order(actx->h_iopad, param.result_size);
- chcr_change_order(o_ptr, param.result_size);
- key_ctx_len = sizeof(struct _key_ctx) +
- roundup(keys.enckeylen, 16) +
- (param.result_size + align) * 2;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
- 0, 1, key_ctx_len >> 4);
- actx->auth_mode = param.auth_mode;
- chcr_free_shash(base_hash);
- memzero_explicit(&keys, sizeof(keys));
- return 0;
- }
- out:
- aeadctx->enckey_len = 0;
- memzero_explicit(&keys, sizeof(keys));
- if (!IS_ERR(base_hash))
- chcr_free_shash(base_hash);
- return -EINVAL;
- }
- static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
- const u8 *key, unsigned int keylen)
- {
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
- struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
- /* the key blob contains both the auth and cipher keys */
- struct crypto_authenc_keys keys;
- int err;
- unsigned int subtype;
- int key_ctx_len = 0;
- unsigned char ck_size = 0;
- crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
- & CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
- if (err)
- goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
- }
- subtype = get_aead_subtype(authenc);
- if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
- goto out;
- memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
- - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
- keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
- }
- if (keys.enckeylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else if (keys.enckeylen == AES_KEYSIZE_192) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
- } else if (keys.enckeylen == AES_KEYSIZE_256) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
- } else {
- pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
- goto out;
- }
- memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
- aeadctx->enckey_len = keys.enckeylen;
- if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
- subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
- }
- key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
- 0, key_ctx_len >> 4);
- actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
- memzero_explicit(&keys, sizeof(keys));
- return 0;
- out:
- aeadctx->enckey_len = 0;
- memzero_explicit(&keys, sizeof(keys));
- return -EINVAL;
- }
- static int chcr_aead_op(struct aead_request *req,
- int size,
- create_wr_t create_wr_fn)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
- int isfull = 0;
- if (!a_ctx(tfm)->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(a_ctx(tfm));
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
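- /* Standard crypto API backlog semantics: with MAY_BACKLOG set the
- * request is still submitted and -EBUSY (below) tells the caller
- * it was backlogged; otherwise a full queue fails with -ENOSPC.
- */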
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR_OR_NULL(skb))
- return skb ? PTR_ERR(skb) : -ENOMEM;
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
- }
- static int chcr_aead_encrypt(struct aead_request *req)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- reqctx->verify = VERIFY_HW;
- reqctx->op = CHCR_ENCRYPT_OP;
- switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
- case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
- case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
- case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
- return chcr_aead_op(req, 0, create_authenc_wr);
- case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
- case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
- return chcr_aead_op(req, 0, create_aead_ccm_wr);
- default:
- return chcr_aead_op(req, 0, create_gcm_wr);
- }
- }
- static int chcr_aead_decrypt(struct aead_request *req)
- {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- int size;
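- /* When the tag is to be verified in software, maxauthsize extra
- * bytes are requested in the work request so the computed tag is
- * available to the driver for comparison.
- */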
- if (aeadctx->mayverify == VERIFY_SW) {
- size = crypto_aead_maxauthsize(tfm);
- reqctx->verify = VERIFY_SW;
- } else {
- size = 0;
- reqctx->verify = VERIFY_HW;
- }
- reqctx->op = CHCR_DECRYPT_OP;
- switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
- case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
- case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
- case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
- return chcr_aead_op(req, size, create_authenc_wr);
- case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
- case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
- return chcr_aead_op(req, size, create_aead_ccm_wr);
- default:
- return chcr_aead_op(req, size, create_gcm_wr);
- }
- }
- static struct chcr_alg_template driver_algs[] = {
- /* AES-CBC */
- {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
- .is_registered = 0,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_cbc_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
- .is_registered = 0,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = NULL,
- .cra_u.ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_xts_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
- .is_registered = 0,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_ctr_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
- .is_registered = 0,
- .alg.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_rfc3686_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = chcr_aes_rfc3686_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- .geniv = "seqiv",
- }
- }
- },
- /* SHA */
- {
- .type = CRYPTO_ALG_TYPE_AHASH,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-chcr",
- .cra_blocksize = SHA1_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AHASH,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-chcr",
- .cra_blocksize = SHA256_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AHASH,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-chcr",
- .cra_blocksize = SHA224_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AHASH,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha384",
- .cra_driver_name = "sha384-chcr",
- .cra_blocksize = SHA384_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AHASH,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512-chcr",
- .cra_blocksize = SHA512_BLOCK_SIZE,
- }
- }
- },
- /* HMAC */
- {
- .type = CRYPTO_ALG_TYPE_HMAC,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac-sha1-chcr",
- .cra_blocksize = SHA1_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_HMAC,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac-sha224-chcr",
- .cra_blocksize = SHA224_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_HMAC,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac-sha256-chcr",
- .cra_blocksize = SHA256_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_HMAC,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac-sha384-chcr",
- .cra_blocksize = SHA384_BLOCK_SIZE,
- }
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_HMAC,
- .is_registered = 0,
- .alg.hash = {
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac-sha512-chcr",
- .cra_blocksize = SHA512_BLOCK_SIZE,
- }
- }
- },
- /* AEAD */
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_gcm_ctx),
- },
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = GHASH_DIGEST_SIZE,
- .setkey = chcr_gcm_setkey,
- .setauthsize = chcr_gcm_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY + 1,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_gcm_ctx),
- },
- .ivsize = GCM_RFC4106_IV_SIZE,
- .maxauthsize = GHASH_DIGEST_SIZE,
- .setkey = chcr_gcm_setkey,
- .setauthsize = chcr_4106_4309_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "ccm(aes)",
- .cra_driver_name = "ccm-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = GHASH_DIGEST_SIZE,
- .setkey = chcr_aead_ccm_setkey,
- .setauthsize = chcr_ccm_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "rfc4309(ccm(aes))",
- .cra_driver_name = "rfc4309-ccm-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY + 1,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx),
- },
- .ivsize = 8,
- .maxauthsize = GHASH_DIGEST_SIZE,
- .setkey = chcr_aead_rfc4309_setkey,
- .setauthsize = chcr_4106_4309_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name =
- "authenc-hmac-sha1-cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name =
- "authenc-hmac-sha256-cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha224),cbc(aes))",
- .cra_driver_name =
- "authenc-hmac-sha224-cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha384),cbc(aes))",
- .cra_driver_name =
- "authenc-hmac-sha384-cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha512),cbc(aes))",
- .cra_driver_name =
- "authenc-hmac-sha512-cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(digest_null,cbc(aes))",
- .cra_driver_name =
- "authenc-digest_null-cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = 0,
- .setkey = chcr_aead_digest_null_setkey,
- .setauthsize = chcr_authenc_null_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
- .cra_driver_name =
- "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
- .cra_driver_name =
- "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
- .cra_driver_name =
- "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
- .cra_driver_name =
- "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
- .cra_driver_name =
- "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- .setkey = chcr_authenc_setkey,
- .setauthsize = chcr_authenc_setauthsize,
- }
- },
- {
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
- .is_registered = 0,
- .alg.aead = {
- .base = {
- .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
- .cra_driver_name =
- "authenc-digest_null-rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_priority = CHCR_AEAD_PRIORITY,
- .cra_ctxsize = sizeof(struct chcr_context) +
- sizeof(struct chcr_aead_ctx) +
- sizeof(struct chcr_authenc_ctx),
- },
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = 0,
- .setkey = chcr_aead_digest_null_setkey,
- .setauthsize = chcr_authenc_null_setauthsize,
- }
- },
- };
- /*
- * chcr_unregister_alg - Deregister crypto algorithms from the
- * kernel framework.
- */
- static int chcr_unregister_alg(void)
- {
- int i;
- for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- if (driver_algs[i].is_registered)
- crypto_unregister_alg(
- &driver_algs[i].alg.crypto);
- break;
- case CRYPTO_ALG_TYPE_AEAD:
- if (driver_algs[i].is_registered)
- crypto_unregister_aead(
- &driver_algs[i].alg.aead);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- if (driver_algs[i].is_registered)
- crypto_unregister_ahash(
- &driver_algs[i].alg.hash);
- break;
- }
- driver_algs[i].is_registered = 0;
- }
- return 0;
- }
- #define SZ_AHASH_CTX sizeof(struct chcr_context)
- #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
- #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
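- /* Context sizes reported to the crypto core: plain hash tfms use
- * the bare chcr context, while HMAC tfms also embed a
- * struct hmac_ctx.
- */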
- /*
- * chcr_register_alg - Register crypto algorithms with kernel framework.
- */
- static int chcr_register_alg(void)
- {
- struct crypto_alg ai;
- struct ahash_alg *a_hash;
- int err = 0, i;
- char *name = NULL;
- for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- if (driver_algs[i].is_registered)
- continue;
- switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- driver_algs[i].alg.crypto.cra_priority =
- CHCR_CRA_PRIORITY;
- driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
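- /* CRYPTO_ALG_NEED_FALLBACK: requests the hardware cannot service
- * are redirected to a software fallback tfm.
- */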
- driver_algs[i].alg.crypto.cra_flags =
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
- driver_algs[i].alg.crypto.cra_ctxsize =
- sizeof(struct chcr_context) +
- sizeof(struct ablk_ctx);
- driver_algs[i].alg.crypto.cra_alignmask = 0;
- driver_algs[i].alg.crypto.cra_type =
- &crypto_ablkcipher_type;
- err = crypto_register_alg(&driver_algs[i].alg.crypto);
- name = driver_algs[i].alg.crypto.cra_driver_name;
- break;
- case CRYPTO_ALG_TYPE_AEAD:
- driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
- driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
- driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
- driver_algs[i].alg.aead.init = chcr_aead_cra_init;
- driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
- driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
- err = crypto_register_aead(&driver_algs[i].alg.aead);
- name = driver_algs[i].alg.aead.base.cra_driver_name;
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- a_hash = &driver_algs[i].alg.hash;
- a_hash->update = chcr_ahash_update;
- a_hash->final = chcr_ahash_final;
- a_hash->finup = chcr_ahash_finup;
- a_hash->digest = chcr_ahash_digest;
- a_hash->export = chcr_ahash_export;
- a_hash->import = chcr_ahash_import;
- a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
- a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
- a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
- a_hash->halg.base.cra_alignmask = 0;
- a_hash->halg.base.cra_exit = NULL;
- if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
- a_hash->halg.base.cra_init = chcr_hmac_cra_init;
- a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
- a_hash->init = chcr_hmac_init;
- a_hash->setkey = chcr_ahash_setkey;
- a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
- } else {
- a_hash->init = chcr_sha_init;
- a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
- a_hash->halg.base.cra_init = chcr_sha_cra_init;
- }
- err = crypto_register_ahash(&driver_algs[i].alg.hash);
- ai = driver_algs[i].alg.hash.halg.base;
- name = ai.cra_driver_name;
- break;
- }
- if (err) {
- pr_err("chcr : %s : Algorithm registration failed\n",
- name);
- goto register_err;
- } else {
- driver_algs[i].is_registered = 1;
- }
- }
- return 0;
- register_err:
- chcr_unregister_alg();
- return err;
- }
- /*
- * start_crypto - Register the crypto algorithms.
- * This should be called once, when the first device comes up. After
- * this the kernel will start calling the driver APIs for crypto
- * operations.
- */
- int start_crypto(void)
- {
- return chcr_register_alg();
- }
- /*
- * stop_crypto - Deregister all the crypto algorithms from the kernel.
- * This should be called once, when the last device goes down. After
- * this the kernel will not call the driver APIs for crypto operations.
- */
- int stop_crypto(void)
- {
- chcr_unregister_alg();
- return 0;
- }
|