/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

/*
 * This file works with the SPU2 version of the SPU. SPU2 has different message
 * formats than the previous version of the SPU. All SPU message format
 * differences should be hidden in the spux.c,h files.
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include "util.h"
#include "spu.h"
#include "spu2.h"

#define SPU2_TX_STATUS_LEN 0  /* SPU2 has no STATUS in input packet */

/*
 * Controlled by pkt_stat_cnt field in CRYPTO_SS_SPU0_CORE_SPU2_CONTROL0
 * register. Defaults to 2.
 */
#define SPU2_RX_STATUS_LEN 2

enum spu2_proto_sel {
        SPU2_PROTO_RESV = 0,
        SPU2_MACSEC_SECTAG8_ECB = 1,
        SPU2_MACSEC_SECTAG8_SCB = 2,
        SPU2_MACSEC_SECTAG16 = 3,
        SPU2_MACSEC_SECTAG16_8_XPN = 4,
        SPU2_IPSEC = 5,
        SPU2_IPSEC_ESN = 6,
        SPU2_TLS_CIPHER = 7,
        SPU2_TLS_AEAD = 8,
        SPU2_DTLS_CIPHER = 9,
        SPU2_DTLS_AEAD = 10
};

char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
        "DES", "3DES"
};

char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
        "CCM", "GCM"
};

char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
        "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
        "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
        "SHA3-384", "SHA3-512"
};

char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
        "Rabin", "CCM", "GCM", "Reserved"
};

static char *spu2_ciph_type_name(enum spu2_cipher_type cipher_type)
{
        if (cipher_type >= SPU2_CIPHER_TYPE_LAST)
                return "Reserved";
        return spu2_cipher_type_names[cipher_type];
}

static char *spu2_ciph_mode_name(enum spu2_cipher_mode cipher_mode)
{
        if (cipher_mode >= SPU2_CIPHER_MODE_LAST)
                return "Reserved";
        return spu2_cipher_mode_names[cipher_mode];
}

static char *spu2_hash_type_name(enum spu2_hash_type hash_type)
{
        if (hash_type >= SPU2_HASH_TYPE_LAST)
                return "Reserved";
        return spu2_hash_type_names[hash_type];
}

static char *spu2_hash_mode_name(enum spu2_hash_mode hash_mode)
{
        if (hash_mode >= SPU2_HASH_MODE_LAST)
                return "Reserved";
        return spu2_hash_mode_names[hash_mode];
}

/*
 * Convert from a software cipher mode value to the corresponding value
 * for SPU2.
 */
static int spu2_cipher_mode_xlate(enum spu_cipher_mode cipher_mode,
                                  enum spu2_cipher_mode *spu2_mode)
{
        switch (cipher_mode) {
        case CIPHER_MODE_ECB:
                *spu2_mode = SPU2_CIPHER_MODE_ECB;
                break;
        case CIPHER_MODE_CBC:
                *spu2_mode = SPU2_CIPHER_MODE_CBC;
                break;
        case CIPHER_MODE_OFB:
                *spu2_mode = SPU2_CIPHER_MODE_OFB;
                break;
        case CIPHER_MODE_CFB:
                *spu2_mode = SPU2_CIPHER_MODE_CFB;
                break;
        case CIPHER_MODE_CTR:
                *spu2_mode = SPU2_CIPHER_MODE_CTR;
                break;
        case CIPHER_MODE_CCM:
                *spu2_mode = SPU2_CIPHER_MODE_CCM;
                break;
        case CIPHER_MODE_GCM:
                *spu2_mode = SPU2_CIPHER_MODE_GCM;
                break;
        case CIPHER_MODE_XTS:
                *spu2_mode = SPU2_CIPHER_MODE_XTS;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
/**
 * spu2_cipher_xlate() - Convert a cipher {alg/mode/type} triple to a SPU2
 * cipher type and mode.
 * @cipher_alg: [in] cipher algorithm value from software enumeration
 * @cipher_mode: [in] cipher mode value from software enumeration
 * @cipher_type: [in] cipher type value from software enumeration
 * @spu2_type: [out] cipher type value used by spu2 hardware
 * @spu2_mode: [out] cipher mode value used by spu2 hardware
 *
 * Return: 0 if successful
 */
static int spu2_cipher_xlate(enum spu_cipher_alg cipher_alg,
                             enum spu_cipher_mode cipher_mode,
                             enum spu_cipher_type cipher_type,
                             enum spu2_cipher_type *spu2_type,
                             enum spu2_cipher_mode *spu2_mode)
{
        int err;

        err = spu2_cipher_mode_xlate(cipher_mode, spu2_mode);
        if (err) {
                flow_log("Invalid cipher mode %d\n", cipher_mode);
                return err;
        }

        switch (cipher_alg) {
        case CIPHER_ALG_NONE:
                *spu2_type = SPU2_CIPHER_TYPE_NONE;
                break;
        case CIPHER_ALG_RC4:
                /* SPU2 does not support RC4 */
                err = -EINVAL;
                *spu2_type = SPU2_CIPHER_TYPE_NONE;
                break;
        case CIPHER_ALG_DES:
                *spu2_type = SPU2_CIPHER_TYPE_DES;
                break;
        case CIPHER_ALG_3DES:
                *spu2_type = SPU2_CIPHER_TYPE_3DES;
                break;
        case CIPHER_ALG_AES:
                switch (cipher_type) {
                case CIPHER_TYPE_AES128:
                        *spu2_type = SPU2_CIPHER_TYPE_AES128;
                        break;
                case CIPHER_TYPE_AES192:
                        *spu2_type = SPU2_CIPHER_TYPE_AES192;
                        break;
                case CIPHER_TYPE_AES256:
                        *spu2_type = SPU2_CIPHER_TYPE_AES256;
                        break;
                default:
                        err = -EINVAL;
                }
                break;
        case CIPHER_ALG_LAST:
        default:
                err = -EINVAL;
                break;
        }

        if (err)
                flow_log("Invalid cipher alg %d or type %d\n",
                         cipher_alg, cipher_type);
        return err;
}

/*
 * Convert from a software hash mode value to the corresponding value
 * for SPU2. Note that HASH_MODE_NONE and HASH_MODE_XCBC have the same value.
 */
static int spu2_hash_mode_xlate(enum hash_mode hash_mode,
                                enum spu2_hash_mode *spu2_mode)
{
        switch (hash_mode) {
        case HASH_MODE_XCBC:
                *spu2_mode = SPU2_HASH_MODE_XCBC_MAC;
                break;
        case HASH_MODE_CMAC:
                *spu2_mode = SPU2_HASH_MODE_CMAC;
                break;
        case HASH_MODE_HMAC:
                *spu2_mode = SPU2_HASH_MODE_HMAC;
                break;
        case HASH_MODE_CCM:
                *spu2_mode = SPU2_HASH_MODE_CCM;
                break;
        case HASH_MODE_GCM:
                *spu2_mode = SPU2_HASH_MODE_GCM;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
/**
 * spu2_hash_xlate() - Convert a hash {alg/mode/type} triple to a SPU2 hash type
 * and mode.
 * @hash_alg: [in] hash algorithm value from software enumeration
 * @hash_mode: [in] hash mode value from software enumeration
 * @hash_type: [in] hash type value from software enumeration
 * @ciph_type: [in] cipher type value from software enumeration
 * @spu2_type: [out] hash type value used by SPU2 hardware
 * @spu2_mode: [out] hash mode value used by SPU2 hardware
 *
 * Return: 0 if successful
 */
static int
spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
                enum hash_type hash_type, enum spu_cipher_type ciph_type,
                enum spu2_hash_type *spu2_type, enum spu2_hash_mode *spu2_mode)
{
        int err;

        err = spu2_hash_mode_xlate(hash_mode, spu2_mode);
        if (err) {
                flow_log("Invalid hash mode %d\n", hash_mode);
                return err;
        }

        switch (hash_alg) {
        case HASH_ALG_NONE:
                *spu2_type = SPU2_HASH_TYPE_NONE;
                break;
        case HASH_ALG_MD5:
                *spu2_type = SPU2_HASH_TYPE_MD5;
                break;
        case HASH_ALG_SHA1:
                *spu2_type = SPU2_HASH_TYPE_SHA1;
                break;
        case HASH_ALG_SHA224:
                *spu2_type = SPU2_HASH_TYPE_SHA224;
                break;
        case HASH_ALG_SHA256:
                *spu2_type = SPU2_HASH_TYPE_SHA256;
                break;
        case HASH_ALG_SHA384:
                *spu2_type = SPU2_HASH_TYPE_SHA384;
                break;
        case HASH_ALG_SHA512:
                *spu2_type = SPU2_HASH_TYPE_SHA512;
                break;
        case HASH_ALG_AES:
                switch (ciph_type) {
                case CIPHER_TYPE_AES128:
                        *spu2_type = SPU2_HASH_TYPE_AES128;
                        break;
                case CIPHER_TYPE_AES192:
                        *spu2_type = SPU2_HASH_TYPE_AES192;
                        break;
                case CIPHER_TYPE_AES256:
                        *spu2_type = SPU2_HASH_TYPE_AES256;
                        break;
                default:
                        err = -EINVAL;
                }
                break;
        case HASH_ALG_SHA3_224:
                *spu2_type = SPU2_HASH_TYPE_SHA3_224;
                break;
        case HASH_ALG_SHA3_256:
                *spu2_type = SPU2_HASH_TYPE_SHA3_256;
                break;
        case HASH_ALG_SHA3_384:
                *spu2_type = SPU2_HASH_TYPE_SHA3_384;
                break;
        case HASH_ALG_SHA3_512:
                *spu2_type = SPU2_HASH_TYPE_SHA3_512;
                break;
        case HASH_ALG_LAST:
        default:
                err = -EINVAL;
                break;
        }

        if (err)
                flow_log("Invalid hash alg %d or type %d\n",
                         hash_alg, hash_type);
        return err;
}
/* Dump FMD ctrl0. The ctrl0 input is in host byte order */
static void spu2_dump_fmd_ctrl0(u64 ctrl0)
{
        enum spu2_cipher_type ciph_type;
        enum spu2_cipher_mode ciph_mode;
        enum spu2_hash_type hash_type;
        enum spu2_hash_mode hash_mode;
        char *ciph_name;
        char *ciph_mode_name;
        char *hash_name;
        char *hash_mode_name;
        u8 cfb;
        u8 proto;

        packet_log(" FMD CTRL0 %#16llx\n", ctrl0);
        if (ctrl0 & SPU2_CIPH_ENCRYPT_EN)
                packet_log(" encrypt\n");
        else
                packet_log(" decrypt\n");
        ciph_type = (ctrl0 & SPU2_CIPH_TYPE) >> SPU2_CIPH_TYPE_SHIFT;
        ciph_name = spu2_ciph_type_name(ciph_type);
        packet_log(" Cipher type: %s\n", ciph_name);
        if (ciph_type != SPU2_CIPHER_TYPE_NONE) {
                ciph_mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT;
                ciph_mode_name = spu2_ciph_mode_name(ciph_mode);
                packet_log(" Cipher mode: %s\n", ciph_mode_name);
        }
        cfb = (ctrl0 & SPU2_CFB_MASK) >> SPU2_CFB_MASK_SHIFT;
        packet_log(" CFB %#x\n", cfb);
        proto = (ctrl0 & SPU2_PROTO_SEL) >> SPU2_PROTO_SEL_SHIFT;
        packet_log(" protocol %#x\n", proto);
        if (ctrl0 & SPU2_HASH_FIRST)
                packet_log(" hash first\n");
        else
                packet_log(" cipher first\n");
        if (ctrl0 & SPU2_CHK_TAG)
                packet_log(" check tag\n");
        hash_type = (ctrl0 & SPU2_HASH_TYPE) >> SPU2_HASH_TYPE_SHIFT;
        hash_name = spu2_hash_type_name(hash_type);
        packet_log(" Hash type: %s\n", hash_name);
        if (hash_type != SPU2_HASH_TYPE_NONE) {
                hash_mode = (ctrl0 & SPU2_HASH_MODE) >> SPU2_HASH_MODE_SHIFT;
                hash_mode_name = spu2_hash_mode_name(hash_mode);
                packet_log(" Hash mode: %s\n", hash_mode_name);
        }
        if (ctrl0 & SPU2_CIPH_PAD_EN) {
                packet_log(" Cipher pad: %#2llx\n",
                           (ctrl0 & SPU2_CIPH_PAD) >> SPU2_CIPH_PAD_SHIFT);
        }
}
/* Dump FMD ctrl1. The ctrl1 input is in host byte order */
static void spu2_dump_fmd_ctrl1(u64 ctrl1)
{
        u8 hash_key_len;
        u8 ciph_key_len;
        u8 ret_iv_len;
        u8 iv_offset;
        u8 iv_len;
        u8 hash_tag_len;
        u8 ret_md;

        packet_log(" FMD CTRL1 %#16llx\n", ctrl1);
        if (ctrl1 & SPU2_TAG_LOC)
                packet_log(" Tag after payload\n");
        packet_log(" Msg includes ");
        if (ctrl1 & SPU2_HAS_FR_DATA)
                packet_log("FD ");
        if (ctrl1 & SPU2_HAS_AAD1)
                packet_log("AAD1 ");
        if (ctrl1 & SPU2_HAS_NAAD)
                packet_log("NAAD ");
        if (ctrl1 & SPU2_HAS_AAD2)
                packet_log("AAD2 ");
        if (ctrl1 & SPU2_HAS_ESN)
                packet_log("ESN ");
        packet_log("\n");
        hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
        packet_log(" Hash key len %u\n", hash_key_len);
        ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
        packet_log(" Cipher key len %u\n", ciph_key_len);
        if (ctrl1 & SPU2_GENIV)
                packet_log(" Generate IV\n");
        if (ctrl1 & SPU2_HASH_IV)
                packet_log(" IV included in hash\n");
        if (ctrl1 & SPU2_RET_IV)
                packet_log(" Return IV in output before payload\n");
        ret_iv_len = (ctrl1 & SPU2_RET_IV_LEN) >> SPU2_RET_IV_LEN_SHIFT;
        packet_log(" Length of returned IV %u bytes\n",
                   ret_iv_len ? ret_iv_len : 16);
        iv_offset = (ctrl1 & SPU2_IV_OFFSET) >> SPU2_IV_OFFSET_SHIFT;
        packet_log(" IV offset %u\n", iv_offset);
        iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
        packet_log(" Input IV len %u bytes\n", iv_len);
        hash_tag_len = (ctrl1 & SPU2_HASH_TAG_LEN) >> SPU2_HASH_TAG_LEN_SHIFT;
        packet_log(" Hash tag length %u bytes\n", hash_tag_len);
        packet_log(" Return ");
        ret_md = (ctrl1 & SPU2_RETURN_MD) >> SPU2_RETURN_MD_SHIFT;
        if (ret_md)
                packet_log("FMD ");
        if (ret_md == SPU2_RET_FMD_OMD)
                packet_log("OMD ");
        else if (ret_md == SPU2_RET_FMD_OMD_IV)
                packet_log("OMD IV ");
        if (ctrl1 & SPU2_RETURN_FD)
                packet_log("FD ");
        if (ctrl1 & SPU2_RETURN_AAD1)
                packet_log("AAD1 ");
        if (ctrl1 & SPU2_RETURN_NAAD)
                packet_log("NAAD ");
        if (ctrl1 & SPU2_RETURN_AAD2)
                packet_log("AAD2 ");
        if (ctrl1 & SPU2_RETURN_PAY)
                packet_log("Payload");
        packet_log("\n");
}
/* Dump FMD ctrl2. The ctrl2 input is in host byte order */
static void spu2_dump_fmd_ctrl2(u64 ctrl2)
{
        packet_log(" FMD CTRL2 %#16llx\n", ctrl2);
        packet_log(" AAD1 offset %llu length %llu bytes\n",
                   ctrl2 & SPU2_AAD1_OFFSET,
                   (ctrl2 & SPU2_AAD1_LEN) >> SPU2_AAD1_LEN_SHIFT);
        packet_log(" AAD2 offset %llu\n",
                   (ctrl2 & SPU2_AAD2_OFFSET) >> SPU2_AAD2_OFFSET_SHIFT);
        packet_log(" Payload offset %llu\n",
                   (ctrl2 & SPU2_PL_OFFSET) >> SPU2_PL_OFFSET_SHIFT);
}

/* Dump FMD ctrl3. The ctrl3 input is in host byte order */
static void spu2_dump_fmd_ctrl3(u64 ctrl3)
{
        packet_log(" FMD CTRL3 %#16llx\n", ctrl3);
        packet_log(" Payload length %llu bytes\n", ctrl3 & SPU2_PL_LEN);
        packet_log(" TLS length %llu bytes\n",
                   (ctrl3 & SPU2_TLS_LEN) >> SPU2_TLS_LEN_SHIFT);
}

static void spu2_dump_fmd(struct SPU2_FMD *fmd)
{
        spu2_dump_fmd_ctrl0(le64_to_cpu(fmd->ctrl0));
        spu2_dump_fmd_ctrl1(le64_to_cpu(fmd->ctrl1));
        spu2_dump_fmd_ctrl2(le64_to_cpu(fmd->ctrl2));
        spu2_dump_fmd_ctrl3(le64_to_cpu(fmd->ctrl3));
}
static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
                          u16 hash_iv_len, u16 ciph_iv_len)
{
        u8 *ptr = omd;

        packet_log(" OMD:\n");
        if (hash_key_len) {
                packet_log(" Hash Key Length %u bytes\n", hash_key_len);
                packet_dump(" KEY: ", ptr, hash_key_len);
                ptr += hash_key_len;
        }
        if (ciph_key_len) {
                packet_log(" Cipher Key Length %u bytes\n", ciph_key_len);
                packet_dump(" KEY: ", ptr, ciph_key_len);
                ptr += ciph_key_len;
        }
        if (hash_iv_len) {
                packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
                packet_dump(" hash IV: ", ptr, hash_iv_len);
                ptr += hash_iv_len;
        }
        if (ciph_iv_len) {
                packet_log(" Cipher IV Length %u bytes\n", ciph_iv_len);
                packet_dump(" cipher IV: ", ptr, ciph_iv_len);
        }
}

/* Dump a SPU2 header for debug */
void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
        struct SPU2_FMD *fmd = (struct SPU2_FMD *)buf;
        u8 *omd;
        u64 ctrl1;
        u16 hash_key_len;
        u16 ciph_key_len;
        u16 hash_iv_len;
        u16 ciph_iv_len;
        u16 omd_len;

        packet_log("\n");
        packet_log("SPU2 message header %p len: %u\n", buf, buf_len);
        spu2_dump_fmd(fmd);
        omd = (u8 *)(fmd + 1);
        ctrl1 = le64_to_cpu(fmd->ctrl1);
        hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
        ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
        hash_iv_len = 0;
        ciph_iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
        spu2_dump_omd(omd, hash_key_len, ciph_key_len, hash_iv_len,
                      ciph_iv_len);

        /* Double check sanity */
        omd_len = hash_key_len + ciph_key_len + hash_iv_len + ciph_iv_len;
        if (FMD_SIZE + omd_len != buf_len) {
                packet_log
                    (" Packet parsed incorrectly. buf_len %u, sum of MD %zu\n",
                     buf_len, FMD_SIZE + omd_len);
        }
        packet_log("\n");
}

/**
 * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
 * subsequent ablkcipher requests for this context.
 * @fmd: Fixed meta data to initialize
 * @spu2_type: Cipher algorithm
 * @spu2_mode: Cipher mode
 * @cipher_key_len: Length of cipher key, in bytes
 * @cipher_iv_len: Length of cipher initialization vector, in bytes
 *
 * Return: 0 (success)
 */
static int spu2_fmd_init(struct SPU2_FMD *fmd,
                         enum spu2_cipher_type spu2_type,
                         enum spu2_cipher_mode spu2_mode,
                         u32 cipher_key_len, u32 cipher_iv_len)
{
        u64 ctrl0;
        u64 ctrl1;
        u64 ctrl2;
        u64 ctrl3;
        u32 aad1_offset;
        u32 aad2_offset;
        u16 aad1_len = 0;
        u64 payload_offset;

        ctrl0 = (spu2_type << SPU2_CIPH_TYPE_SHIFT) |
            (spu2_mode << SPU2_CIPH_MODE_SHIFT);

        ctrl1 = (cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) |
            ((u64)cipher_iv_len << SPU2_IV_LEN_SHIFT) |
            ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT) | SPU2_RETURN_PAY;

        /*
         * AAD1 offset is from start of FD. FD length is always 0 for this
         * driver. So AAD1_offset is always 0.
         */
        aad1_offset = 0;
        aad2_offset = aad1_offset;
        payload_offset = 0;
        ctrl2 = aad1_offset |
            (aad1_len << SPU2_AAD1_LEN_SHIFT) |
            (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
            (payload_offset << SPU2_PL_OFFSET_SHIFT);

        ctrl3 = 0;

        fmd->ctrl0 = cpu_to_le64(ctrl0);
        fmd->ctrl1 = cpu_to_le64(ctrl1);
        fmd->ctrl2 = cpu_to_le64(ctrl2);
        fmd->ctrl3 = cpu_to_le64(ctrl3);

        return 0;
}

/**
 * spu2_fmd_ctrl0_write() - Write ctrl0 field in fixed metadata (FMD) field of
 * SPU request packet.
 * @fmd: Start of FMD field to be written
 * @is_inbound: true if decrypting. false if encrypting.
 * @auth_first: true if alg authenticates before encrypting
 * @protocol: protocol selector
 * @cipher_type: cipher algorithm
 * @cipher_mode: cipher mode
 * @auth_type: authentication type
 * @auth_mode: authentication mode
 */
static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
                                 bool is_inbound, bool auth_first,
                                 enum spu2_proto_sel protocol,
                                 enum spu2_cipher_type cipher_type,
                                 enum spu2_cipher_mode cipher_mode,
                                 enum spu2_hash_type auth_type,
                                 enum spu2_hash_mode auth_mode)
{
        u64 ctrl0 = 0;

        if ((cipher_type != SPU2_CIPHER_TYPE_NONE) && !is_inbound)
                ctrl0 |= SPU2_CIPH_ENCRYPT_EN;

        ctrl0 |= ((u64)cipher_type << SPU2_CIPH_TYPE_SHIFT) |
            ((u64)cipher_mode << SPU2_CIPH_MODE_SHIFT);

        if (protocol)
                ctrl0 |= (u64)protocol << SPU2_PROTO_SEL_SHIFT;

        if (auth_first)
                ctrl0 |= SPU2_HASH_FIRST;

        if (is_inbound && (auth_type != SPU2_HASH_TYPE_NONE))
                ctrl0 |= SPU2_CHK_TAG;

        ctrl0 |= (((u64)auth_type << SPU2_HASH_TYPE_SHIFT) |
                  ((u64)auth_mode << SPU2_HASH_MODE_SHIFT));

        fmd->ctrl0 = cpu_to_le64(ctrl0);
}

/**
 * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
 * SPU request packet.
 * @fmd: Start of FMD field to be written
 * @is_inbound: true if decrypting. false if encrypting.
 * @assoc_size: Length of additional associated data, in bytes
 * @auth_key_len: Length of authentication key, in bytes
 * @cipher_key_len: Length of cipher key, in bytes
 * @gen_iv: If true, hw generates IV and returns in response
 * @hash_iv: IV participates in hash. Used for IPSEC and TLS.
 * @return_iv: Return IV in output packet before payload
 * @ret_iv_len: Length of IV returned from SPU, in bytes
 * @ret_iv_offset: Offset into full IV of start of returned IV
 * @cipher_iv_len: Length of input cipher IV, in bytes
 * @digest_size: Length of digest (aka, hash tag or ICV), in bytes
 * @return_payload: Return payload in SPU response
 * @return_md: return metadata in SPU response
 *
 * Packet can have AAD2 w/o AAD1. For algorithms currently supported,
 * associated data goes in AAD2.
 */
static void spu2_fmd_ctrl1_write(struct SPU2_FMD *fmd, bool is_inbound,
                                 u64 assoc_size,
                                 u64 auth_key_len, u64 cipher_key_len,
                                 bool gen_iv, bool hash_iv, bool return_iv,
                                 u64 ret_iv_len, u64 ret_iv_offset,
                                 u64 cipher_iv_len, u64 digest_size,
                                 bool return_payload, bool return_md)
{
        u64 ctrl1 = 0;

        if (is_inbound && digest_size)
                ctrl1 |= SPU2_TAG_LOC;

        if (assoc_size) {
                ctrl1 |= SPU2_HAS_AAD2;
                ctrl1 |= SPU2_RETURN_AAD2;  /* need aad2 for gcm aes esp */
        }

        if (auth_key_len)
                ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) &
                          SPU2_HASH_KEY_LEN);

        if (cipher_key_len)
                ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) &
                          SPU2_CIPH_KEY_LEN);

        if (gen_iv)
                ctrl1 |= SPU2_GENIV;

        if (hash_iv)
                ctrl1 |= SPU2_HASH_IV;

        if (return_iv) {
                ctrl1 |= SPU2_RET_IV;
                ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT;
                ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT;
        }

        ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN);

        if (digest_size)
                ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) &
                          SPU2_HASH_TAG_LEN);

        /* Let's ask for the output pkt to include FMD, but don't need to
         * get keys and IVs back in OMD.
         */
        if (return_md)
                ctrl1 |= ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT);
        else
                ctrl1 |= ((u64)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT);

        /* Crypto API does not get assoc data back. So no need for AAD2. */

        if (return_payload)
                ctrl1 |= SPU2_RETURN_PAY;

        fmd->ctrl1 = cpu_to_le64(ctrl1);
}

/**
 * spu2_fmd_ctrl2_write() - Set the ctrl2 field in the fixed metadata field of
 * SPU2 header.
 * @fmd: Start of FMD field to be written
 * @cipher_offset: Number of bytes from Start of Packet (end of FD field) where
 * data to be encrypted or decrypted begins
 * @auth_key_len: Length of authentication key, in bytes
 * @auth_iv_len: Length of authentication initialization vector, in bytes
 * @cipher_key_len: Length of cipher key, in bytes
 * @cipher_iv_len: Length of cipher IV, in bytes
 */
static void spu2_fmd_ctrl2_write(struct SPU2_FMD *fmd, u64 cipher_offset,
                                 u64 auth_key_len, u64 auth_iv_len,
                                 u64 cipher_key_len, u64 cipher_iv_len)
{
        u64 ctrl2;
        u64 aad1_offset;
        u64 aad2_offset;
        u16 aad1_len = 0;
        u64 payload_offset;

        /* AAD1 offset is from start of FD. FD length always 0. */
        aad1_offset = 0;
        aad2_offset = aad1_offset;
        payload_offset = cipher_offset;
        ctrl2 = aad1_offset |
            (aad1_len << SPU2_AAD1_LEN_SHIFT) |
            (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
            (payload_offset << SPU2_PL_OFFSET_SHIFT);

        fmd->ctrl2 = cpu_to_le64(ctrl2);
}

/**
 * spu2_fmd_ctrl3_write() - Set the ctrl3 field in FMD
 * @fmd: Fixed meta data. First field in SPU2 msg header.
 * @payload_len: Length of payload, in bytes
 */
static void spu2_fmd_ctrl3_write(struct SPU2_FMD *fmd, u64 payload_len)
{
        u64 ctrl3;

        ctrl3 = payload_len & SPU2_PL_LEN;

        fmd->ctrl3 = cpu_to_le64(ctrl3);
}

/**
 * spu2_ctx_max_payload() - Determine the maximum length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * For SPU2, the hardware generally ignores the PayloadLen field in ctrl3 of
 * FMD and just keeps computing until it receives a DMA descriptor with the EOF
 * flag set. So we consider the max payload to be infinite. AES CCM is an
 * exception.
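 * For example, with AES-CCM and a 16-byte block size, the limit works out to
 * SPU2_MAX_PAYLOAD rounded down to a multiple of 16.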
 *
 * Return: Max payload length in bytes
 */
u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
                         enum spu_cipher_mode cipher_mode,
                         unsigned int blocksize)
{
        if ((cipher_alg == CIPHER_ALG_AES) &&
            (cipher_mode == CIPHER_MODE_CCM)) {
                u32 excess = SPU2_MAX_PAYLOAD % blocksize;

                return SPU2_MAX_PAYLOAD - excess;
        } else {
                return SPU_MAX_PAYLOAD_INF;
        }
}

/**
 * spu_payload_length() - Given a SPU2 message header, extract the payload
 * length.
 * @spu_hdr: Start of SPU message header (FMD)
 *
 * Return: payload length, in bytes
 */
u32 spu2_payload_length(u8 *spu_hdr)
{
        struct SPU2_FMD *fmd = (struct SPU2_FMD *)spu_hdr;
        u32 pl_len;
        u64 ctrl3;

        ctrl3 = le64_to_cpu(fmd->ctrl3);
        pl_len = ctrl3 & SPU2_PL_LEN;

        return pl_len;
}

/**
 * spu_response_hdr_len() - Determine the expected length of a SPU response
 * header.
 * @auth_key_len: Length of authentication key, in bytes
 * @enc_key_len: Length of encryption key, in bytes
 *
 * For SPU2, includes just FMD. OMD is never requested.
 *
 * Return: Length of FMD, in bytes
 */
u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
        return FMD_SIZE;
}

/**
 * spu_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg: hash algorithm
 * @hash_mode: hash mode
 * @chunksize: length of data, in bytes
 * @hash_block_size: size of a hash block, in bytes
 *
 * SPU2 hardware does all hash padding
 *
 * Return: length of hash pad in bytes
 */
u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
                      u32 chunksize, u16 hash_block_size)
{
        return 0;
}

/**
 * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
 * the AAD field or the data.
 * @cipher_mode: cipher mode
 * @data_size: length of the data, in bytes
 *
 * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
 */
u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
                         unsigned int data_size)
{
        return 0;
}

/**
 * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
 * associated data in a SPU2 output packet.
 * @cipher_mode: cipher mode
 * @assoc_len: length of additional associated data, in bytes
 * @iv_len: length of initialization vector, in bytes
 * @is_encrypt: true if encrypting. false if decrypt.
 *
 * Return: Length of buffer to catch associated data in response
 */
u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
                        unsigned int assoc_len, unsigned int iv_len,
                        bool is_encrypt)
{
        u32 resp_len = assoc_len;

        if (is_encrypt)
                /* gcm aes esp has to write 8-byte IV in response */
                resp_len += iv_len;
        return resp_len;
}

/*
 * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
 * in a SPU request after the AAD and before the payload.
 * @cipher_mode: cipher mode
 * @iv_len: initialization vector length in bytes
 *
 * For SPU2, AEAD IV is included in OMD and does not need to be repeated
 * prior to the payload.
 *
 * Return: Length of AEAD IV in bytes
 */
u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
{
        return 0;
}

/**
 * spu2_hash_type() - Determine the type of hash operation.
 * @src_sent: The number of bytes in the current request that have already
 * been sent to the SPU to be hashed.
 *
 * SPU2 always does a FULL hash operation
 */
enum hash_type spu2_hash_type(u32 src_sent)
{
        return HASH_TYPE_FULL;
}

/**
 * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
 * return.
 * @alg_digest_size: Number of bytes in the final digest for the given algo
 * @alg: The hash algorithm
 * @htype: Type of hash operation (init, update, full, etc)
 *
 */
u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
                     enum hash_type htype)
{
        return alg_digest_size;
}

/**
 * spu_create_request() - Build a SPU2 request message header, including FMD and
 * OMD.
 * @spu_hdr: Start of buffer where SPU request header is to be written
 * @req_opts: SPU request message options
 * @cipher_parms: Parameters related to cipher algorithm
 * @hash_parms: Parameters related to hash algorithm
 * @aead_parms: Parameters related to AEAD operation
 * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
 * not include length of AAD.
 *
 * Construct the message starting at spu_hdr. Caller should allocate this buffer
 * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
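 *
 * As written below, the header is the FMD (four 64-bit ctrl words) followed by
 * OMD fields in this order: hash key, cipher key, cipher IV, each present only
 * when its length is nonzero.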
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u32 spu2_create_request(u8 *spu_hdr,
                        struct spu_request_opts *req_opts,
                        struct spu_cipher_parms *cipher_parms,
                        struct spu_hash_parms *hash_parms,
                        struct spu_aead_parms *aead_parms,
                        unsigned int data_size)
{
        struct SPU2_FMD *fmd;
        u8 *ptr;
        unsigned int buf_len;
        int err;
        enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
        enum spu2_cipher_mode spu2_ciph_mode;
        enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
        enum spu2_hash_mode spu2_auth_mode;
        bool return_md = true;
        enum spu2_proto_sel proto = SPU2_PROTO_RESV;

        /* size of the payload */
        unsigned int payload_len =
            hash_parms->prebuf_len + data_size + hash_parms->pad_len -
            ((req_opts->is_aead && req_opts->is_inbound) ?
             hash_parms->digestsize : 0);

        /* offset of prebuf or data from start of AAD2 */
        unsigned int cipher_offset = aead_parms->assoc_size +
            aead_parms->aad_pad_len + aead_parms->iv_len;

#ifdef DEBUG
        /* total size of the data following OMD (without STAT word padding) */
        unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
                                                     aead_parms->iv_len,
                                                     hash_parms->prebuf_len,
                                                     data_size,
                                                     aead_parms->aad_pad_len,
                                                     aead_parms->data_pad_len,
                                                     hash_parms->pad_len);
#endif
        unsigned int assoc_size = aead_parms->assoc_size;

        if (req_opts->is_aead &&
            (cipher_parms->alg == CIPHER_ALG_AES) &&
            (cipher_parms->mode == CIPHER_MODE_GCM))
                /*
                 * On SPU 2, aes gcm cipher first on encrypt, auth first on
                 * decrypt
                 */
                req_opts->auth_first = req_opts->is_inbound;

        /* and do opposite for ccm (auth 1st on encrypt) */
        if (req_opts->is_aead &&
            (cipher_parms->alg == CIPHER_ALG_AES) &&
            (cipher_parms->mode == CIPHER_MODE_CCM))
                req_opts->auth_first = !req_opts->is_inbound;

        flow_log("%s()\n", __func__);
        flow_log(" in:%u authFirst:%u\n",
                 req_opts->is_inbound, req_opts->auth_first);
        flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
                 cipher_parms->mode, cipher_parms->type);
        flow_log(" is_esp: %s\n", req_opts->is_esp ? "yes" : "no");
        flow_log(" key: %d\n", cipher_parms->key_len);
        flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
        flow_log(" iv: %d\n", cipher_parms->iv_len);
        flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
        flow_log(" auth alg:%u mode:%u type %u\n",
                 hash_parms->alg, hash_parms->mode, hash_parms->type);
        flow_log(" digestsize: %u\n", hash_parms->digestsize);
        flow_log(" authkey: %d\n", hash_parms->key_len);
        flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len);
        flow_log(" assoc_size:%u\n", assoc_size);
        flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len);
        flow_log(" data_size:%u\n", data_size);
        flow_log(" hash_pad_len:%u\n", hash_parms->pad_len);
        flow_log(" real_db_size:%u\n", real_db_size);
        flow_log(" cipher_offset:%u payload_len:%u\n",
                 cipher_offset, payload_len);
        flow_log(" aead_iv: %u\n", aead_parms->iv_len);

        /* Convert to spu2 values for cipher alg, hash alg */
        err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
                                cipher_parms->type,
                                &spu2_ciph_type, &spu2_ciph_mode);

        /* If we are doing GCM hashing only - either via rfc4543 transform
         * or because we happen to do GCM with AAD only and no payload - we
         * need to configure hardware to use hash key rather than cipher key
         * and put data into payload. This is because unlike SPU-M, running
         * GCM cipher with 0 size payload is not permitted.
         */
        if ((req_opts->is_rfc4543) ||
            ((spu2_ciph_mode == SPU2_CIPHER_MODE_GCM) &&
             (payload_len == 0))) {
                /* Use hashing (only) and set up hash key */
                spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
                hash_parms->key_len = cipher_parms->key_len;
                memcpy(hash_parms->key_buf, cipher_parms->key_buf,
                       cipher_parms->key_len);
                cipher_parms->key_len = 0;

                if (req_opts->is_rfc4543)
                        payload_len += assoc_size;
                else
                        payload_len = assoc_size;
                cipher_offset = 0;
                assoc_size = 0;
        }

        if (err)
                return 0;

        flow_log("spu2 cipher type %s, cipher mode %s\n",
                 spu2_ciph_type_name(spu2_ciph_type),
                 spu2_ciph_mode_name(spu2_ciph_mode));

        err = spu2_hash_xlate(hash_parms->alg, hash_parms->mode,
                              hash_parms->type,
                              cipher_parms->type,
                              &spu2_auth_type, &spu2_auth_mode);
        if (err)
                return 0;

        flow_log("spu2 hash type %s, hash mode %s\n",
                 spu2_hash_type_name(spu2_auth_type),
                 spu2_hash_mode_name(spu2_auth_mode));

        fmd = (struct SPU2_FMD *)spu_hdr;

        spu2_fmd_ctrl0_write(fmd, req_opts->is_inbound, req_opts->auth_first,
                             proto, spu2_ciph_type, spu2_ciph_mode,
                             spu2_auth_type, spu2_auth_mode);

        spu2_fmd_ctrl1_write(fmd, req_opts->is_inbound, assoc_size,
                             hash_parms->key_len, cipher_parms->key_len,
                             false, false,
                             aead_parms->return_iv, aead_parms->ret_iv_len,
                             aead_parms->ret_iv_off,
                             cipher_parms->iv_len, hash_parms->digestsize,
                             !req_opts->bd_suppress, return_md);

        spu2_fmd_ctrl2_write(fmd, cipher_offset, hash_parms->key_len, 0,
                             cipher_parms->key_len, cipher_parms->iv_len);

        spu2_fmd_ctrl3_write(fmd, payload_len);

        ptr = (u8 *)(fmd + 1);
        buf_len = sizeof(struct SPU2_FMD);

        /* Write OMD */
        if (hash_parms->key_len) {
                memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
                ptr += hash_parms->key_len;
                buf_len += hash_parms->key_len;
        }
        if (cipher_parms->key_len) {
                memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len);
                ptr += cipher_parms->key_len;
                buf_len += cipher_parms->key_len;
        }
        if (cipher_parms->iv_len) {
                memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
                ptr += cipher_parms->iv_len;
                buf_len += cipher_parms->iv_len;
        }

        packet_dump(" SPU request header: ", spu_hdr, buf_len);

        return buf_len;
}
/**
 * spu_cipher_req_init() - Build an ablkcipher SPU2 request message header,
 * including FMD and OMD.
 * @spu_hdr: Location of start of SPU request (FMD field)
 * @cipher_parms: Parameters describing cipher request
 *
 * Called at setkey time to initialize a msg header that can be reused for all
 * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
 * Caller should allocate this buffer in DMA-able memory at least
 * SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the total length of the SPU header (FMD and OMD) in bytes. 0 if an
 * error occurs.
 */
u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
{
        struct SPU2_FMD *fmd;
        u8 *omd;
        enum spu2_cipher_type spu2_type = SPU2_CIPHER_TYPE_NONE;
        enum spu2_cipher_mode spu2_mode;
        int err;

        flow_log("%s()\n", __func__);
        flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
                 cipher_parms->mode, cipher_parms->type);
        flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
        flow_log(" key: %d\n", cipher_parms->key_len);
        flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);

        /* Convert to spu2 values */
        err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
                                cipher_parms->type, &spu2_type, &spu2_mode);
        if (err)
                return 0;

        flow_log("spu2 cipher type %s, cipher mode %s\n",
                 spu2_ciph_type_name(spu2_type),
                 spu2_ciph_mode_name(spu2_mode));

        /* Construct the FMD header */
        fmd = (struct SPU2_FMD *)spu_hdr;
        err = spu2_fmd_init(fmd, spu2_type, spu2_mode, cipher_parms->key_len,
                            cipher_parms->iv_len);
        if (err)
                return 0;

        /* Write cipher key to OMD */
        omd = (u8 *)(fmd + 1);
        if (cipher_parms->key_buf && cipher_parms->key_len)
                memcpy(omd, cipher_parms->key_buf, cipher_parms->key_len);

        packet_dump(" SPU request header: ", spu_hdr,
                    FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len);

        return FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len;
}

/**
 * spu_cipher_req_finish() - Finish building a SPU request message header for a
 * block cipher request.
 * @spu_hdr: Start of the request message header (MH field)
 * @spu_req_hdr_len: Length in bytes of the SPU request header
 * @is_inbound: 0 encrypt, 1 decrypt
 * @cipher_parms: Parameters describing cipher operation to be performed
 * @update_key: If true, rewrite the cipher key in SCTX
 * @data_size: Length of the data in the BD field
 *
 * Assumes much of the header was already filled in at setkey() time in
 * spu_cipher_req_init().
 * spu_cipher_req_init() fills in the encryption key. For RC4, when submitting a
 * request for a non-first chunk, we use the 260-byte SUPDT field from the
 * previous response as the key. update_key is true for this case. Unused in all
 * other cases.
 */
void spu2_cipher_req_finish(u8 *spu_hdr,
                            u16 spu_req_hdr_len,
                            unsigned int is_inbound,
                            struct spu_cipher_parms *cipher_parms,
                            bool update_key,
                            unsigned int data_size)
{
        struct SPU2_FMD *fmd;
        u8 *omd;  /* start of optional metadata */
        u64 ctrl0;
        u64 ctrl3;

        flow_log("%s()\n", __func__);
        flow_log(" in: %u\n", is_inbound);
        flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
                 cipher_parms->type);
        if (update_key) {
                flow_log(" cipher key len: %u\n", cipher_parms->key_len);
                flow_dump(" key: ", cipher_parms->key_buf,
                          cipher_parms->key_len);
        }
        flow_log(" iv len: %d\n", cipher_parms->iv_len);
        flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
        flow_log(" data_size: %u\n", data_size);

        fmd = (struct SPU2_FMD *)spu_hdr;
        omd = (u8 *)(fmd + 1);

        /*
         * FMD ctrl0 was initialized at setkey time. update it to indicate
         * whether we are encrypting or decrypting.
         */
        ctrl0 = le64_to_cpu(fmd->ctrl0);
        if (is_inbound)
                ctrl0 &= ~SPU2_CIPH_ENCRYPT_EN;  /* decrypt */
        else
                ctrl0 |= SPU2_CIPH_ENCRYPT_EN;  /* encrypt */
        fmd->ctrl0 = cpu_to_le64(ctrl0);

        if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) {
                /* cipher iv provided so put it in here */
                memcpy(omd + cipher_parms->key_len, cipher_parms->iv_buf,
                       cipher_parms->iv_len);
        }

        ctrl3 = le64_to_cpu(fmd->ctrl3);
        data_size &= SPU2_PL_LEN;
        ctrl3 |= data_size;
        fmd->ctrl3 = cpu_to_le64(ctrl3);

        packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
}

/**
 * spu_request_pad() - Create pad bytes at the end of the data.
 * @pad_start: Start of buffer where pad bytes are to be written
 * @gcm_padding: Length of GCM padding, in bytes
 * @hash_pad_len: Number of bytes of padding extend data to full block
 * @auth_alg: Authentication algorithm
 * @auth_mode: Authentication mode
 * @total_sent: Length inserted at end of hash pad
 * @status_padding: Number of bytes of padding to align STATUS word
 *
 * There may be three forms of pad:
 * 1. GCM pad - for GCM mode ciphers, pad to 16-byte alignment
 * 2. hash pad - pad to a block length, with 0x80 data terminator and
 *    size at the end
 * 3. STAT pad - to ensure the STAT field is 4-byte aligned
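 *
 * For example, hashing 3 bytes of data with SHA-256 (64-byte block) would use
 * a 61-byte hash pad: the 0x80 terminator, 52 zero bytes, then the message
 * length in bits (24) as a big-endian u64.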
 */
void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
                      enum hash_alg auth_alg, enum hash_mode auth_mode,
                      unsigned int total_sent, u32 status_padding)
{
        u8 *ptr = pad_start;

        /* fix data alignment for GCM */
        if (gcm_padding > 0) {
                flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
                         gcm_padding);
                memset(ptr, 0, gcm_padding);
                ptr += gcm_padding;
        }

        if (hash_pad_len > 0) {
                /* clear the padding section */
                memset(ptr, 0, hash_pad_len);

                /* terminate the data */
                *ptr = 0x80;
                ptr += (hash_pad_len - sizeof(u64));

                /* add the size at the end as required per alg */
                if (auth_alg == HASH_ALG_MD5)
                        *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
                else  /* SHA1, SHA2-224, SHA2-256 */
                        *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
                ptr += sizeof(u64);
        }

        /* pad to a 4byte alignment for STAT */
        if (status_padding > 0) {
                flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
                         status_padding);
                memset(ptr, 0, status_padding);
                ptr += status_padding;
        }
}

/**
 * spu2_xts_tweak_in_payload() - Indicate that SPU2 does NOT place the XTS
 * tweak field in the packet payload (it uses IV instead)
 *
 * Return: 0
 */
u8 spu2_xts_tweak_in_payload(void)
{
        return 0;
}

/**
 * spu2_tx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spu2_tx_status_len(void)
{
        return SPU2_TX_STATUS_LEN;
}

/**
 * spu2_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spu2_rx_status_len(void)
{
        return SPU2_RX_STATUS_LEN;
}

/**
 * spu_status_process() - Process the status from a SPU response message.
 * @statp: start of STATUS word
 *
 * Return: 0 - if status is good and response should be processed
 *         !0 - status indicates an error and response is invalid
 */
int spu2_status_process(u8 *statp)
{
        /* SPU2 status is 2 bytes by default - SPU_RX_STATUS_LEN */
        u16 status = le16_to_cpu(*(__le16 *)statp);

        if (status == 0)
                return 0;

        flow_log("rx status is %#x\n", status);
        if (status == SPU2_INVALID_ICV)
                return SPU_INVALID_ICV;

        return -EBADMSG;
}

/**
 * spu2_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 *
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 *
 */
void spu2_ccm_update_iv(unsigned int digestsize,
                        struct spu_cipher_parms *cipher_parms,
                        unsigned int assoclen, unsigned int chunksize,
                        bool is_encrypt, bool is_esp)
{
        int L;  /* size of length field, in bytes */

        /*
         * In RFC4309 mode, L is fixed at 4 bytes; otherwise, IV from
         * testmgr contains (L-1) in bottom 3 bits of first byte,
         * per RFC 3610.
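         * For example (illustrative values): a flags byte of 0x03 encodes
         * L - 1 = 3, so L = 4, and stripping the 1 + 4 leading bytes below
         * leaves an 11-byte nonce from a 16-byte IV.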
         */
        if (is_esp)
                L = CCM_ESP_L_VALUE;
        else
                L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
                     CCM_B0_L_PRIME_SHIFT) + 1;

        /* SPU2 doesn't want these length bytes nor the first byte... */
        cipher_parms->iv_len -= (1 + L);
        memmove(cipher_parms->iv_buf, &cipher_parms->iv_buf[1],
                cipher_parms->iv_len);
}

/**
 * spu2_wordalign_padlen() - SPU2 does not require padding.
 * @data_size: length of data field in bytes
 *
 * Return: length of status field padding, in bytes (always 0 on SPU2)
 */
u32 spu2_wordalign_padlen(u32 data_size)
{
        return 0;
}