/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */
#include <linux/kernel.h>
#include <linux/string.h>

#include "util.h"
#include "spu.h"
#include "spum.h"
#include "cipher.h"
  22. /* This array is based on the hash algo type supported in spu.h */
  23. char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
  24. char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
  25. "sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
  26. char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" };
  27. /* Assumes SPU-M messages are in big endian */
  28. void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
  29. {
  30. u8 *ptr = buf;
  31. struct SPUHEADER *spuh = (struct SPUHEADER *)buf;
  32. unsigned int hash_key_len = 0;
  33. unsigned int hash_state_len = 0;
  34. unsigned int cipher_key_len = 0;
  35. unsigned int iv_len;
  36. u32 pflags;
  37. u32 cflags;
  38. u32 ecf;
  39. u32 cipher_alg;
  40. u32 cipher_mode;
  41. u32 cipher_type;
  42. u32 hash_alg;
  43. u32 hash_mode;
  44. u32 hash_type;
  45. u32 sctx_size; /* SCTX length in words */
  46. u32 sctx_pl_len; /* SCTX payload length in bytes */
  47. packet_log("\n");
  48. packet_log("SPU Message header %p len: %u\n", buf, buf_len);
  49. /* ========== Decode MH ========== */
  50. packet_log(" MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
  51. if (spuh->mh.flags & MH_SCTX_PRES)
  52. packet_log(" SCTX present\n");
  53. if (spuh->mh.flags & MH_BDESC_PRES)
  54. packet_log(" BDESC present\n");
  55. if (spuh->mh.flags & MH_MFM_PRES)
  56. packet_log(" MFM present\n");
  57. if (spuh->mh.flags & MH_BD_PRES)
  58. packet_log(" BD present\n");
  59. if (spuh->mh.flags & MH_HASH_PRES)
  60. packet_log(" HASH present\n");
  61. if (spuh->mh.flags & MH_SUPDT_PRES)
  62. packet_log(" SUPDT present\n");
  63. packet_log(" Opcode 0x%02x\n", spuh->mh.op_code);
  64. ptr += sizeof(spuh->mh) + sizeof(spuh->emh); /* skip emh. unused */
  65. /* ========== Decode SCTX ========== */
  66. if (spuh->mh.flags & MH_SCTX_PRES) {
  67. pflags = be32_to_cpu(spuh->sa.proto_flags);
  68. packet_log(" SCTX[0] 0x%08x\n", pflags);
  69. sctx_size = pflags & SCTX_SIZE;
  70. packet_log(" Size %u words\n", sctx_size);
  71. cflags = be32_to_cpu(spuh->sa.cipher_flags);
  72. packet_log(" SCTX[1] 0x%08x\n", cflags);
  73. packet_log(" Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n",
  74. (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT);
  75. packet_log(" Order:%lu (1:AuthFirst 0:EncFirst)\n",
  76. (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT);
  77. packet_log(" ICV_IS_512:%lx\n",
  78. (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT);
  79. cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
  80. cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT;
  81. cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT;
  82. packet_log(" Crypto Alg:%u Mode:%u Type:%u\n",
  83. cipher_alg, cipher_mode, cipher_type);
  84. hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT;
  85. hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT;
  86. hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT;
  87. packet_log(" Hash Alg:%x Mode:%x Type:%x\n",
  88. hash_alg, hash_mode, hash_type);
  89. packet_log(" UPDT_Offset:%u\n", cflags & UPDT_OFST);
  90. ecf = be32_to_cpu(spuh->sa.ecf);
  91. packet_log(" SCTX[2] 0x%08x\n", ecf);
  92. packet_log(" WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ",
  93. (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT,
  94. (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT,
  95. (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT);
  96. packet_log("BD_SUPPRESS:%lu\n",
  97. (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT);
  98. packet_log(" SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ",
  99. (ecf & SCTX_IV) >> SCTX_IV_SHIFT,
  100. (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT,
  101. (ecf & GEN_IV) >> GEN_IV_SHIFT);
  102. packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n",
  103. (ecf & IV_OFFSET) >> IV_OFFSET_SHIFT,
  104. ecf & EXP_IV_SIZE);
  105. ptr += sizeof(struct SCTX);
  106. if (hash_alg && hash_mode) {
  107. char *name = "NONE";
  108. switch (hash_alg) {
  109. case HASH_ALG_MD5:
  110. hash_key_len = 16;
  111. name = "MD5";
  112. break;
  113. case HASH_ALG_SHA1:
  114. hash_key_len = 20;
  115. name = "SHA1";
  116. break;
  117. case HASH_ALG_SHA224:
  118. hash_key_len = 28;
  119. name = "SHA224";
  120. break;
  121. case HASH_ALG_SHA256:
  122. hash_key_len = 32;
  123. name = "SHA256";
  124. break;
  125. case HASH_ALG_SHA384:
  126. hash_key_len = 48;
  127. name = "SHA384";
  128. break;
  129. case HASH_ALG_SHA512:
  130. hash_key_len = 64;
  131. name = "SHA512";
  132. break;
  133. case HASH_ALG_AES:
  134. hash_key_len = 0;
  135. name = "AES";
  136. break;
  137. case HASH_ALG_NONE:
  138. break;
  139. }
  140. packet_log(" Auth Key Type:%s Length:%u Bytes\n",
  141. name, hash_key_len);
  142. packet_dump(" KEY: ", ptr, hash_key_len);
  143. ptr += hash_key_len;
  144. } else if ((hash_alg == HASH_ALG_AES) &&
  145. (hash_mode == HASH_MODE_XCBC)) {
  146. char *name = "NONE";
  147. switch (cipher_type) {
  148. case CIPHER_TYPE_AES128:
  149. hash_key_len = 16;
  150. name = "AES128-XCBC";
  151. break;
  152. case CIPHER_TYPE_AES192:
  153. hash_key_len = 24;
  154. name = "AES192-XCBC";
  155. break;
  156. case CIPHER_TYPE_AES256:
  157. hash_key_len = 32;
  158. name = "AES256-XCBC";
  159. break;
  160. }
  161. packet_log(" Auth Key Type:%s Length:%u Bytes\n",
  162. name, hash_key_len);
  163. packet_dump(" KEY: ", ptr, hash_key_len);
  164. ptr += hash_key_len;
  165. }
  166. if (hash_alg && (hash_mode == HASH_MODE_NONE) &&
  167. (hash_type == HASH_TYPE_UPDT)) {
  168. char *name = "NONE";
  169. switch (hash_alg) {
  170. case HASH_ALG_MD5:
  171. hash_state_len = 16;
  172. name = "MD5";
  173. break;
  174. case HASH_ALG_SHA1:
  175. hash_state_len = 20;
  176. name = "SHA1";
  177. break;
  178. case HASH_ALG_SHA224:
  179. hash_state_len = 32;
  180. name = "SHA224";
  181. break;
  182. case HASH_ALG_SHA256:
  183. hash_state_len = 32;
  184. name = "SHA256";
  185. break;
  186. case HASH_ALG_SHA384:
  187. hash_state_len = 48;
  188. name = "SHA384";
  189. break;
  190. case HASH_ALG_SHA512:
  191. hash_state_len = 64;
  192. name = "SHA512";
  193. break;
  194. case HASH_ALG_AES:
  195. hash_state_len = 0;
  196. name = "AES";
  197. break;
  198. case HASH_ALG_NONE:
  199. break;
  200. }
  201. packet_log(" Auth State Type:%s Length:%u Bytes\n",
  202. name, hash_state_len);
  203. packet_dump(" State: ", ptr, hash_state_len);
  204. ptr += hash_state_len;
  205. }
  206. if (cipher_alg) {
  207. char *name = "NONE";
  208. switch (cipher_alg) {
  209. case CIPHER_ALG_DES:
  210. cipher_key_len = 8;
  211. name = "DES";
  212. break;
  213. case CIPHER_ALG_3DES:
  214. cipher_key_len = 24;
  215. name = "3DES";
  216. break;
  217. case CIPHER_ALG_RC4:
  218. cipher_key_len = 260;
  219. name = "ARC4";
  220. break;
  221. case CIPHER_ALG_AES:
  222. switch (cipher_type) {
  223. case CIPHER_TYPE_AES128:
  224. cipher_key_len = 16;
  225. name = "AES128";
  226. break;
  227. case CIPHER_TYPE_AES192:
  228. cipher_key_len = 24;
  229. name = "AES192";
  230. break;
  231. case CIPHER_TYPE_AES256:
  232. cipher_key_len = 32;
  233. name = "AES256";
  234. break;
  235. }
  236. break;
  237. case CIPHER_ALG_NONE:
  238. break;
  239. }
  240. packet_log(" Cipher Key Type:%s Length:%u Bytes\n",
  241. name, cipher_key_len);
  242. /* XTS has two keys */
  243. if (cipher_mode == CIPHER_MODE_XTS) {
  244. packet_dump(" KEY2: ", ptr, cipher_key_len);
  245. ptr += cipher_key_len;
  246. packet_dump(" KEY1: ", ptr, cipher_key_len);
  247. ptr += cipher_key_len;
  248. cipher_key_len *= 2;
  249. } else {
  250. packet_dump(" KEY: ", ptr, cipher_key_len);
  251. ptr += cipher_key_len;
  252. }
  253. if (ecf & SCTX_IV) {
  254. sctx_pl_len = sctx_size * sizeof(u32) -
  255. sizeof(struct SCTX);
  256. iv_len = sctx_pl_len -
  257. (hash_key_len + hash_state_len +
  258. cipher_key_len);
  259. packet_log(" IV Length:%u Bytes\n", iv_len);
  260. packet_dump(" IV: ", ptr, iv_len);
  261. ptr += iv_len;
  262. }
  263. }
  264. }
  265. /* ========== Decode BDESC ========== */
  266. if (spuh->mh.flags & MH_BDESC_PRES) {
  267. #ifdef DEBUG
  268. struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;
  269. #endif
  270. packet_log(" BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
  271. packet_log(" OffsetMAC:%u LengthMAC:%u\n",
  272. be16_to_cpu(bdesc->offset_mac),
  273. be16_to_cpu(bdesc->length_mac));
  274. ptr += sizeof(u32);
  275. packet_log(" BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
  276. packet_log(" OffsetCrypto:%u LengthCrypto:%u\n",
  277. be16_to_cpu(bdesc->offset_crypto),
  278. be16_to_cpu(bdesc->length_crypto));
  279. ptr += sizeof(u32);
  280. packet_log(" BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
  281. packet_log(" OffsetICV:%u OffsetIV:%u\n",
  282. be16_to_cpu(bdesc->offset_icv),
  283. be16_to_cpu(bdesc->offset_iv));
  284. ptr += sizeof(u32);
  285. }
  286. /* ========== Decode BD ========== */
  287. if (spuh->mh.flags & MH_BD_PRES) {
  288. #ifdef DEBUG
  289. struct BD_HEADER *bd = (struct BD_HEADER *)ptr;
  290. #endif
  291. packet_log(" BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
  292. packet_log(" Size:%ubytes PrevLength:%u\n",
  293. be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
  294. ptr += 4;
  295. }
  296. /* Double check sanity */
  297. if (buf + buf_len != ptr) {
  298. packet_log(" Packet parsed incorrectly. ");
  299. packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n",
  300. buf, buf_len, buf + buf_len, ptr);
  301. }
  302. packet_log("\n");
  303. }
  304. /**
  305. * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a
  306. * SPU message for a given cipher and hash alg context.
  307. * @cipher_alg: The cipher algorithm
  308. * @cipher_mode: The cipher mode
  309. * @blocksize: The size of a block of data for this algo
  310. *
  311. * The max payload must be a multiple of the blocksize so that if a request is
  312. * too large to fit in a single SPU message, the request can be broken into
  313. * max_payload sized chunks. Each chunk must be a multiple of blocksize.
  314. *
  315. * Return: Max payload length in bytes
  316. */
  317. u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
  318. enum spu_cipher_mode cipher_mode,
  319. unsigned int blocksize)
  320. {
  321. u32 max_payload = SPUM_NS2_MAX_PAYLOAD;
  322. u32 excess;
  323. /* In XTS on SPU-M, we'll need to insert tweak before input data */
  324. if (cipher_mode == CIPHER_MODE_XTS)
  325. max_payload -= SPU_XTS_TWEAK_SIZE;
  326. excess = max_payload % blocksize;
  327. return max_payload - excess;
  328. }
  329. /**
  330. * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a
  331. * SPU message for a given cipher and hash alg context.
  332. * @cipher_alg: The cipher algorithm
  333. * @cipher_mode: The cipher mode
  334. * @blocksize: The size of a block of data for this algo
  335. *
  336. * The max payload must be a multiple of the blocksize so that if a request is
  337. * too large to fit in a single SPU message, the request can be broken into
  338. * max_payload sized chunks. Each chunk must be a multiple of blocksize.
  339. *
  340. * Return: Max payload length in bytes
  341. */
  342. u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
  343. enum spu_cipher_mode cipher_mode,
  344. unsigned int blocksize)
  345. {
  346. u32 max_payload = SPUM_NSP_MAX_PAYLOAD;
  347. u32 excess;
  348. /* In XTS on SPU-M, we'll need to insert tweak before input data */
  349. if (cipher_mode == CIPHER_MODE_XTS)
  350. max_payload -= SPU_XTS_TWEAK_SIZE;
  351. excess = max_payload % blocksize;
  352. return max_payload - excess;
  353. }
  354. /** spum_payload_length() - Given a SPU-M message header, extract the payload
  355. * length.
  356. * @spu_hdr: Start of SPU header
  357. *
  358. * Assumes just MH, EMH, BD (no SCTX, BDESC. Works for response frames.
  359. *
  360. * Return: payload length in bytes
  361. */
  362. u32 spum_payload_length(u8 *spu_hdr)
  363. {
  364. struct BD_HEADER *bd;
  365. u32 pl_len;
  366. /* Find BD header. skip MH, EMH */
  367. bd = (struct BD_HEADER *)(spu_hdr + 8);
  368. pl_len = be16_to_cpu(bd->size);
  369. return pl_len;
  370. }
  371. /**
  372. * spum_response_hdr_len() - Given the length of the hash key and encryption
  373. * key, determine the expected length of a SPU response header.
  374. * @auth_key_len: authentication key length (bytes)
  375. * @enc_key_len: encryption key length (bytes)
  376. * @is_hash: true if response message is for a hash operation
  377. *
  378. * Return: length of SPU response header (bytes)
  379. */
  380. u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
  381. {
  382. if (is_hash)
  383. return SPU_HASH_RESP_HDR_LEN;
  384. else
  385. return SPU_RESP_HDR_LEN;
  386. }
  387. /**
  388. * spum_hash_pad_len() - Calculate the length of hash padding required to extend
  389. * data to a full block size.
  390. * @hash_alg: hash algorithm
  391. * @hash_mode: hash mode
  392. * @chunksize: length of data, in bytes
  393. * @hash_block_size: size of a block of data for hash algorithm
  394. *
  395. * Reserve space for 1 byte (0x80) start of pad and the total length as u64
  396. *
  397. * Return: length of hash pad in bytes
  398. */
  399. u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
  400. u32 chunksize, u16 hash_block_size)
  401. {
  402. unsigned int length_len;
  403. unsigned int used_space_last_block;
  404. int hash_pad_len;
  405. /* AES-XCBC hash requires just padding to next block boundary */
  406. if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) {
  407. used_space_last_block = chunksize % hash_block_size;
  408. hash_pad_len = hash_block_size - used_space_last_block;
  409. if (hash_pad_len >= hash_block_size)
  410. hash_pad_len -= hash_block_size;
  411. return hash_pad_len;
  412. }
  413. used_space_last_block = chunksize % hash_block_size + 1;
  414. if ((hash_alg == HASH_ALG_SHA384) || (hash_alg == HASH_ALG_SHA512))
  415. length_len = 2 * sizeof(u64);
  416. else
  417. length_len = sizeof(u64);
  418. used_space_last_block += length_len;
  419. hash_pad_len = hash_block_size - used_space_last_block;
  420. if (hash_pad_len < 0)
  421. hash_pad_len += hash_block_size;
  422. hash_pad_len += 1 + length_len;
  423. return hash_pad_len;
  424. }
  425. /**
  426. * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding.
  427. * @cipher_mode: Algo type
  428. * @data_size: Length of plaintext (bytes)
  429. *
  430. * @Return: Length of padding, in bytes
  431. */
  432. u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
  433. unsigned int data_size)
  434. {
  435. u32 pad_len = 0;
  436. u32 m1 = SPU_GCM_CCM_ALIGN - 1;
  437. if ((cipher_mode == CIPHER_MODE_GCM) ||
  438. (cipher_mode == CIPHER_MODE_CCM))
  439. pad_len = ((data_size + m1) & ~m1) - data_size;
  440. return pad_len;
  441. }
  442. /**
  443. * spum_assoc_resp_len() - Determine the size of the receive buffer required to
  444. * catch associated data.
  445. * @cipher_mode: cipher mode
  446. * @assoc_len: length of associated data (bytes)
  447. * @iv_len: length of IV (bytes)
  448. * @is_encrypt: true if encrypting. false if decrypting.
  449. *
  450. * Return: length of associated data in response message (bytes)
  451. */
  452. u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
  453. unsigned int assoc_len, unsigned int iv_len,
  454. bool is_encrypt)
  455. {
  456. u32 buflen = 0;
  457. u32 pad;
  458. if (assoc_len)
  459. buflen = assoc_len;
  460. if (cipher_mode == CIPHER_MODE_GCM) {
  461. /* AAD needs to be padded in responses too */
  462. pad = spum_gcm_ccm_pad_len(cipher_mode, buflen);
  463. buflen += pad;
  464. }
  465. if (cipher_mode == CIPHER_MODE_CCM) {
  466. /*
  467. * AAD needs to be padded in responses too
  468. * for CCM, len + 2 needs to be 128-bit aligned.
  469. */
  470. pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2);
  471. buflen += pad;
  472. }
  473. return buflen;
  474. }
  475. /**
  476. * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
  477. * in a SPU request after the AAD and before the payload.
  478. * @cipher_mode: cipher mode
  479. * @iv_ctr_len: initialization vector length in bytes
  480. *
  481. * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
  482. * to include the IV as a separate field in the SPU request msg.
  483. *
  484. * Return: Length of AEAD IV in bytes
  485. */
  486. u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
  487. {
  488. return 0;
  489. }
  490. /**
  491. * spum_hash_type() - Determine the type of hash operation.
  492. * @src_sent: The number of bytes in the current request that have already
  493. * been sent to the SPU to be hashed.
  494. *
  495. * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message.
  496. * Using FULL causes failures (such as when the string to be hashed is empty).
  497. * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages
  498. * as INIT or UPDT and do the hash padding in sw.
  499. */
  500. enum hash_type spum_hash_type(u32 src_sent)
  501. {
  502. return src_sent ? HASH_TYPE_UPDT : HASH_TYPE_INIT;
  503. }
  504. /**
  505. * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
  506. * return.
  507. * alg_digest_size: Number of bytes in the final digest for the given algo
  508. * alg: The hash algorithm
  509. * htype: Type of hash operation (init, update, full, etc)
  510. *
  511. * When doing incremental hashing for an algorithm with a truncated hash
  512. * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
  513. * a partial result for the next chunk.
  514. */
  515. u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
  516. enum hash_type htype)
  517. {
  518. u32 digestsize = alg_digest_size;
  519. /* SPU returns complete digest when doing incremental hash and truncated
  520. * hash algo.
  521. */
  522. if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) {
  523. if (alg == HASH_ALG_SHA224)
  524. digestsize = SHA256_DIGEST_SIZE;
  525. else if (alg == HASH_ALG_SHA384)
  526. digestsize = SHA512_DIGEST_SIZE;
  527. }
  528. return digestsize;
  529. }
  530. /**
  531. * spum_create_request() - Build a SPU request message header, up to and
  532. * including the BD header. Construct the message starting at spu_hdr. Caller
  533. * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN
  534. * bytes long.
  535. * @spu_hdr: Start of buffer where SPU request header is to be written
  536. * @req_opts: SPU request message options
  537. * @cipher_parms: Parameters related to cipher algorithm
  538. * @hash_parms: Parameters related to hash algorithm
  539. * @aead_parms: Parameters related to AEAD operation
  540. * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
  541. * not include length of AAD.
  542. * Return: the length of the SPU header in bytes. 0 if an error occurs.
  543. */
  544. u32 spum_create_request(u8 *spu_hdr,
  545. struct spu_request_opts *req_opts,
  546. struct spu_cipher_parms *cipher_parms,
  547. struct spu_hash_parms *hash_parms,
  548. struct spu_aead_parms *aead_parms,
  549. unsigned int data_size)
  550. {
  551. struct SPUHEADER *spuh;
  552. struct BDESC_HEADER *bdesc;
  553. struct BD_HEADER *bd;
  554. u8 *ptr;
  555. u32 protocol_bits = 0;
  556. u32 cipher_bits = 0;
  557. u32 ecf_bits = 0;
  558. u8 sctx_words = 0;
  559. unsigned int buf_len = 0;
  560. /* size of the cipher payload */
  561. unsigned int cipher_len = hash_parms->prebuf_len + data_size +
  562. hash_parms->pad_len;
  563. /* offset of prebuf or data from end of BD header */
  564. unsigned int cipher_offset = aead_parms->assoc_size +
  565. aead_parms->iv_len + aead_parms->aad_pad_len;
  566. /* total size of the DB data (without STAT word padding) */
  567. unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
  568. aead_parms->iv_len,
  569. hash_parms->prebuf_len,
  570. data_size,
  571. aead_parms->aad_pad_len,
  572. aead_parms->data_pad_len,
  573. hash_parms->pad_len);
  574. unsigned int auth_offset = 0;
  575. unsigned int offset_iv = 0;
  576. /* size/offset of the auth payload */
  577. unsigned int auth_len;
  578. auth_len = real_db_size;
  579. if (req_opts->is_aead && req_opts->is_inbound)
  580. cipher_len -= hash_parms->digestsize;
  581. if (req_opts->is_aead && req_opts->is_inbound)
  582. auth_len -= hash_parms->digestsize;
  583. if ((hash_parms->alg == HASH_ALG_AES) &&
  584. (hash_parms->mode == HASH_MODE_XCBC)) {
  585. auth_len -= hash_parms->pad_len;
  586. cipher_len -= hash_parms->pad_len;
  587. }
  588. flow_log("%s()\n", __func__);
  589. flow_log(" in:%u authFirst:%u\n",
  590. req_opts->is_inbound, req_opts->auth_first);
  591. flow_log(" %s. cipher alg:%u mode:%u type %u\n",
  592. spu_alg_name(cipher_parms->alg, cipher_parms->mode),
  593. cipher_parms->alg, cipher_parms->mode, cipher_parms->type);
  594. flow_log(" key: %d\n", cipher_parms->key_len);
  595. flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
  596. flow_log(" iv: %d\n", cipher_parms->iv_len);
  597. flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
  598. flow_log(" auth alg:%u mode:%u type %u\n",
  599. hash_parms->alg, hash_parms->mode, hash_parms->type);
  600. flow_log(" digestsize: %u\n", hash_parms->digestsize);
  601. flow_log(" authkey: %d\n", hash_parms->key_len);
  602. flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len);
  603. flow_log(" assoc_size:%u\n", aead_parms->assoc_size);
  604. flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len);
  605. flow_log(" data_size:%u\n", data_size);
  606. flow_log(" hash_pad_len:%u\n", hash_parms->pad_len);
  607. flow_log(" real_db_size:%u\n", real_db_size);
  608. flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n",
  609. auth_offset, auth_len, cipher_offset, cipher_len);
  610. flow_log(" aead_iv: %u\n", aead_parms->iv_len);
  611. /* starting out: zero the header (plus some) */
  612. ptr = spu_hdr;
  613. memset(ptr, 0, sizeof(struct SPUHEADER));
  614. /* format master header word */
  615. /* Do not set the next bit even though the datasheet says to */
  616. spuh = (struct SPUHEADER *)ptr;
  617. ptr += sizeof(struct SPUHEADER);
  618. buf_len += sizeof(struct SPUHEADER);
  619. spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
  620. spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);
  621. /* Format sctx word 0 (protocol_bits) */
  622. sctx_words = 3; /* size in words */
  623. /* Format sctx word 1 (cipher_bits) */
  624. if (req_opts->is_inbound)
  625. cipher_bits |= CIPHER_INBOUND;
  626. if (req_opts->auth_first)
  627. cipher_bits |= CIPHER_ORDER;
  628. /* Set the crypto parameters in the cipher.flags */
  629. cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
  630. cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
  631. cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;
  632. /* Set the auth parameters in the cipher.flags */
  633. cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT;
  634. cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT;
  635. cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT;
  636. /*
  637. * Format sctx extensions if required, and update main fields if
  638. * required)
  639. */
  640. if (hash_parms->alg) {
  641. /* Write the authentication key material if present */
  642. if (hash_parms->key_len) {
  643. memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
  644. ptr += hash_parms->key_len;
  645. buf_len += hash_parms->key_len;
  646. sctx_words += hash_parms->key_len / 4;
  647. }
  648. if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
  649. (cipher_parms->mode == CIPHER_MODE_CCM))
  650. /* unpadded length */
  651. offset_iv = aead_parms->assoc_size;
  652. /* if GCM/CCM we need to write ICV into the payload */
  653. if (!req_opts->is_inbound) {
  654. if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
  655. (cipher_parms->mode == CIPHER_MODE_CCM))
  656. ecf_bits |= 1 << INSERT_ICV_SHIFT;
  657. } else {
  658. ecf_bits |= CHECK_ICV;
  659. }
  660. /* Inform the SPU of the ICV size (in words) */
  661. if (hash_parms->digestsize == 64)
  662. cipher_bits |= ICV_IS_512;
  663. else
  664. ecf_bits |=
  665. (hash_parms->digestsize / 4) << ICV_SIZE_SHIFT;
  666. }
  667. if (req_opts->bd_suppress)
  668. ecf_bits |= BD_SUPPRESS;
  669. /* copy the encryption keys in the SAD entry */
  670. if (cipher_parms->alg) {
  671. if (cipher_parms->key_len) {
  672. memcpy(ptr, cipher_parms->key_buf,
  673. cipher_parms->key_len);
  674. ptr += cipher_parms->key_len;
  675. buf_len += cipher_parms->key_len;
  676. sctx_words += cipher_parms->key_len / 4;
  677. }
  678. /*
  679. * if encrypting then set IV size, use SCTX IV unless no IV
  680. * given here
  681. */
  682. if (cipher_parms->iv_buf && cipher_parms->iv_len) {
  683. /* Use SCTX IV */
  684. ecf_bits |= SCTX_IV;
  685. /* cipher iv provided so put it in here */
  686. memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
  687. ptr += cipher_parms->iv_len;
  688. buf_len += cipher_parms->iv_len;
  689. sctx_words += cipher_parms->iv_len / 4;
  690. }
  691. }
  692. /*
  693. * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD
  694. * so we need to override the BDESC parameters.
  695. */
  696. if (req_opts->is_rfc4543) {
  697. if (req_opts->is_inbound)
  698. data_size -= hash_parms->digestsize;
  699. offset_iv = aead_parms->assoc_size + data_size;
  700. cipher_len = 0;
  701. cipher_offset = offset_iv;
  702. auth_len = cipher_offset + aead_parms->data_pad_len;
  703. }
  704. /* write in the total sctx length now that we know it */
  705. protocol_bits |= sctx_words;
  706. /* Endian adjust the SCTX */
  707. spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
  708. spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
  709. spuh->sa.ecf = cpu_to_be32(ecf_bits);
  710. /* === create the BDESC section === */
  711. bdesc = (struct BDESC_HEADER *)ptr;
  712. bdesc->offset_mac = cpu_to_be16(auth_offset);
  713. bdesc->length_mac = cpu_to_be16(auth_len);
  714. bdesc->offset_crypto = cpu_to_be16(cipher_offset);
  715. bdesc->length_crypto = cpu_to_be16(cipher_len);
  716. /*
  717. * CCM in SPU-M requires that ICV not be in same 32-bit word as data or
  718. * padding. So account for padding as necessary.
  719. */
  720. if (cipher_parms->mode == CIPHER_MODE_CCM)
  721. auth_len += spum_wordalign_padlen(auth_len);
  722. bdesc->offset_icv = cpu_to_be16(auth_len);
  723. bdesc->offset_iv = cpu_to_be16(offset_iv);
  724. ptr += sizeof(struct BDESC_HEADER);
  725. buf_len += sizeof(struct BDESC_HEADER);
  726. /* === no MFM section === */
  727. /* === create the BD section === */
  728. /* add the BD header */
  729. bd = (struct BD_HEADER *)ptr;
  730. bd->size = cpu_to_be16(real_db_size);
  731. bd->prev_length = 0;
  732. ptr += sizeof(struct BD_HEADER);
  733. buf_len += sizeof(struct BD_HEADER);
  734. packet_dump(" SPU request header: ", spu_hdr, buf_len);
  735. return buf_len;
  736. }
  737. /**
  738. * spum_cipher_req_init() - Build a SPU request message header, up to and
  739. * including the BD header.
  740. * @spu_hdr: Start of SPU request header (MH)
  741. * @cipher_parms: Parameters that describe the cipher request
  742. *
  743. * Construct the message starting at spu_hdr. Caller should allocate this buffer
  744. * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
  745. *
  746. * Return: the length of the SPU header in bytes. 0 if an error occurs.
  747. */
  748. u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
  749. {
  750. struct SPUHEADER *spuh;
  751. u32 protocol_bits = 0;
  752. u32 cipher_bits = 0;
  753. u32 ecf_bits = 0;
  754. u8 sctx_words = 0;
  755. u8 *ptr = spu_hdr;
  756. flow_log("%s()\n", __func__);
  757. flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
  758. cipher_parms->mode, cipher_parms->type);
  759. flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
  760. flow_log(" key: %d\n", cipher_parms->key_len);
  761. flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
  762. /* starting out: zero the header (plus some) */
  763. memset(spu_hdr, 0, sizeof(struct SPUHEADER));
  764. ptr += sizeof(struct SPUHEADER);
  765. /* format master header word */
  766. /* Do not set the next bit even though the datasheet says to */
  767. spuh = (struct SPUHEADER *)spu_hdr;
  768. spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
  769. spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);
  770. /* Format sctx word 0 (protocol_bits) */
  771. sctx_words = 3; /* size in words */
  772. /* copy the encryption keys in the SAD entry */
  773. if (cipher_parms->alg) {
  774. if (cipher_parms->key_len) {
  775. ptr += cipher_parms->key_len;
  776. sctx_words += cipher_parms->key_len / 4;
  777. }
  778. /*
  779. * if encrypting then set IV size, use SCTX IV unless no IV
  780. * given here
  781. */
  782. if (cipher_parms->iv_len) {
  783. /* Use SCTX IV */
  784. ecf_bits |= SCTX_IV;
  785. ptr += cipher_parms->iv_len;
  786. sctx_words += cipher_parms->iv_len / 4;
  787. }
  788. }
  789. /* Set the crypto parameters in the cipher.flags */
  790. cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
  791. cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
  792. cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;
  793. /* copy the encryption keys in the SAD entry */
  794. if (cipher_parms->alg && cipher_parms->key_len)
  795. memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
  796. /* write in the total sctx length now that we know it */
  797. protocol_bits |= sctx_words;
  798. /* Endian adjust the SCTX */
  799. spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
  800. /* Endian adjust the SCTX */
  801. spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
  802. spuh->sa.ecf = cpu_to_be32(ecf_bits);
  803. packet_dump(" SPU request header: ", spu_hdr,
  804. sizeof(struct SPUHEADER));
  805. return sizeof(struct SPUHEADER) + cipher_parms->key_len +
  806. cipher_parms->iv_len + sizeof(struct BDESC_HEADER) +
  807. sizeof(struct BD_HEADER);
  808. }
  809. /**
  810. * spum_cipher_req_finish() - Finish building a SPU request message header for a
  811. * block cipher request. Assumes much of the header was already filled in at
  812. * setkey() time in spu_cipher_req_init().
  813. * @spu_hdr: Start of the request message header (MH field)
  814. * @spu_req_hdr_len: Length in bytes of the SPU request header
  815. * @isInbound: 0 encrypt, 1 decrypt
  816. * @cipher_parms: Parameters describing cipher operation to be performed
  817. * @update_key: If true, rewrite the cipher key in SCTX
  818. * @data_size: Length of the data in the BD field
  819. *
  820. * Assumes much of the header was already filled in at setkey() time in
  821. * spum_cipher_req_init().
  822. * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting
  823. * a request for a non-first chunk, we use the 260-byte SUPDT field from the
  824. * previous response as the key. update_key is true for this case. Unused in all
  825. * other cases.
  826. */
  827. void spum_cipher_req_finish(u8 *spu_hdr,
  828. u16 spu_req_hdr_len,
  829. unsigned int is_inbound,
  830. struct spu_cipher_parms *cipher_parms,
  831. bool update_key,
  832. unsigned int data_size)
  833. {
  834. struct SPUHEADER *spuh;
  835. struct BDESC_HEADER *bdesc;
  836. struct BD_HEADER *bd;
  837. u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len -
  838. (sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER));
  839. u32 cipher_bits;
  840. flow_log("%s()\n", __func__);
  841. flow_log(" in: %u\n", is_inbound);
  842. flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
  843. cipher_parms->type);
  844. if (update_key) {
  845. flow_log(" cipher key len: %u\n", cipher_parms->key_len);
  846. flow_dump(" key: ", cipher_parms->key_buf,
  847. cipher_parms->key_len);
  848. }
  849. /*
  850. * In XTS mode, API puts "i" parameter (block tweak) in IV. For
  851. * SPU-M, should be in start of the BD; tx_sg_create() copies it there.
  852. * IV in SPU msg for SPU-M should be 0, since that's the "j" parameter
  853. * (block ctr within larger data unit) - given we can send entire disk
  854. * block (<= 4KB) in 1 SPU msg, don't need to use this parameter.
  855. */
  856. if (cipher_parms->mode == CIPHER_MODE_XTS)
  857. memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len);
  858. flow_log(" iv len: %d\n", cipher_parms->iv_len);
  859. flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
  860. flow_log(" data_size: %u\n", data_size);
  861. /* format master header word */
  862. /* Do not set the next bit even though the datasheet says to */
  863. spuh = (struct SPUHEADER *)spu_hdr;
  864. /* cipher_bits was initialized at setkey time */
  865. cipher_bits = be32_to_cpu(spuh->sa.cipher_flags);
  866. /* Format sctx word 1 (cipher_bits) */
  867. if (is_inbound)
  868. cipher_bits |= CIPHER_INBOUND;
  869. else
  870. cipher_bits &= ~CIPHER_INBOUND;
  871. /* update encryption key for RC4 on non-first chunk */
  872. if (update_key) {
  873. spuh->sa.cipher_flags |=
  874. cipher_parms->type << CIPHER_TYPE_SHIFT;
  875. memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
  876. }
  877. if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
  878. /* cipher iv provided so put it in here */
  879. memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
  880. cipher_parms->iv_len);
  881. spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
  882. /* === create the BDESC section === */
  883. bdesc = (struct BDESC_HEADER *)bdesc_ptr;
  884. bdesc->offset_mac = 0;
  885. bdesc->length_mac = 0;
  886. bdesc->offset_crypto = 0;
  887. /* XTS mode, data_size needs to include tweak parameter */
  888. if (cipher_parms->mode == CIPHER_MODE_XTS)
  889. bdesc->length_crypto = cpu_to_be16(data_size +
  890. SPU_XTS_TWEAK_SIZE);
  891. else
  892. bdesc->length_crypto = cpu_to_be16(data_size);
  893. bdesc->offset_icv = 0;
  894. bdesc->offset_iv = 0;
  895. /* === no MFM section === */
  896. /* === create the BD section === */
  897. /* add the BD header */
  898. bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER));
  899. bd->size = cpu_to_be16(data_size);
  900. /* XTS mode, data_size needs to include tweak parameter */
  901. if (cipher_parms->mode == CIPHER_MODE_XTS)
  902. bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE);
  903. else
  904. bd->size = cpu_to_be16(data_size);
  905. bd->prev_length = 0;
  906. packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
  907. }
  908. /**
  909. * spum_request_pad() - Create pad bytes at the end of the data.
  910. * @pad_start: Start of buffer where pad bytes are to be written
  911. * @gcm_ccm_padding: length of GCM/CCM padding, in bytes
  912. * @hash_pad_len: Number of bytes of padding extend data to full block
  913. * @auth_alg: authentication algorithm
  914. * @auth_mode: authentication mode
  915. * @total_sent: length inserted at end of hash pad
  916. * @status_padding: Number of bytes of padding to align STATUS word
  917. *
  918. * There may be three forms of pad:
  919. * 1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment
  920. * 2. hash pad - pad to a block length, with 0x80 data terminator and
  921. * size at the end
  922. * 3. STAT pad - to ensure the STAT field is 4-byte aligned
  923. */
  924. void spum_request_pad(u8 *pad_start,
  925. u32 gcm_ccm_padding,
  926. u32 hash_pad_len,
  927. enum hash_alg auth_alg,
  928. enum hash_mode auth_mode,
  929. unsigned int total_sent, u32 status_padding)
  930. {
  931. u8 *ptr = pad_start;
  932. /* fix data alignent for GCM/CCM */
  933. if (gcm_ccm_padding > 0) {
  934. flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
  935. gcm_ccm_padding);
  936. memset(ptr, 0, gcm_ccm_padding);
  937. ptr += gcm_ccm_padding;
  938. }
  939. if (hash_pad_len > 0) {
  940. /* clear the padding section */
  941. memset(ptr, 0, hash_pad_len);
  942. if ((auth_alg == HASH_ALG_AES) &&
  943. (auth_mode == HASH_MODE_XCBC)) {
  944. /* AES/XCBC just requires padding to be 0s */
  945. ptr += hash_pad_len;
  946. } else {
  947. /* terminate the data */
  948. *ptr = 0x80;
  949. ptr += (hash_pad_len - sizeof(u64));
  950. /* add the size at the end as required per alg */
  951. if (auth_alg == HASH_ALG_MD5)
  952. *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
  953. else /* SHA1, SHA2-224, SHA2-256 */
  954. *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
  955. ptr += sizeof(u64);
  956. }
  957. }
  958. /* pad to a 4byte alignment for STAT */
  959. if (status_padding > 0) {
  960. flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
  961. status_padding);
  962. memset(ptr, 0, status_padding);
  963. ptr += status_padding;
  964. }
  965. }
/**
 * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak
 * field in the packet payload (rather than using IV)
 *
 * Constant property of the SPU-M hardware; callers use this to decide where
 * to copy the XTS "i" tweak when building the request message.
 *
 * Return: 1
 */
u8 spum_xts_tweak_in_payload(void)
{
	return 1;
}
/**
 * spum_tx_status_len() - Return the length of the STATUS field in a SPU
 * request message. (NOTE(review): original comment said "response", but
 * tx is the direction toward the SPU, i.e. the request - confirm.)
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_tx_status_len(void)
{
	return SPU_TX_STATUS_LEN;
}
/**
 * spum_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Constant for SPU-M; the caller reserves this many bytes at the end of the
 * receive buffer for the status word.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_rx_status_len(void)
{
	return SPU_RX_STATUS_LEN;
}
  996. /**
  997. * spum_status_process() - Process the status from a SPU response message.
  998. * @statp: start of STATUS word
  999. * Return:
  1000. * 0 - if status is good and response should be processed
  1001. * !0 - status indicates an error and response is invalid
  1002. */
  1003. int spum_status_process(u8 *statp)
  1004. {
  1005. u32 status;
  1006. status = __be32_to_cpu(*(__be32 *)statp);
  1007. flow_log("SPU response STATUS %#08x\n", status);
  1008. if (status & SPU_STATUS_ERROR_FLAG) {
  1009. pr_err("%s() Warning: Error result from SPU: %#08x\n",
  1010. __func__, status);
  1011. if (status & SPU_STATUS_INVALID_ICV)
  1012. return SPU_INVALID_ICV;
  1013. return -EBADMSG;
  1014. }
  1015. return 0;
  1016. }
/**
 * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 *
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 *
 * Rewrites iv_buf in place into the CCM B0 block layout expected by the SPU.
 * Logs an error and returns without modifying the IV if iv_len is not the
 * expected CCM_AES_IV_SIZE.
 */
void spum_ccm_update_iv(unsigned int digestsize,
			struct spu_cipher_parms *cipher_parms,
			unsigned int assoclen,
			unsigned int chunksize,
			bool is_encrypt,
			bool is_esp)
{
	u8 L;		/* L from CCM algorithm, length of plaintext data */
	u8 mprime;	/* M' from CCM algo, (M - 2) / 2, where M=authsize */
	u8 adata;	/* flag bit: nonzero AAD present */

	/* Guard: CCM B0 formatting below assumes a full 16-byte IV buffer */
	if (cipher_parms->iv_len != CCM_AES_IV_SIZE) {
		pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n",
		       __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE);
		return;
	}

	/*
	 * IV needs to be formatted as follows:
	 *
	 * |          Byte 0               | Bytes 1 - N | Bytes (N+1) - 15 |
	 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0  |    Bits 7 - 0    |
	 * | 0 |Ad?|(M - 2) / 2|   L - 1   |    Nonce    | Plaintext Length |
	 *
	 * Ad? = 1 if AAD present, 0 if not present
	 * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or-
	 *                         4, 6, 8, 10, 12, 14, 16 bytes (SPU2)
	 * L = Size of Plaintext Length field; Nonce size = 15 - L
	 *
	 * It appears that the crypto API already expects the L-1 portion
	 * to be set in the first byte of the IV, which implicitly determines
	 * the nonce size, and also fills in the nonce. But the other bits
	 * in byte 0 as well as the plaintext length need to be filled in.
	 *
	 * In rfc4309/esp mode, L is not already in the supplied IV and
	 * we need to fill it in, as well as move the IV data to be after
	 * the salt
	 */
	if (is_esp) {
		L = CCM_ESP_L_VALUE;	/* RFC4309 has fixed L */
	} else {
		/* L' = plaintext length - 1 so Plaintext length is L' + 1 */
		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
		      CCM_B0_L_PRIME_SHIFT) + 1;
	}

	mprime = (digestsize - 2) >> 1;  /* M' = (M - 2) / 2 */
	adata = (assoclen > 0);  /* adata = 1 if any associated data */

	/* Repack byte 0: flags (adata), M', and L-1 per the layout above */
	cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) |
				  (mprime << CCM_B0_M_PRIME_SHIFT) |
				  ((L - 1) << CCM_B0_L_PRIME_SHIFT);

	/* Nonce is already filled in by crypto API, and is 15 - L bytes */

	/* Don't include digest in plaintext size when decrypting */
	if (!is_encrypt)
		chunksize -= digestsize;

	/*
	 * Fill in length of plaintext, formatted to be L bytes long,
	 * occupying the last L bytes of the IV (bytes 16-L .. 15).
	 */
	format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L);
}
  1083. /**
  1084. * spum_wordalign_padlen() - Given the length of a data field, determine the
  1085. * padding required to align the data following this field on a 4-byte boundary.
  1086. * @data_size: length of data field in bytes
  1087. *
  1088. * Return: length of status field padding, in bytes
  1089. */
  1090. u32 spum_wordalign_padlen(u32 data_size)
  1091. {
  1092. return ((data_size + 3) & ~3) - data_size;
  1093. }