/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
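
/*
 * Each job descriptor built below is essentially a header command plus a
 * protocol OPERATION command wrapped around the protocol data block (PDB),
 * which is why every length above is two command words (2 * CAAM_CMD_SZ)
 * plus the size of the corresponding PDB.
 */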

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
                         struct akcipher_request *req)
{
        dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
        dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
                                 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
                          struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

        dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

        dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/*
 * RSA Job Completion handlers: decode and log any non-zero CAAM status word,
 * release the extended descriptor together with its DMA mappings, then
 * complete the akcipher request.
 */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_pub_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f1_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f2_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f3_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}

static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
                                        unsigned int nbytes,
                                        unsigned int flags)
{
        struct sg_mapping_iter miter;
        int lzeros, ents;
        unsigned int len;
        unsigned int tbytes = nbytes;
        const u8 *buff;

        ents = sg_nents_for_len(sgl, nbytes);
        if (ents < 0)
                return ents;

        sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

        lzeros = 0;
        len = 0;
        while (nbytes > 0) {
                while (len && !*buff) {
                        lzeros++;
                        len--;
                        buff++;
                }

                if (len && *buff)
                        break;

                sg_miter_next(&miter);
                buff = miter.addr;
                len = miter.length;

                nbytes -= lzeros;
                lzeros = 0;
        }

        miter.consumed = lzeros;
        sg_miter_stop(&miter);
        nbytes -= lzeros;

        return tbytes - nbytes;
}
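
/*
 * The sg_miter walk above is only counting leading zero bytes across
 * possibly non-contiguous scatterlist segments. A minimal sketch of the
 * same computation on a contiguous buffer (illustrative only, not compiled
 * into the driver; the function name is hypothetical):
 */
#if 0
static int example_count_leading_zeros(const u8 *buf, unsigned int nbytes)
{
        unsigned int lzeros = 0;

        /* advance while the current byte is zero and data remains */
        while (lzeros < nbytes && !buf[lzeros])
                lzeros++;

        return lzeros;
}
#endif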

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
                                         size_t desclen)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = ctx->dev;
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct rsa_edesc *edesc;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
        int sgc;
        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
        int src_nents, dst_nents;
        int lzeros;

        lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
        if (lzeros < 0)
                return ERR_PTR(lzeros);

        req->src_len -= lzeros;
        req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

        src_nents = sg_nents_for_len(req->src, req->src_len);
        dst_nents = sg_nents_for_len(req->dst, req->dst_len);

        if (src_nents > 1)
                sec4_sg_len = src_nents;
        if (dst_nents > 1)
                sec4_sg_len += dst_nents;

        sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc, hw desc commands and link tables */
        edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
                        GFP_DMA | flags);
        if (!edesc)
                return ERR_PTR(-ENOMEM);

        sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
        if (unlikely(!sgc)) {
                dev_err(dev, "unable to map source\n");
                goto src_fail;
        }

        sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
        if (unlikely(!sgc)) {
                dev_err(dev, "unable to map destination\n");
                goto dst_fail;
        }

        edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

        sec4_sg_index = 0;
        if (src_nents > 1) {
                sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
                sec4_sg_index += src_nents;
        }
        if (dst_nents > 1)
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);

        /* Save nents for later use in Job Descriptor */
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;

        if (!sec4_sg_bytes)
                return edesc;

        edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
                dev_err(dev, "unable to map S/G table\n");
                goto sec4_sg_fail;
        }

        edesc->sec4_sg_bytes = sec4_sg_bytes;

        return edesc;

sec4_sg_fail:
        dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
        dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
        kfree(edesc);
        return ERR_PTR(-ENOMEM);
}
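
/*
 * Rough layout of the single allocation made by rsa_edesc_alloc():
 *
 *      +------------------+----------------------+------------------------+
 *      | struct rsa_edesc | hw_desc[] (desclen)  | sec4 S/G link table(s) |
 *      +------------------+----------------------+------------------------+
 *
 * which is why edesc->sec4_sg is simply (void *)edesc + sizeof(*edesc) +
 * desclen, and why only the link-table tail needs its own DMA mapping.
 */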

static int set_rsa_pub_pdb(struct akcipher_request *req,
                           struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
        int sec4_sg_index = 0;

        pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->n_dma)) {
                dev_err(dev, "Unable to map RSA modulus memory\n");
                return -ENOMEM;
        }

        pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->e_dma)) {
                dev_err(dev, "Unable to map RSA public exponent memory\n");
                dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->f_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->g_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
        pdb->f_len = req->src_len;

        return 0;
}
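
/*
 * Note on the PDB naming used above and in the private-key forms below: in
 * the CAAM RSA PDBs, f refers to the message and g to the ciphertext. The
 * public-key (encrypt) descriptor therefore reads its input from f_dma and
 * writes g_dma, while the private-key (decrypt) forms read g_dma and write
 * the recovered message to f_dma, which is why the req->src/req->dst
 * assignments flip between the two cases.
 */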

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
        int sec4_sg_index = 0;

        pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->n_dma)) {
                dev_err(dev, "Unable to map modulus memory\n");
                return -ENOMEM;
        }

        pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->d_dma)) {
                dev_err(dev, "Unable to map RSA private exponent memory\n");
                dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

        return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->d_dma)) {
                dev_err(dev, "Unable to map RSA private exponent memory\n");
                return -ENOMEM;
        }

        pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->p_dma)) {
                dev_err(dev, "Unable to map RSA prime factor p memory\n");
                goto unmap_d;
        }

        pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->q_dma)) {
                dev_err(dev, "Unable to map RSA prime factor q memory\n");
                goto unmap_p;
        }

        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_q;
        }

        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
        pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

        return 0;

unmap_tmp1:
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

        return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->p_dma)) {
                dev_err(dev, "Unable to map RSA prime factor p memory\n");
                return -ENOMEM;
        }

        pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->q_dma)) {
                dev_err(dev, "Unable to map RSA prime factor q memory\n");
                goto unmap_p;
        }

        pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->dp_dma)) {
                dev_err(dev, "Unable to map RSA exponent dp memory\n");
                goto unmap_q;
        }

        pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->dq_dma)) {
                dev_err(dev, "Unable to map RSA exponent dq memory\n");
                goto unmap_dp;
        }

        pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->c_dma)) {
                dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
                goto unmap_dq;
        }

        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_qinv;
        }

        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= key->n_sz;
        pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

        return 0;

unmap_tmp1:
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
        dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
        dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

        return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        if (unlikely(!key->n || !key->e))
                return -EINVAL;

        if (req->dst_len < key->n_sz) {
                req->dst_len = key->n_sz;
                dev_err(jrdev, "Output buffer length less than parameter n\n");
                return -EOVERFLOW;
        }

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Encrypt Protocol Data Block */
        ret = set_rsa_pub_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_pub_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
        ret = set_rsa_priv_f1_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
        ret = set_rsa_priv_f2_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
        ret = set_rsa_priv_f3_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        int ret;

        if (unlikely(!key->n || !key->d))
                return -EINVAL;

        if (req->dst_len < key->n_sz) {
                req->dst_len = key->n_sz;
                dev_err(ctx->dev, "Output buffer length less than parameter n\n");
                return -EOVERFLOW;
        }

        if (key->priv_form == FORM3)
                ret = caam_rsa_dec_priv_f3(req);
        else if (key->priv_form == FORM2)
                ret = caam_rsa_dec_priv_f2(req);
        else
                ret = caam_rsa_dec_priv_f1(req);

        return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
        kzfree(key->d);
        kzfree(key->p);
        kzfree(key->q);
        kzfree(key->dp);
        kzfree(key->dq);
        kzfree(key->qinv);
        kzfree(key->tmp1);
        kzfree(key->tmp2);
        kfree(key->e);
        kfree(key->n);
        memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
        /* Check the remaining length before dereferencing the byte. */
        while (*nbytes && !**ptr) {
                (*ptr)++;
                (*nbytes)--;
        }
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
        u8 *dst;

        caam_rsa_drop_leading_zeros(&ptr, &nbytes);
        if (!nbytes)
                return NULL;

        dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
        if (!dst)
                return NULL;

        memcpy(dst + (dstlen - nbytes), ptr, nbytes);

        return dst;
}
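
/*
 * A minimal usage sketch for the helper above (illustrative only, not
 * compiled; the function name below is hypothetical): a 2-byte dP padded up
 * to a 3-byte prime length comes back as { 0x00, 0x01, 0x02 }.
 */
#if 0
static void example_caam_read_rsa_crt(void)
{
        static const u8 dp[] = { 0x01, 0x02 };
        u8 *padded = caam_read_rsa_crt(dp, sizeof(dp), 3);

        /* padded is { 0x00, 0x01, 0x02 } on success, NULL on allocation
         * failure or if dp were all zeros. */
        kzfree(padded);
}
#endif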

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
        caam_rsa_drop_leading_zeros(&buf, nbytes);
        if (!*nbytes)
                return NULL;

        return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
        if (len > 4096)
                return -EINVAL;
        return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
                                unsigned int keylen)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct rsa_key raw_key = {NULL};
        struct caam_rsa_key *rsa_key = &ctx->key;
        int ret;

        /* Free the old RSA key if any */
        caam_rsa_free_key(rsa_key);

        ret = rsa_parse_pub_key(&raw_key, key, keylen);
        if (ret)
                return ret;

        /* Copy key in DMA zone */
        rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->e)
                goto err;

        /*
         * Skip leading zeros and copy the positive integer to a buffer
         * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
         * expects a positive integer for the RSA modulus and uses its length as
         * decryption output length.
         */
        rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
        if (!rsa_key->n)
                goto err;

        if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
                caam_rsa_free_key(rsa_key);
                return -EINVAL;
        }

        rsa_key->e_sz = raw_key.e_sz;
        rsa_key->n_sz = raw_key.n_sz;

        memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

        return 0;

err:
        caam_rsa_free_key(rsa_key);
        return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
                                       struct rsa_key *raw_key)
{
        struct caam_rsa_key *rsa_key = &ctx->key;
        size_t p_sz = raw_key->p_sz;
        size_t q_sz = raw_key->q_sz;

        rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
        if (!rsa_key->p)
                return;
        rsa_key->p_sz = p_sz;

        rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
        if (!rsa_key->q)
                goto free_p;
        rsa_key->q_sz = q_sz;

        rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->tmp1)
                goto free_q;

        rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->tmp2)
                goto free_tmp1;

        rsa_key->priv_form = FORM2;

        rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
        if (!rsa_key->dp)
                goto free_tmp2;

        rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
        if (!rsa_key->dq)
                goto free_dp;

        rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
                                          q_sz);
        if (!rsa_key->qinv)
                goto free_dq;

        rsa_key->priv_form = FORM3;

        return;

free_dq:
        kzfree(rsa_key->dq);
free_dp:
        kzfree(rsa_key->dp);
free_tmp2:
        kzfree(rsa_key->tmp2);
free_tmp1:
        kzfree(rsa_key->tmp1);
free_q:
        kzfree(rsa_key->q);
free_p:
        kzfree(rsa_key->p);
}
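
/*
 * Roughly, the helper above settles on the most capable private-key form it
 * can build. A sketch of the resulting mapping (illustrative only, not
 * compiled; the function name is hypothetical and assumes the
 * caam_priv_key_form enum from caampkc.h):
 */
#if 0
static enum caam_priv_key_form example_resulting_form(const struct caam_rsa_key *k)
{
        if (k->dp && k->dq && k->qinv)  /* p, q, dP, dQ, qInv all usable */
                return FORM3;
        if (k->p && k->q)               /* primes (and tmp buffers) only */
                return FORM2;
        return FORM1;                   /* fall back to n, d */
}
#endif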

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
                                 unsigned int keylen)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct rsa_key raw_key = {NULL};
        struct caam_rsa_key *rsa_key = &ctx->key;
        int ret;

        /* Free the old RSA key if any */
        caam_rsa_free_key(rsa_key);

        ret = rsa_parse_priv_key(&raw_key, key, keylen);
        if (ret)
                return ret;

        /* Copy key in DMA zone */
        rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->d)
                goto err;

        rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->e)
                goto err;

        /*
         * Skip leading zeros and copy the positive integer to a buffer
         * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
         * expects a positive integer for the RSA modulus and uses its length as
         * decryption output length.
         */
        rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
        if (!rsa_key->n)
                goto err;

        if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
                caam_rsa_free_key(rsa_key);
                return -EINVAL;
        }

        rsa_key->d_sz = raw_key.d_sz;
        rsa_key->e_sz = raw_key.e_sz;
        rsa_key->n_sz = raw_key.n_sz;

        memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
        memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

        caam_rsa_set_priv_key_form(ctx, &raw_key);

        return 0;

err:
        caam_rsa_free_key(rsa_key);
        return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        ctx->dev = caam_jr_alloc();
        if (IS_ERR(ctx->dev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->dev);
        }

        return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;

        caam_rsa_free_key(key);
        caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
        .encrypt = caam_rsa_enc,
        .decrypt = caam_rsa_dec,
        .sign = caam_rsa_dec,
        .verify = caam_rsa_enc,
        .set_pub_key = caam_rsa_set_pub_key,
        .set_priv_key = caam_rsa_set_priv_key,
        .max_size = caam_rsa_max_size,
        .init = caam_rsa_init_tfm,
        .exit = caam_rsa_exit_tfm,
        .reqsize = sizeof(struct caam_rsa_req_ctx),
        .base = {
                .cra_name = "rsa",
                .cra_driver_name = "rsa-caam",
                .cra_priority = 3000,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct caam_rsa_ctx),
        },
};
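
/*
 * A rough sketch of how a kernel-side caller might drive this algorithm
 * through the generic akcipher API (illustrative only, not compiled into the
 * driver: the function name and buffer handling are hypothetical, and it
 * assumes a kernel that provides DECLARE_CRYPTO_WAIT()/crypto_wait_req()).
 * Note that .sign and .verify above simply reuse the raw RSA decrypt and
 * encrypt primitives.
 */
#if 0
#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int example_rsa_encrypt(const void *der_pub_key, unsigned int keylen,
                               u8 *msg, unsigned int msg_len,
                               u8 *out, unsigned int out_len)
{
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        struct scatterlist src, dst;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        /* "rsa" may resolve to rsa-caam when PKHA is present */
        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, keylen);
        if (ret)
                goto free_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        /* msg and out must be DMA-able (e.g. kmalloc'ed), not stack memory;
         * out_len should be at least crypto_akcipher_maxsize(tfm). */
        sg_init_one(&src, msg, msg_len);
        sg_init_one(&dst, out, out_len);
        akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);

        ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

        akcipher_request_free(req);
free_tfm:
        crypto_free_akcipher(tfm);
        return ret;
}
#endif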

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
        struct caam_drv_private *priv;
        u32 cha_inst, pk_inst;
        int err;

        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                if (!dev_node)
                        return -ENODEV;
        }

        pdev = of_find_device_by_node(dev_node);
        if (!pdev) {
                of_node_put(dev_node);
                return -ENODEV;
        }

        ctrldev = &pdev->dev;
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);

        /*
         * If priv is NULL, it's probably because the caam driver wasn't
         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
         */
        if (!priv)
                return -ENODEV;

        /* Determine public key hardware accelerator presence. */
        cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
        pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

        /* Do not register algorithms if PKHA is not present. */
        if (!pk_inst)
                return -ENODEV;

        err = crypto_register_akcipher(&caam_rsa);
        if (err)
                dev_warn(ctrldev, "%s alg registration failed\n",
                         caam_rsa.base.cra_driver_name);
        else
                dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

        return err;
}

static void __exit caam_pkc_exit(void)
{
        crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");