  1. /*
  2. * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
  3. *
  4. * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
  5. *
  6. * This file add support for MD5 and SHA1.
  7. *
  8. * You could find the datasheet in Documentation/arm/sunxi/README
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. */
  15. #include "sun4i-ss.h"
  16. #include <linux/scatterlist.h>
  17. /* This is a totally arbitrary value */
  18. #define SS_TIMEOUT 100
  19. int sun4i_hash_crainit(struct crypto_tfm *tfm)
  20. {
  21. struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
  22. struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
  23. struct sun4i_ss_alg_template *algt;
  24. memset(op, 0, sizeof(struct sun4i_tfm_ctx));
  25. algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
  26. op->ss = algt->ss;
  27. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  28. sizeof(struct sun4i_req_ctx));
  29. return 0;
  30. }
  31. /* sun4i_hash_init: initialize request context */
  32. int sun4i_hash_init(struct ahash_request *areq)
  33. {
  34. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  35. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  36. struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
  37. struct sun4i_ss_alg_template *algt;
  38. memset(op, 0, sizeof(struct sun4i_req_ctx));
  39. algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
  40. op->mode = algt->mode;
  41. return 0;
  42. }
  43. int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
  44. {
  45. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  46. struct md5_state *octx = out;
  47. int i;
  48. octx->byte_count = op->byte_count + op->len;
  49. memcpy(octx->block, op->buf, op->len);
  50. if (op->byte_count) {
  51. for (i = 0; i < 4; i++)
  52. octx->hash[i] = op->hash[i];
  53. } else {
  54. octx->hash[0] = SHA1_H0;
  55. octx->hash[1] = SHA1_H1;
  56. octx->hash[2] = SHA1_H2;
  57. octx->hash[3] = SHA1_H3;
  58. }
  59. return 0;
  60. }
  61. int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
  62. {
  63. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  64. const struct md5_state *ictx = in;
  65. int i;
  66. sun4i_hash_init(areq);
  67. op->byte_count = ictx->byte_count & ~0x3F;
  68. op->len = ictx->byte_count & 0x3F;
  69. memcpy(op->buf, ictx->block, op->len);
  70. for (i = 0; i < 4; i++)
  71. op->hash[i] = ictx->hash[i];
  72. return 0;
  73. }
  74. int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
  75. {
  76. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  77. struct sha1_state *octx = out;
  78. int i;
  79. octx->count = op->byte_count + op->len;
  80. memcpy(octx->buffer, op->buf, op->len);
  81. if (op->byte_count) {
  82. for (i = 0; i < 5; i++)
  83. octx->state[i] = op->hash[i];
  84. } else {
  85. octx->state[0] = SHA1_H0;
  86. octx->state[1] = SHA1_H1;
  87. octx->state[2] = SHA1_H2;
  88. octx->state[3] = SHA1_H3;
  89. octx->state[4] = SHA1_H4;
  90. }
  91. return 0;
  92. }
  93. int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
  94. {
  95. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  96. const struct sha1_state *ictx = in;
  97. int i;
  98. sun4i_hash_init(areq);
  99. op->byte_count = ictx->count & ~0x3F;
  100. op->len = ictx->count & 0x3F;
  101. memcpy(op->buf, ictx->buffer, op->len);
  102. for (i = 0; i < 5; i++)
  103. op->hash[i] = ictx->state[i];
  104. return 0;
  105. }
  106. #define SS_HASH_UPDATE 1
  107. #define SS_HASH_FINAL 2
  108. /*
  109. * sun4i_hash_update: update hash engine
  110. *
  111. * Could be used for both SHA1 and MD5
  112. * Write data by step of 32bits and put then in the SS.
  113. *
  114. * Since we cannot leave partial data and hash state in the engine,
  115. * we need to get the hash state at the end of this function.
  116. * We can get the hash state every 64 bytes
  117. *
  118. * So the first work is to get the number of bytes to write to SS modulo 64
  119. * The extra bytes will go to a temporary buffer op->buf storing op->len bytes
  120. *
  121. * So at the begin of update()
  122. * if op->len + areq->nbytes < 64
  123. * => all data will be written to wait buffer (op->buf) and end=0
  124. * if not, write all data from op->buf to the device and position end to
  125. * complete to 64bytes
  126. *
  127. * example 1:
  128. * update1 60o => op->len=60
  129. * update2 60o => need one more word to have 64 bytes
  130. * end=4
  131. * so write all data from op->buf and one word of SGs
  132. * write remaining data in op->buf
  133. * final state op->len=56
  134. */
static int sun4i_hash(struct ahash_request *areq)
{
	/*
	 * i is the total bytes read from SGs, to be compared to areq->nbytes
	 * i is important because we cannot rely on SG length since the sum of
	 * SG->length could be greater than areq->nbytes
	 *
	 * end is the position when we need to stop writing to the device,
	 * to be compared to i
	 *
	 * in_i: advancement in the current SG
	 */
	unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
	unsigned int in_i = 0;
	u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun4i_ss_ctx *ss = tfmctx->ss;
	struct scatterlist *in_sg = areq->src;
	struct sg_mapping_iter mi;
	int in_r, err = 0;
	size_t copied = 0;
	__le32 wb = 0;

	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
		__func__, crypto_tfm_alg_name(areq->base.tfm),
		op->byte_count, areq->nbytes, op->mode,
		op->len, op->hash[0]);

	/* nothing to do: no new data and no finalization requested */
	if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL))
		return 0;

	/* protect against overflow */
	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
		dev_err(ss->dev, "Cannot process too large request\n");
		return -EINVAL;
	}

	if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) {
		/* linearize data to op->buf */
		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					    op->buf + op->len, areq->nbytes, 0);
		op->len += copied;
		return 0;
	}

	spin_lock_bh(&ss->slock);

	/*
	 * if some data have been processed before,
	 * we need to restore the partial hash state
	 */
	if (op->byte_count) {
		ivmode = SS_IV_ARBITRARY;
		/*
		 * NOTE(review): 5 IV words are written even in MD5 mode,
		 * where only 4 are meaningful; op->hash[4] is zero from
		 * init — presumably the engine ignores the extra word.
		 * TODO confirm against the datasheet.
		 */
		for (i = 0; i < 5; i++)
			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
	}
	/* Enable the device */
	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);

	if (!(op->flags & SS_HASH_UPDATE))
		goto hash_final;

	/* start of handling data */
	if (!(op->flags & SS_HASH_FINAL)) {
		/* not final: only whole 64-byte blocks may go to the device */
		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;

		if (end > areq->nbytes || areq->nbytes - end > 63) {
			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
				end, areq->nbytes);
			err = -EINVAL;
			goto release_ss;
		}
	} else {
		/* Since we have the flag final, we can go up to modulo 4 */
		if (areq->nbytes < 4)
			end = 0;
		else
			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
	}

	/* TODO if SGlen % 4 and !op->len then DMA */
	/* probe whether all SG lengths are word-aligned (i is used as a flag) */
	i = 1;
	while (in_sg && i == 1) {
		if (in_sg->length % 4)
			i = 0;
		in_sg = sg_next(in_sg);
	}
	if (i == 1 && !op->len && areq->nbytes)
		dev_dbg(ss->dev, "We can DMA\n");

	i = 0;
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	in_i = 0;

	do {
		/*
		 * we need to linearize in two case:
		 * - the buffer is already used
		 * - the SG does not have enough byte remaining ( < 4)
		 */
		if (op->len || (mi.length - in_i) < 4) {
			/*
			 * if we have entered here we have two reason to stop
			 * - the buffer is full
			 * - reach the end
			 */
			while (op->len < 64 && i < end) {
				/* how many bytes we can read from current SG */
				in_r = min(end - i, 64 - op->len);
				in_r = min_t(size_t, mi.length - in_i, in_r);
				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
				op->len += in_r;
				i += in_r;
				in_i += in_r;
				if (in_i == mi.length) {
					sg_miter_next(&mi);
					in_i = 0;
				}
			}
			if (op->len > 3 && !(op->len % 4)) {
				/* write buf to the device */
				writesl(ss->base + SS_RXFIFO, op->buf,
					op->len / 4);
				op->byte_count += op->len;
				op->len = 0;
			}
		}
		/* fast path: push full words straight from the SG mapping */
		if (mi.length - in_i > 3 && i < end) {
			/* how many bytes we can read from current SG */
			in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
			in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
			/* how many bytes we can write in the device*/
			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
			op->byte_count += todo * 4;
			i += todo * 4;
			in_i += todo * 4;
			rx_cnt -= todo;
			if (!rx_cnt) {
				/* FIFO budget spent: re-read free RX space */
				spaces = readl(ss->base + SS_FCSR);
				rx_cnt = SS_RXFIFO_SPACES(spaces);
			}
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	} while (i < end);

	/*
	 * Now we have written to the device all that we can,
	 * store the remaining bytes in op->buf
	 */
	if ((areq->nbytes - i) < 64) {
		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
			/* how many bytes we can read from current SG */
			in_r = min(areq->nbytes - i, 64 - op->len);
			in_r = min_t(size_t, mi.length - in_i, in_r);
			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
			op->len += in_r;
			i += in_r;
			in_i += in_r;
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	}

	sg_miter_stop(&mi);

	/*
	 * End of data process
	 * Now if we have the flag final go to finalize part
	 * If not, store the partial hash
	 */
	if (op->flags & SS_HASH_FINAL)
		goto hash_final;

	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	/* save the partial hash so a later update/final can restore it */
	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);

	goto release_ss;

/*
 * hash_final: finalize hashing operation
 *
 * If we have some remaining bytes, we write them.
 * Then ask the SS for finalizing the hashing operation
 *
 * I do not check RX FIFO size in this function since the size is 32
 * after each enabling and this function neither write more than 32 words.
 * If we come from the update part, we cannot have more than
 * 3 remaining bytes to write and SS is fast enough to not care about it.
 */
hash_final:

	/* write the remaining words of the wait buffer */
	if (op->len) {
		nwait = op->len / 4;
		if (nwait) {
			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
			op->byte_count += 4 * nwait;
		}
		/* nbw = number of trailing bytes (< 4) left over */
		nbw = op->len - 4 * nwait;
		if (nbw) {
			wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
			wb &= GENMASK((nbw * 8) - 1, 0);
			op->byte_count += nbw;
		}
	}

	/* append the MD5/SHA1 padding bit (0x80) right after the last byte */
	wb |= ((1 << 7) << (nbw * 8));
	bf[j++] = le32_to_cpu(wb);

	/*
	 * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
	 * I take the operations from other MD5/SHA1 implementations
	 */

	/* last block size */
	fill = 64 - (op->byte_count % 64);
	min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));

	/* if we can't fill all data, jump to the next 64 block */
	if (fill < min_fill)
		fill += 64;

	/* skip the zero padding words (bf[] was zero-initialized) */
	j += (fill - min_fill) / sizeof(u32);

	/* write the length of data */
	if (op->mode == SS_OP_SHA1) {
		/* SHA1 stores the bit length big-endian */
		__be64 *bits = (__be64 *)&bf[j];
		*bits = cpu_to_be64(op->byte_count << 3);
		j += 2;
	} else {
		/* MD5 stores the bit length little-endian */
		__le64 *bits = (__le64 *)&bf[j];
		*bits = cpu_to_le64(op->byte_count << 3);
		j += 2;
	}
	writesl(ss->base + SS_RXFIFO, bf, j);

	/* Tell the SS to stop the hashing */
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);

	/*
	 * Wait for SS to finish the hash.
	 * The timeout could happen only in case of bad overclocking
	 * or driver bug.
	 */
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	/* Get the hash from the device */
	if (op->mode == SS_OP_SHA1) {
		for (i = 0; i < 5; i++) {
			v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
			memcpy(areq->result + i * 4, &v, 4);
		}
	} else {
		for (i = 0; i < 4; i++) {
			v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
			memcpy(areq->result + i * 4, &v, 4);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}
  421. int sun4i_hash_final(struct ahash_request *areq)
  422. {
  423. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  424. op->flags = SS_HASH_FINAL;
  425. return sun4i_hash(areq);
  426. }
  427. int sun4i_hash_update(struct ahash_request *areq)
  428. {
  429. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  430. op->flags = SS_HASH_UPDATE;
  431. return sun4i_hash(areq);
  432. }
  433. /* sun4i_hash_finup: finalize hashing operation after an update */
  434. int sun4i_hash_finup(struct ahash_request *areq)
  435. {
  436. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  437. op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
  438. return sun4i_hash(areq);
  439. }
  440. /* combo of init/update/final functions */
  441. int sun4i_hash_digest(struct ahash_request *areq)
  442. {
  443. int err;
  444. struct sun4i_req_ctx *op = ahash_request_ctx(areq);
  445. err = sun4i_hash_init(areq);
  446. if (err)
  447. return err;
  448. op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
  449. return sun4i_hash(areq);
  450. }