img-hash.c

/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#define CR_RESET 0
#define CR_RESET_SET 1
#define CR_RESET_UNSET 0

#define CR_MESSAGE_LENGTH_H 0x4
#define CR_MESSAGE_LENGTH_L 0x8

#define CR_CONTROL 0xc
#define CR_CONTROL_BYTE_ORDER_3210 0
#define CR_CONTROL_BYTE_ORDER_0123 1
#define CR_CONTROL_BYTE_ORDER_2310 2
#define CR_CONTROL_BYTE_ORDER_1032 3
#define CR_CONTROL_BYTE_ORDER_SHIFT 8
#define CR_CONTROL_ALGO_MD5 0
#define CR_CONTROL_ALGO_SHA1 1
#define CR_CONTROL_ALGO_SHA224 2
#define CR_CONTROL_ALGO_SHA256 3

#define CR_INTSTAT 0x10
#define CR_INTENAB 0x14
#define CR_INTCLEAR 0x18
#define CR_INT_RESULTS_AVAILABLE BIT(0)
#define CR_INT_NEW_RESULTS_SET BIT(1)
#define CR_INT_RESULT_READ_ERR BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR BIT(3)
#define CR_INT_STATUS BIT(8)

#define CR_RESULT_QUEUE 0x1c
#define CR_RSD0 0x40
#define CR_CORE_REV 0x50
#define CR_CORE_DES1 0x60
#define CR_CORE_DES2 0x70

#define DRIVER_FLAGS_BUSY BIT(0)
#define DRIVER_FLAGS_FINAL BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY BIT(3)
#define DRIVER_FLAGS_INIT BIT(4)
#define DRIVER_FLAGS_CPU BIT(5)
#define DRIVER_FLAGS_DMA_READY BIT(6)
#define DRIVER_FLAGS_ERROR BIT(7)
#define DRIVER_FLAGS_SG BIT(8)
#define DRIVER_FLAGS_SHA1 BIT(18)
#define DRIVER_FLAGS_SHA224 BIT(19)
#define DRIVER_FLAGS_SHA256 BIT(20)
#define DRIVER_FLAGS_MD5 BIT(21)

#define IMG_HASH_QUEUE_LENGTH 20
#define IMG_HASH_DMA_BURST 4
#define IMG_HASH_DMA_THRESHOLD 64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_0123
#endif
struct img_hash_dev;

struct img_hash_request_ctx {
        struct img_hash_dev *hdev;
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        unsigned long flags;
        size_t digsize;

        dma_addr_t dma_addr;
        size_t dma_ct;

        /* sg root */
        struct scatterlist *sgfirst;
        /* walk state */
        struct scatterlist *sg;
        size_t nents;
        size_t offset;
        unsigned int total;
        size_t sent;

        unsigned long op;

        size_t bufcnt;
        struct ahash_request fallback_req;

        /* Zero length buffer must remain last member of struct */
        u8 buffer[0] __aligned(sizeof(u32));
};

struct img_hash_ctx {
        struct img_hash_dev *hdev;
        unsigned long flags;
        struct crypto_ahash *fallback;
};

struct img_hash_dev {
        struct list_head list;
        struct device *dev;
        struct clk *hash_clk;
        struct clk *sys_clk;
        void __iomem *io_base;

        phys_addr_t bus_addr;
        void __iomem *cpu_addr;

        spinlock_t lock;
        int err;
        struct tasklet_struct done_task;
        struct tasklet_struct dma_task;

        unsigned long flags;
        struct crypto_queue queue;
        struct ahash_request *req;

        struct dma_chan *dma_lch;
};

struct img_hash_drv {
        struct list_head dev_list;
        spinlock_t lock;
};

static struct img_hash_drv img_hash = {
        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};
static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
        return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
                                  u32 offset, u32 value)
{
        writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
        return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

        if (ctx->flags & DRIVER_FLAGS_MD5)
                cr |= CR_CONTROL_ALGO_MD5;
        else if (ctx->flags & DRIVER_FLAGS_SHA1)
                cr |= CR_CONTROL_ALGO_SHA1;
        else if (ctx->flags & DRIVER_FLAGS_SHA224)
                cr |= CR_CONTROL_ALGO_SHA224;
        else if (ctx->flags & DRIVER_FLAGS_SHA256)
                cr |= CR_CONTROL_ALGO_SHA256;

        dev_dbg(hdev->dev, "Starting hash process\n");
        img_hash_write(hdev, CR_CONTROL, cr);

        /*
         * The hardware block requires two cycles between writing the control
         * register and writing the first word of data in non-DMA mode. To
         * ensure the first data write is not grouped in a burst with the
         * control register write, a read is issued to 'flush' the bus.
         */
        if (!dma)
                img_hash_read(hdev, CR_CONTROL);
}
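
/*
 * PIO path: write 'length' bytes to the data port one 32-bit word at a
 * time (the word count is rounded up; the exact message length is
 * programmed separately in img_hash_hw_init()).  'final' marks the last
 * chunk of data for the current request.
 */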
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
                             size_t length, int final)
{
        u32 count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

        if (final)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                writel_relaxed(buffer[count], hdev->cpu_addr);

        return -EINPROGRESS;
}
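
/*
 * DMA completion callback: flush any bytes that were buffered for CPU
 * write-out, then reschedule the dma tasklet if the scatterlist walk has
 * not finished yet.
 */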
static void img_hash_dma_callback(void *data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->bufcnt) {
                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
                ctx->bufcnt = 0;
        }
        if (ctx->sg)
                tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
        if (ctx->dma_ct == 0) {
                dev_err(hdev->dev, "Invalid DMA sg\n");
                hdev->err = -EINVAL;
                return -EINVAL;
        }

        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
                                       sg,
                                       ctx->dma_ct,
                                       DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(hdev->dev, "Null DMA descriptor\n");
                hdev->err = -EINVAL;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
                return -EINVAL;
        }
        desc->callback = img_hash_dma_callback;
        desc->callback_param = hdev;
        dmaengine_submit(desc);
        dma_async_issue_pending(hdev->dma_lch);

        return 0;
}
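
/*
 * CPU path for small requests: copy the whole source scatterlist into the
 * request context buffer (img_hash_cra_init() reserves IMG_HASH_DMA_THRESHOLD
 * extra bytes of request context for this, matching the cut-off used in
 * img_hash_process_data()) and write it out through the data port in one go.
 */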
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
                                        ctx->buffer, hdev->req->nbytes);

        ctx->total = hdev->req->nbytes;
        ctx->bufcnt = 0;

        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

        img_hash_start(hdev, false);

        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return -EINVAL;

        memcpy(req->result, ctx->digest, ctx->digsize);

        return 0;
}
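
/*
 * Drain the hardware result queue into ctx->digest, filling the buffer
 * from the highest-index 32-bit word down to word 0.
 */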
static void img_hash_copy_hash(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        int i;

        for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
                hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = ctx->hdev;

        if (!err) {
                img_hash_copy_hash(req);
                if (DRIVER_FLAGS_FINAL & hdev->flags)
                        err = img_hash_finish(req);
        } else {
                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
                ctx->flags |= DRIVER_FLAGS_ERROR;
        }

        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

        if (req->base.complete)
                req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        img_hash_start(hdev, true);

        dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

        if (!ctx->total)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

        tasklet_schedule(&hdev->dma_task);

        return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
        struct dma_slave_config dma_conf;
        int err = -EINVAL;

        hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
        if (!hdev->dma_lch) {
                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return -EBUSY;
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->bus_addr;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
        dma_conf.device_fc = false;

        err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
        if (err) {
                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
                dma_release_channel(hdev->dma_lch);
                return err;
        }

        return 0;
}
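
/*
 * DMA tasklet: walk the request's scatterlist, sending whole 32-bit words
 * via DMA and gathering any unaligned tail bytes into ctx->buffer so they
 * can be written by the CPU (see the alignment note below).  Falls back to
 * CPU writes if a DMA transfer cannot be set up.
 */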
static void img_hash_dma_task(unsigned long d)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u8 *addr;
        size_t nbytes, bleft, wsend, len, tbc;
        struct scatterlist tsg;

        if (!hdev->req || !ctx->sg)
                return;

        addr = sg_virt(ctx->sg);
        nbytes = ctx->sg->length - ctx->offset;

        /*
         * The hash accelerator does not support a data valid mask. This means
         * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
         * padding bytes in the last word written by that dma would erroneously
         * be included in the hash. To avoid this we round down the transfer,
         * and add the excess to the start of the next dma. It does not matter
         * that the final dma may not be a multiple of 4 bytes as the hashing
         * block is programmed to accept the correct number of bytes.
         */
        bleft = nbytes % 4;
        wsend = (nbytes / 4);

        if (wsend) {
                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
                if (img_hash_xmit_dma(hdev, &tsg)) {
                        dev_err(hdev->dev, "DMA failed, falling back to CPU");
                        ctx->flags |= DRIVER_FLAGS_CPU;
                        hdev->err = 0;
                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
                                          wsend * 4, 0);
                        ctx->sent += wsend * 4;
                        wsend = 0;
                } else {
                        ctx->sent += wsend * 4;
                }
        }

        if (bleft) {
                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer, bleft, ctx->sent);
                tbc = 0;
                ctx->sg = sg_next(ctx->sg);
                while (ctx->sg && (ctx->bufcnt < 4)) {
                        len = ctx->sg->length;
                        if (likely(len > (4 - ctx->bufcnt)))
                                len = 4 - ctx->bufcnt;
                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer + ctx->bufcnt, len,
                                                 ctx->sent + ctx->bufcnt);
                        ctx->bufcnt += tbc;
                        if (tbc >= ctx->sg->length) {
                                ctx->sg = sg_next(ctx->sg);
                                tbc = 0;
                        }
                }

                ctx->sent += ctx->bufcnt;
                ctx->offset = tbc;

                if (!wsend)
                        img_hash_dma_callback(hdev);
        } else {
                ctx->offset = 0;
                ctx->sg = sg_next(ctx->sg);
        }
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->flags & DRIVER_FLAGS_SG)
                dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

        return 0;
}
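
/*
 * Choose the data path for a request: anything of at least
 * IMG_HASH_DMA_THRESHOLD (64) bytes goes through the DMA engine, smaller
 * requests are copied and written out by the CPU.
 */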
static int img_hash_process_data(struct img_hash_dev *hdev)
{
        struct ahash_request *req = hdev->req;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        int err = 0;

        ctx->bufcnt = 0;

        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
                dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
                        req->nbytes);
                err = img_hash_write_via_dma(hdev);
        } else {
                dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
                        req->nbytes);
                err = img_hash_write_via_cpu(hdev);
        }

        return err;
}
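
/*
 * Reset the accelerator, enable the "new results" interrupt and program
 * the total message length in bits, split across the high and low length
 * registers.
 */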
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
        unsigned long long nbits;
        u32 u, l;

        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

        nbits = (u64)hdev->req->nbytes << 3;
        u = nbits >> 32;
        l = nbits;
        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
                hdev->flags |= DRIVER_FLAGS_INIT;
                hdev->err = 0;
        }
        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
        return 0;
}

static int img_hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}
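
/*
 * Queue dispatcher: enqueue the new request and, if the device is idle,
 * pull the next request off the queue, initialise the hardware and start
 * processing it.  If processing fails synchronously the request is
 * completed here, since done_task will never run for it.
 */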
static int img_hash_handle_queue(struct img_hash_dev *hdev,
                                 struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct img_hash_request_ctx *ctx;
        unsigned long flags;
        int err = 0, res = 0;

        spin_lock_irqsave(&hdev->lock, flags);

        if (req)
                res = ahash_enqueue_request(&hdev->queue, req);

        if (DRIVER_FLAGS_BUSY & hdev->flags) {
                spin_unlock_irqrestore(&hdev->lock, flags);
                return res;
        }

        backlog = crypto_get_backlog(&hdev->queue);
        async_req = crypto_dequeue_request(&hdev->queue);
        if (async_req)
                hdev->flags |= DRIVER_FLAGS_BUSY;

        spin_unlock_irqrestore(&hdev->lock, flags);

        if (!async_req)
                return res;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        hdev->req = req;

        ctx = ahash_request_ctx(req);

        dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
                 ctx->op, req->nbytes);

        err = img_hash_hw_init(hdev);

        if (!err)
                err = img_hash_process_data(hdev);

        if (err != -EINPROGRESS) {
                /* done_task will not finish so do it here */
                img_hash_finish_req(req, err);
        }

        return res;
}

static int img_hash_update(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}
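
/*
 * Only the one-shot digest() path below uses the accelerator;
 * init/update/final/finup/import/export above are all delegated to the
 * fallback transform set up in img_hash_cra_init().
 */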
static int img_hash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = NULL;
        struct img_hash_dev *tmp;
        int err;

        spin_lock(&img_hash.lock);
        if (!tctx->hdev) {
                list_for_each_entry(tmp, &img_hash.dev_list, list) {
                        hdev = tmp;
                        break;
                }
                tctx->hdev = hdev;
        } else {
                hdev = tctx->hdev;
        }

        spin_unlock(&img_hash.lock);
        ctx->hdev = hdev;
        ctx->flags = 0;
        ctx->digsize = crypto_ahash_digestsize(tfm);

        switch (ctx->digsize) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA256;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA224;
                break;
        case MD5_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_MD5;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->offset = 0;
        ctx->sent = 0;
        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->sgfirst = req->src;
        ctx->nents = sg_nents(ctx->sg);

        err = img_hash_handle_queue(tctx->hdev, req);

        return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = -ENOMEM;

        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("img_hash: Could not load fallback driver.\n");
                err = PTR_ERR(ctx->fallback);
                goto err;
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct img_hash_request_ctx) +
                                 crypto_ahash_reqsize(ctx->fallback) +
                                 IMG_HASH_DMA_THRESHOLD);

        return 0;

err:
        return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tctx->fallback);
}
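
/*
 * Interrupt handler: acknowledge all pending status bits.  When a new
 * result is signalled for an active request, mark the output (and DMA, if
 * used) as ready and schedule done_task; anything else is only logged.
 */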
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
        struct img_hash_dev *hdev = dev_id;
        u32 reg;

        reg = img_hash_read(hdev, CR_INTSTAT);
        img_hash_write(hdev, CR_INTCLEAR, reg);

        if (reg & CR_INT_NEW_RESULTS_SET) {
                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
                if (DRIVER_FLAGS_BUSY & hdev->flags) {
                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
                        tasklet_schedule(&hdev->done_task);
                } else {
                        dev_warn(hdev->dev,
                                 "HASH interrupt when no active requests.\n");
                }
        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
                dev_warn(hdev->dev,
                         "IRQ triggered before the hash had completed\n");
        } else if (reg & CR_INT_RESULT_READ_ERR) {
                dev_warn(hdev->dev,
                         "Attempt to read from an empty result queue\n");
        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
                dev_warn(hdev->dev,
                         "Data written before the hardware was configured\n");
        }
        return IRQ_HANDLED;
}
static struct ahash_alg img_algs[] = {
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "img-md5",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_md5_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "img-sha1",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha1_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "img-sha224",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha224_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "img-sha256",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha256_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};
static int img_register_algs(struct img_hash_dev *hdev)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
                err = crypto_register_ahash(&img_algs[i]);
                if (err)
                        goto err_reg;
        }
        return 0;

err_reg:
        for (; i--; )
                crypto_unregister_ahash(&img_algs[i]);

        return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
                crypto_unregister_ahash(&img_algs[i]);
        return 0;
}
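
/*
 * Completion tasklet: when the device is idle, restart the queue;
 * otherwise tear down any finished DMA transfer and, once the output is
 * flagged ready, copy the result out and complete the request.
 */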
static void img_hash_done_task(unsigned long data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        int err = 0;

        if (hdev->err == -EINVAL) {
                err = hdev->err;
                goto finish;
        }

        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
                img_hash_handle_queue(hdev, NULL);
                return;
        }

        if (DRIVER_FLAGS_CPU & hdev->flags) {
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
                        img_hash_write_via_dma_stop(hdev);
                        if (hdev->err) {
                                err = hdev->err;
                                goto finish;
                        }
                }
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
                                        DRIVER_FLAGS_OUTPUT_READY);
                        goto finish;
                }
        }
        return;

finish:
        img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
        { .compatible = "img,hash-accelerator" },
        {}
};
MODULE_DEVICE_TABLE(of, img_hash_match);
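
/*
 * Illustrative device-tree node (an assumption for documentation purposes,
 * not taken from a binding document): it simply mirrors the resources the
 * probe path below requests - two register regions (register bank and data
 * write port), one interrupt, clocks named "sys" and "hash", and a DMA
 * channel named "tx".  All addresses, specifiers and phandles here are
 * placeholders; consult the platform's binding for the authoritative format.
 *
 *	hash: hash@4000000 {
 *		compatible = "img,hash-accelerator";
 *		reg = <0x04000000 0x100>,	// register bank
 *		      <0x04000100 0x4>;		// data write port
 *		interrupts = <0 59 4>;
 *		dmas = <&dma 1>;
 *		dma-names = "tx";
 *		clocks = <&clk_sys>, <&clk_hash>;
 *		clock-names = "sys", "hash";
 *	};
 */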
static int img_hash_probe(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;
        struct device *dev = &pdev->dev;
        struct resource *hash_res;
        int irq;
        int err;

        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return -ENOMEM;

        spin_lock_init(&hdev->lock);

        hdev->dev = dev;

        platform_set_drvdata(pdev, hdev);

        INIT_LIST_HEAD(&hdev->list);

        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

        /* Register bank */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        hdev->io_base = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
                dev_err(dev, "can't ioremap, returned %d\n", err);
                goto res_err;
        }

        /* Write port (DMA or CPU) */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                dev_err(dev, "can't ioremap write port\n");
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
        hdev->bus_addr = hash_res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = irq;
                goto res_err;
        }

        err = devm_request_irq(dev, irq, img_irq_handler, 0,
                               dev_name(dev), hdev);
        if (err) {
                dev_err(dev, "unable to request irq\n");
                goto res_err;
        }
        dev_dbg(dev, "using IRQ channel %d\n", irq);

        hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
        if (IS_ERR(hdev->hash_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->hash_clk);
                goto res_err;
        }

        hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(hdev->sys_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->sys_clk);
                goto res_err;
        }

        err = clk_prepare_enable(hdev->hash_clk);
        if (err)
                goto res_err;

        err = clk_prepare_enable(hdev->sys_clk);
        if (err)
                goto clk_err;

        err = img_hash_dma_init(hdev);
        if (err)
                goto dma_err;

        dev_dbg(dev, "using %s for DMA transfers\n",
                dma_chan_name(hdev->dma_lch));

        spin_lock(&img_hash.lock);
        list_add_tail(&hdev->list, &img_hash.dev_list);
        spin_unlock(&img_hash.lock);

        err = img_register_algs(hdev);
        if (err)
                goto err_algs;
        dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

        return 0;

err_algs:
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);
        dma_release_channel(hdev->dma_lch);
dma_err:
        clk_disable_unprepare(hdev->sys_clk);
clk_err:
        clk_disable_unprepare(hdev->hash_clk);
res_err:
        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        return err;
}
static int img_hash_remove(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;

        hdev = platform_get_drvdata(pdev);
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);

        img_unregister_algs(hdev);

        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        dma_release_channel(hdev->dma_lch);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

static int img_hash_resume(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(hdev->hash_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(hdev->sys_clk);
        if (ret) {
                clk_disable_unprepare(hdev->hash_clk);
                return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
        .probe = img_hash_probe,
        .remove = img_hash_remove,
        .driver = {
                .name = "img-hash-accelerator",
                .pm = &img_hash_pm_ops,
                .of_match_table = of_match_ptr(img_hash_match),
        }
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");