  1. /*
  2. * Cryptographic API.
  3. *
  4. * Support Blackfin CRC HW acceleration.
  5. *
  6. * Copyright 2012 Analog Devices Inc.
  7. *
  8. * Licensed under the GPL-2.
  9. */
  10. #include <linux/err.h>
  11. #include <linux/device.h>
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/errno.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/kernel.h>
  17. #include <linux/irq.h>
  18. #include <linux/io.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/scatterlist.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/delay.h>
  23. #include <linux/crypto.h>
  24. #include <linux/cryptohash.h>
  25. #include <crypto/scatterwalk.h>
  26. #include <crypto/algapi.h>
  27. #include <crypto/hash.h>
  28. #include <crypto/internal/hash.h>
  29. #include <asm/unaligned.h>
  30. #include <asm/dma.h>
  31. #include <asm/portmux.h>
  32. #include <asm/io.h>
  33. #include "bfin_crc.h"
/* maximum requests that may wait in a device's crypto_queue */
#define CRC_CCRYPTO_QUEUE_LENGTH	5

#define DRIVER_NAME "bfin-hmac-crc"
#define CHKSUM_DIGEST_SIZE      4	/* CRC result width in bytes */
#define CHKSUM_BLOCK_SIZE       1	/* byte stream: block size is 1 */
#define CRC_MAX_DMA_DESC	100	/* upper bound on sg entries accepted per request */

/* request states kept in bfin_crypto_crc_reqctx.flag */
#define CRC_CRYPTO_STATE_UPDATE		1
#define CRC_CRYPTO_STATE_FINALUPDATE	2
#define CRC_CRYPTO_STATE_FINISH		3
/*
 * Per-device state for one Blackfin CRC peripheral instance.
 */
struct bfin_crypto_crc {
	struct list_head	list;		/* link in crc_list.dev_list */
	struct device		*dev;
	spinlock_t		lock;		/* protects queue and busy flag */
	int			irq;		/* DCNTEXP interrupt line */
	int			dma_ch;		/* peripheral DMA channel */
	u32			poly;		/* CRC polynomial (from platform data) */
	struct crc_register	*regs;		/* mapped register block */

	struct ahash_request	*req;		/* current request in operation */
	struct dma_desc_array	*sg_cpu;	/* virt addr of sg dma descriptors */
	dma_addr_t		sg_dma;		/* phy addr of sg dma descriptors */
	u8			*sg_mid_buf;	/* bounce area gluing unaligned sg tails to 4 bytes */
	dma_addr_t		sg_mid_dma;	/* phy addr of sg mid buffer */

	struct tasklet_struct	done_task;	/* restarts the queue after completion irq */
	struct crypto_queue	queue;		/* waiting requests */

	u8			busy:1;		/* crc device in operation flag */
};
/* global registry of probed CRC devices, guarded by its own lock */
static struct bfin_crypto_crc_list {
	struct list_head	dev_list;
	spinlock_t		lock;
} crc_list;
/*
 * Per-request context. Because the hardware consumes whole 32-bit
 * words, up to 3 trailing bytes of each update are parked in bufnext
 * and chained in front of the next update via bufsl/buflast.
 */
struct bfin_crypto_crc_reqctx {
	struct bfin_crypto_crc	*crc;	/* device handling this request */

	unsigned int		total;	/* total request bytes */
	size_t			sg_buflen;	/* bytes for this update */
	unsigned int		sg_nents;	/* sg entries actually submitted to DMA */
	struct scatterlist	*sg;	/* sg list head for this update */
	struct scatterlist	bufsl[2];	/* chained sg list */

	size_t			bufnext_len;
	size_t			buflast_len;
	u8			bufnext[CHKSUM_DIGEST_SIZE];	/* extra bytes for next update */
	u8			buflast[CHKSUM_DIGEST_SIZE];	/* extra bytes from last update */

	u8			flag;	/* CRC_CRYPTO_STATE_* */
};
/* per-transform context: bound device plus the 32-bit seed ("key") */
struct bfin_crypto_crc_ctx {
	struct bfin_crypto_crc	*crc;
	u32			key;
};
  80. /*
  81. * derive number of elements in scatterlist
  82. */
  83. static int sg_count(struct scatterlist *sg_list)
  84. {
  85. struct scatterlist *sg = sg_list;
  86. int sg_nents = 1;
  87. if (sg_list == NULL)
  88. return 0;
  89. while (!sg_is_last(sg)) {
  90. sg_nents++;
  91. sg = sg_next(sg);
  92. }
  93. return sg_nents;
  94. }
/*
 * get element in scatter list by given index
 *
 * Callers must pass index < nents: if the loop runs off the end,
 * for_each_sg leaves sg pointing past the last entry and the returned
 * value is not meaningful.
 */
static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
				unsigned int index)
{
	struct scatterlist *sg = NULL;
	int i;

	for_each_sg(sg_list, sg, nents, i)
		if (i == index)
			break;

	return sg;
}
/*
 * Program the CRC block for a fresh computation: clear the compare data
 * count, select CRC-compute mode, and seed the running result with @key.
 * Always returns 0.
 */
static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
	writel(0, &crc->regs->datacntrld);
	writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
	writel(key, &crc->regs->curresult);

	/* setup CRC interrupts: ack stale status (presumably W1C — confirm
	 * against the HRM; the irq handler acks the same way), then enable */
	writel(CMPERRI | DCNTEXPI, &crc->regs->status);
	writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);

	return 0;
}
  118. static int bfin_crypto_crc_init(struct ahash_request *req)
  119. {
  120. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  121. struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
  122. struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
  123. struct bfin_crypto_crc *crc;
  124. dev_dbg(ctx->crc->dev, "crc_init\n");
  125. spin_lock_bh(&crc_list.lock);
  126. list_for_each_entry(crc, &crc_list.dev_list, list) {
  127. crc_ctx->crc = crc;
  128. break;
  129. }
  130. spin_unlock_bh(&crc_list.lock);
  131. if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
  132. dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
  133. CRC_MAX_DMA_DESC);
  134. return -EINVAL;
  135. }
  136. ctx->crc = crc;
  137. ctx->bufnext_len = 0;
  138. ctx->buflast_len = 0;
  139. ctx->sg_buflen = 0;
  140. ctx->total = 0;
  141. ctx->flag = 0;
  142. /* init crc results */
  143. put_unaligned_le32(crc_ctx->key, req->result);
  144. dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
  145. crypto_ahash_digestsize(tfm));
  146. return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
  147. }
/*
 * Build the descriptor-array DMA program that streams ctx->sg into the
 * CRC block. The engine only accepts 32-bit words, so each sg entry is
 * chopped to a multiple of 4 bytes; the remainder ("mid" bytes) is
 * copied into sg_mid_buf, topped up with the head of the following sg
 * entry, and emitted as its own one-word descriptor.
 *
 * NOTE(review): the return value of dma_map_sg() is not checked; a
 * failed/partial mapping would go unnoticed.
 */
static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
{
	struct scatterlist *sg;
	struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
	int i = 0, j = 0;
	unsigned long dma_config;
	unsigned int dma_count;
	unsigned int dma_addr;
	unsigned int mid_dma_count = 0;
	int dma_mod;

	dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);

	for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
		dma_addr = sg_dma_address(sg);
		/* deduce extra bytes in last sg */
		if (sg_is_last(sg))
			dma_count = sg_dma_len(sg) - ctx->bufnext_len;
		else
			dma_count = sg_dma_len(sg);

		if (mid_dma_count) {
			/* Append last middle dma buffer to 4 bytes with first
			   bytes in current sg buffer. Move addr of current
			   sg and deduce the length of current sg.
			 */
			memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
				sg_virt(sg),
				CHKSUM_DIGEST_SIZE - mid_dma_count);
			dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
			dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;

			dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
				DMAEN | PSIZE_32 | WDSIZE_32;

			/* setup new dma descriptor for next middle dma */
			crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
			crc->sg_cpu[i].cfg = dma_config;
			crc->sg_cpu[i].x_count = 1;
			crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
			dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
				i, crc->sg_cpu[i].start_addr,
				crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
				crc->sg_cpu[i].x_modify);
			i++;
		}

		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
		/* chop current sg dma len to multiple of 32 bits */
		mid_dma_count = dma_count % 4;
		dma_count &= ~0x3;

		/* pick the widest transfer size the address alignment allows;
		 * dma_count becomes a count of words of that size */
		if (dma_addr % 4 == 0) {
			dma_config |= WDSIZE_32;
			dma_count >>= 2;
			dma_mod = 4;
		} else if (dma_addr % 2 == 0) {
			dma_config |= WDSIZE_16;
			dma_count >>= 1;
			dma_mod = 2;
		} else {
			dma_config |= WDSIZE_8;
			dma_mod = 1;
		}

		crc->sg_cpu[i].start_addr = dma_addr;
		crc->sg_cpu[i].cfg = dma_config;
		crc->sg_cpu[i].x_count = dma_count;
		crc->sg_cpu[i].x_modify = dma_mod;
		dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
			"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
			i, crc->sg_cpu[i].start_addr,
			crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
			crc->sg_cpu[i].x_modify);
		i++;

		if (mid_dma_count) {
			/* copy extra bytes to next middle dma buffer.
			 * NOTE(review): (dma_count << 2) is the byte offset
			 * only in the WDSIZE_32 branch above; for the 16- and
			 * 8-bit branches dma_count was shifted by 1 or 0, so
			 * this source offset looks wrong there — confirm
			 * against the upstream driver history. */
			memcpy(crc->sg_mid_buf + (i << 2),
				(u8*)sg_virt(sg) + (dma_count << 2),
				mid_dma_count);
		}
	}

	dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
	/* For final update req, append the buffer for next update as well*/
	if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
		ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
		crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
						CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
		crc->sg_cpu[i].cfg = dma_config;
		crc->sg_cpu[i].x_count = 1;
		crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
		dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
			"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
			i, crc->sg_cpu[i].start_addr,
			crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
			crc->sg_cpu[i].x_modify);
		i++;
	}

	if (i == 0)
		return;

	/* Set the last descriptor to stop mode */
	crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
	crc->sg_cpu[i - 1].cfg |= DI_EN;
	set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
	set_dma_x_count(crc->dma_ch, 0);
	set_dma_x_modify(crc->dma_ch, 0);
	set_dma_config(crc->dma_ch, dma_config);
}
/*
 * Enqueue @req (NULL when called from the tasklet just to kick the
 * queue) and, if the engine is idle, dequeue the next request, prepare
 * its scatterlist (word-aligning the data, parking any 1-3 trailing
 * bytes in bufnext) and start the DMA + CRC block.
 *
 * Returns -EINPROGRESS while hardware work is pending, 0 when the
 * request was absorbed synchronously, otherwise the enqueue status.
 */
static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct bfin_crypto_crc_reqctx *ctx;
	struct scatterlist *sg;
	int ret = 0;
	int nsg, i, j;
	unsigned int nextlen;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&crc->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&crc->queue, req);
	if (crc->busy) {
		/* hardware owned by another request; leave ours queued */
		spin_unlock_irqrestore(&crc->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&crc->queue);
	async_req = crypto_dequeue_request(&crc->queue);
	if (async_req)
		crc->busy = 1;
	spin_unlock_irqrestore(&crc->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	crc->req = req;
	ctx = ahash_request_ctx(req);
	ctx->sg = NULL;
	ctx->sg_buflen = 0;
	ctx->sg_nents = 0;

	dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
						ctx->flag, req->nbytes);

	if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
		/* final: nothing buffered means nothing left to feed */
		if (ctx->bufnext_len == 0) {
			crc->busy = 0;
			return 0;
		}

		/* Pack last crc update buffer to 32bit */
		memset(ctx->bufnext + ctx->bufnext_len, 0,
				CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
	} else {
		/* Pack small data which is less than 32bit to buffer for next update. */
		if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
			memcpy(ctx->bufnext + ctx->bufnext_len,
				sg_virt(req->src), req->nbytes);
			ctx->bufnext_len += req->nbytes;
			if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
				ctx->bufnext_len) {
				goto finish_update;
			} else {
				crc->busy = 0;
				return 0;
			}
		}

		if (ctx->bufnext_len) {
			/* Chain in extra bytes of last update */
			ctx->buflast_len = ctx->bufnext_len;
			memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);

			nsg = ctx->sg_buflen ? 2 : 1;
			sg_init_table(ctx->bufsl, nsg);
			sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
			if (nsg > 1)
				scatterwalk_sg_chain(ctx->bufsl, nsg,
						req->src);
			ctx->sg = ctx->bufsl;
		} else
			ctx->sg = req->src;

		/* Chop crc buffer size to multiple of 32 bit */
		nsg = ctx->sg_nents = sg_count(ctx->sg);
		ctx->sg_buflen = ctx->buflast_len + req->nbytes;
		ctx->bufnext_len = ctx->sg_buflen % 4;
		ctx->sg_buflen &= ~0x3;

		if (ctx->bufnext_len) {
			/* copy extra bytes to buffer for next update.
			 * Walk the sg list backwards peeling the trailing
			 * bufnext_len bytes into bufnext; fully-consumed
			 * tail entries are dropped from sg_nents.
			 * NOTE(review): sg_dma_len() is read here before
			 * dma_map_sg() runs (that happens later in
			 * config_dma) — presumably it aliases sg->length on
			 * this platform; confirm. */
			memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
			nextlen = ctx->bufnext_len;
			for (i = nsg - 1; i >= 0; i--) {
				sg = sg_get(ctx->sg, nsg, i);
				j = min(nextlen, sg_dma_len(sg));
				memcpy(ctx->bufnext + nextlen - j,
					sg_virt(sg) + sg_dma_len(sg) - j, j);
				if (j == sg_dma_len(sg))
					ctx->sg_nents--;
				nextlen -= j;
				if (nextlen == 0)
					break;
			}
		}
	}

finish_update:
	if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
		ctx->flag == CRC_CRYPTO_STATE_FINISH))
		ctx->sg_buflen += CHKSUM_DIGEST_SIZE;

	/* set CRC data count before start DMA */
	writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);

	/* setup and enable CRC DMA */
	bfin_crypto_crc_config_dma(crc);

	/* finally kick off CRC operation */
	reg = readl(&crc->regs->control);
	writel(reg | BLKEN, &crc->regs->control);

	return -EINPROGRESS;
}
  354. static int bfin_crypto_crc_update(struct ahash_request *req)
  355. {
  356. struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
  357. if (!req->nbytes)
  358. return 0;
  359. dev_dbg(ctx->crc->dev, "crc_update\n");
  360. ctx->total += req->nbytes;
  361. ctx->flag = CRC_CRYPTO_STATE_UPDATE;
  362. return bfin_crypto_crc_handle_queue(ctx->crc, req);
  363. }
  364. static int bfin_crypto_crc_final(struct ahash_request *req)
  365. {
  366. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  367. struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
  368. struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
  369. dev_dbg(ctx->crc->dev, "crc_final\n");
  370. ctx->flag = CRC_CRYPTO_STATE_FINISH;
  371. crc_ctx->key = 0;
  372. return bfin_crypto_crc_handle_queue(ctx->crc, req);
  373. }
  374. static int bfin_crypto_crc_finup(struct ahash_request *req)
  375. {
  376. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  377. struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
  378. struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
  379. dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
  380. ctx->total += req->nbytes;
  381. ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
  382. crc_ctx->key = 0;
  383. return bfin_crypto_crc_handle_queue(ctx->crc, req);
  384. }
  385. static int bfin_crypto_crc_digest(struct ahash_request *req)
  386. {
  387. int ret;
  388. ret = bfin_crypto_crc_init(req);
  389. if (ret)
  390. return ret;
  391. return bfin_crypto_crc_finup(req);
  392. }
  393. static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
  394. unsigned int keylen)
  395. {
  396. struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
  397. dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
  398. if (keylen != CHKSUM_DIGEST_SIZE) {
  399. crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  400. return -EINVAL;
  401. }
  402. crc_ctx->key = get_unaligned_le32(key);
  403. return 0;
  404. }
  405. static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
  406. {
  407. struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
  408. crc_ctx->key = 0;
  409. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  410. sizeof(struct bfin_crypto_crc_reqctx));
  411. return 0;
  412. }
/* tfm destructor: no per-transform resources to release */
static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
{
}
/*
 * The single algorithm instance exported by this driver; registered
 * once (for the first probed device) and shared by all devices.
 */
static struct ahash_alg algs = {
	.init		= bfin_crypto_crc_init,
	.update		= bfin_crypto_crc_update,
	.final		= bfin_crypto_crc_final,
	.finup		= bfin_crypto_crc_finup,
	.digest		= bfin_crypto_crc_digest,
	.setkey		= bfin_crypto_crc_setkey,
	.halg.digestsize	= CHKSUM_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(crc32)",
		.cra_driver_name	= DRIVER_NAME,
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct bfin_crypto_crc_ctx),
		.cra_alignmask		= 3,
		.cra_module		= THIS_MODULE,
		.cra_init		= bfin_crypto_crc_cra_init,
		.cra_exit		= bfin_crypto_crc_cra_exit,
	}
};
/*
 * Tasklet body, scheduled by the irq handler after a request finishes:
 * re-run the queue to start the next pending request outside irq
 * context.
 */
static void bfin_crypto_crc_done_task(unsigned long data)
{
	struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;

	bfin_crypto_crc_handle_queue(crc, NULL);
}
/*
 * DCNTEXP interrupt: the programmed word count has been consumed by the
 * CRC block. Publish the result, stop the block, mark the device idle,
 * complete the request and let the tasklet start the next one.
 */
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
	struct bfin_crypto_crc *crc = dev_id;
	u32 reg;

	if (readl(&crc->regs->status) & DCNTEXP) {
		/* ack the interrupt (status bits appear write-1-to-clear) */
		writel(DCNTEXP, &crc->regs->status);

		/* prepare results */
		put_unaligned_le32(readl(&crc->regs->result),
			crc->req->result);

		/* stop the CRC block and release the device */
		reg = readl(&crc->regs->control);
		writel(reg & ~BLKEN, &crc->regs->control);
		crc->busy = 0;

		if (crc->req->base.complete)
			crc->req->base.complete(&crc->req->base, 0);

		tasklet_schedule(&crc->done_task);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
#ifdef CONFIG_PM
/**
 * bfin_crypto_crc_suspend - suspend crc device
 * @pdev: device being suspended
 * @state: requested suspend state
 *
 * Busy-waits (bounded) for the CRC block to go idle; refuses the
 * suspend with -EBUSY if it never does.
 */
static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
	int i = 100000;

	while ((readl(&crc->regs->control) & BLKEN) && --i)
		cpu_relax();

	if (i == 0)
		return -EBUSY;

	return 0;
}
#else
# define bfin_crypto_crc_suspend NULL
#endif

/* no resume handler is implemented */
#define bfin_crypto_crc_resume NULL
  482. /**
  483. * bfin_crypto_crc_probe - Initialize module
  484. *
  485. */
  486. static int bfin_crypto_crc_probe(struct platform_device *pdev)
  487. {
  488. struct device *dev = &pdev->dev;
  489. struct resource *res;
  490. struct bfin_crypto_crc *crc;
  491. unsigned int timeout = 100000;
  492. int ret;
  493. crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
  494. if (!crc) {
  495. dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
  496. return -ENOMEM;
  497. }
  498. crc->dev = dev;
  499. INIT_LIST_HEAD(&crc->list);
  500. spin_lock_init(&crc->lock);
  501. tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
  502. crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
  503. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  504. if (res == NULL) {
  505. dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
  506. return -ENOENT;
  507. }
  508. crc->regs = devm_ioremap_resource(dev, res);
  509. if (IS_ERR((void *)crc->regs)) {
  510. dev_err(&pdev->dev, "Cannot map CRC IO\n");
  511. return PTR_ERR((void *)crc->regs);
  512. }
  513. crc->irq = platform_get_irq(pdev, 0);
  514. if (crc->irq < 0) {
  515. dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
  516. return -ENOENT;
  517. }
  518. ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
  519. IRQF_SHARED, dev_name(dev), crc);
  520. if (ret) {
  521. dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
  522. return ret;
  523. }
  524. res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
  525. if (res == NULL) {
  526. dev_err(&pdev->dev, "No CRC DMA channel specified\n");
  527. return -ENOENT;
  528. }
  529. crc->dma_ch = res->start;
  530. ret = request_dma(crc->dma_ch, dev_name(dev));
  531. if (ret) {
  532. dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
  533. return ret;
  534. }
  535. crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
  536. if (crc->sg_cpu == NULL) {
  537. ret = -ENOMEM;
  538. goto out_error_dma;
  539. }
  540. /*
  541. * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
  542. * 1 last + 1 next dma descriptors
  543. */
  544. crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
  545. crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
  546. * ((CRC_MAX_DMA_DESC + 1) << 1);
  547. writel(0, &crc->regs->control);
  548. crc->poly = (u32)pdev->dev.platform_data;
  549. writel(crc->poly, &crc->regs->poly);
  550. while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
  551. cpu_relax();
  552. if (timeout == 0)
  553. dev_info(&pdev->dev, "init crc poly timeout\n");
  554. platform_set_drvdata(pdev, crc);
  555. spin_lock(&crc_list.lock);
  556. list_add(&crc->list, &crc_list.dev_list);
  557. spin_unlock(&crc_list.lock);
  558. if (list_is_singular(&crc_list.dev_list)) {
  559. ret = crypto_register_ahash(&algs);
  560. if (ret) {
  561. dev_err(&pdev->dev,
  562. "Can't register crypto ahash device\n");
  563. goto out_error_dma;
  564. }
  565. }
  566. dev_info(&pdev->dev, "initialized\n");
  567. return 0;
  568. out_error_dma:
  569. if (crc->sg_cpu)
  570. dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
  571. free_dma(crc->dma_ch);
  572. return ret;
  573. }
  574. /**
  575. * bfin_crypto_crc_remove - Initialize module
  576. *
  577. */
  578. static int bfin_crypto_crc_remove(struct platform_device *pdev)
  579. {
  580. struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
  581. if (!crc)
  582. return -ENODEV;
  583. spin_lock(&crc_list.lock);
  584. list_del(&crc->list);
  585. spin_unlock(&crc_list.lock);
  586. crypto_unregister_ahash(&algs);
  587. tasklet_kill(&crc->done_task);
  588. free_dma(crc->dma_ch);
  589. return 0;
  590. }
/* platform driver glue; resume is not implemented (NULL) */
static struct platform_driver bfin_crypto_crc_driver = {
	.probe     = bfin_crypto_crc_probe,
	.remove    = bfin_crypto_crc_remove,
	.suspend   = bfin_crypto_crc_suspend,
	.resume    = bfin_crypto_crc_resume,
	.driver    = {
		.name  = DRIVER_NAME,
	},
};
  600. /**
  601. * bfin_crypto_crc_mod_init - Initialize module
  602. *
  603. * Checks the module params and registers the platform driver.
  604. * Real work is in the platform probe function.
  605. */
  606. static int __init bfin_crypto_crc_mod_init(void)
  607. {
  608. int ret;
  609. pr_info("Blackfin hardware CRC crypto driver\n");
  610. INIT_LIST_HEAD(&crc_list.dev_list);
  611. spin_lock_init(&crc_list.lock);
  612. ret = platform_driver_register(&bfin_crypto_crc_driver);
  613. if (ret) {
  614. pr_err("unable to register driver\n");
  615. return ret;
  616. }
  617. return 0;
  618. }
/**
 * bfin_crypto_crc_mod_exit - Deinitialize module
 *
 * Unregisters the platform driver; per-device teardown happens in
 * bfin_crypto_crc_remove().
 */
static void __exit bfin_crypto_crc_mod_exit(void)
{
	platform_driver_unregister(&bfin_crypto_crc_driver);
}
/* module entry points and metadata */
module_init(bfin_crypto_crc_mod_init);
module_exit(bfin_crypto_crc_mod_exit);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
MODULE_LICENSE("GPL");