atmel-aes.c

/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

/* AES flags */
#define AES_FLAGS_MODE_MASK	0x03ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_CFB128	BIT(7)
#define AES_FLAGS_OFB		BIT(8)
#define AES_FLAGS_CTR		BIT(9)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)
#define AES_FLAGS_FAST		BIT(19)

#define ATMEL_AES_QUEUE_LENGTH	50
#define ATMEL_AES_DMA_THRESHOLD	16
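/*
 * Requests larger than ATMEL_AES_DMA_THRESHOLD bytes are transferred with the
 * two DMA channels; smaller requests are written to IDATAR and read back from
 * ODATAR directly by the CPU.
 */
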
struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32	max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev	*dd;

	int	keylen;
	u32	key[AES_KEYSIZE_256 / sizeof(u32)];

	u16	block_size;
};

struct atmel_aes_reqctx {
	unsigned long	mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_aes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t	bufcnt;
	size_t	buflen;
	size_t	dma_size;

	void		*buf_in;
	int		dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_aes_dma	dma_lch_in;

	void		*buf_out;
	int		dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_aes_dma	dma_lch_out;

	struct atmel_aes_caps	caps;

	u32	hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
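/* Count how many scatterlist entries are needed to cover req->nbytes. */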
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;
	total = req->nbytes;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}
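/*
 * Copy up to 'total' bytes between 'buf' and the scatterlist, advancing
 * *sg and *offset as entries are consumed; 'out' selects the copy direction,
 * as in scatterwalk_map_and_copy().
 */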
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}
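/*
 * Bring the hardware to a known state: enable the peripheral clock and, on
 * first use, issue a software reset before programming the mode register.
 */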
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
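/*
 * Program both DMA channels for one chunk: memory -> IDATAR on the "in"
 * channel and ODATAR -> memory on the "out" channel, with bus width and
 * burst size chosen from the current CFB mode.
 */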
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *in_desc, *out_desc;

	dd->dma_size = length;

	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
				   DMA_FROM_DEVICE);

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
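/*
 * PIO path for small requests: copy the input into buf_in, enable the
 * DATARDY interrupt and feed the words to the input data registers.
 */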
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
				dd->dma_size, DMA_TO_DEVICE);
	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);
	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}
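/*
 * Start a DMA transfer: map the source/destination scatterlists directly
 * when both are word aligned and block-sized ("fast" path), otherwise stage
 * the data through the pre-allocated bounce buffers.
 */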
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;
	} else {
		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
					dd->dma_size, DMA_TO_DEVICE);

		/* use cache buffers */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}
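/*
 * Program the mode register (key size, operating mode, CFB size, direction,
 * start mode), then load the key and, for the IV-based modes, the IV.
 */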
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}
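/*
 * Enqueue the request (if any) and, when the engine is idle, dequeue the
 * next one and start it; called from process context and from the queue
 * tasklet.
 */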
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
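/*
 * Tear down the DMA transfer that just completed: unmap the scatterlists on
 * the fast path, or copy the result out of the bounce buffer otherwise.
 */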
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}
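/*
 * Allocate one page each for the input and output bounce buffers and map
 * them for streaming DMA; undone by atmel_aes_buff_cleanup().
 */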
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
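/*
 * Common entry point for all modes: check that the request length is a
 * multiple of the mode's block size, pick a device and queue the request.
 */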
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}
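/*
 * Request the two slave DMA channels ("tx" feeds IDATAR, "rx" drains ODATAR)
 * and pre-fill their slave configuration.
 */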
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
	struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, 0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};
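/*
 * Bottom halves: queue_task restarts the queue when the engine goes idle;
 * done_task completes the current transfer (PIO read-back or DMA teardown)
 * and chains the next DMA chunk if data remains.
 */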
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
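/*
 * Derive the capability set (dual buffer, CFB64, maximum DMA burst) from the
 * major hardware revision read at probe time.
 */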
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif
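/*
 * Probe: gather platform data (or build it from the device tree), map the
 * registers, request the IRQ, clock and DMA channels, then register the
 * crypto algorithms the hardware revision supports.
 */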
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	unsigned long aes_phys_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;
	aes_phys_size = resource_size(aes_res);

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto aes_irq_err;
	}

	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
						aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto aes_irq_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto clk_err;
	}

	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
	if (!aes_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto aes_io_err;
	}

	atmel_aes_hw_version_init(aes_dd);

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
	iounmap(aes_dd->io_base);
aes_io_err:
	clk_put(aes_dd->iclk);
clk_err:
	free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	kfree(aes_dd);
	aes_dd = NULL;
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	iounmap(aes_dd->io_base);

	clk_put(aes_dd->iclk);

	if (aes_dd->irq > 0)
		free_irq(aes_dd->irq, aes_dd);

	kfree(aes_dd);
	aes_dd = NULL;

	return 0;
}
static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");