hash_core.c

/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) "hashX hashX: " fmt

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
/* HMAC-SHA1, no key */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
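
/*
 * These are the standard HMAC digests of the empty message computed with a
 * zero-length key. They can be reproduced from userspace, e.g. with the
 * openssl CLI (a sketch, assuming openssl is installed):
 *
 *	echo -n "" | openssl dgst -sha1 -hmac ""
 *	echo -n "" | openssl dgst -sha256 -hmac ""
 */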
/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list:	A list of registered devices to choose from.
 * @device_allocation:	A semaphore initialized with number of devices.
 */
struct hash_driver_data {
	struct klist		device_list;
	struct semaphore	device_allocation;
};

static struct hash_driver_data	driver_data;
/* Declaration of functions */

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data:	Structure for the hash device.
 * @message:		Last word of a message.
 * @index_bytes:	The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);
/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data:	Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * hash_get_device_data.
	 */
	up(&driver_data.device_allocation);
}
static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 16,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);
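	/*
	 * Note: dma_request_channel() may return NULL; as written, this
	 * driver assumes the platform data always describes a usable
	 * mem-to-hash channel and does not check for allocation failure.
	 */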
	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = data;

	complete(&ctx->device->dma.complete);
}
static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;
	dma_cookie_t cookie;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
			__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
					     ctx->device->dma.sg,
					     ctx->device->dma.nents,
					     direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
		__func__);
	desc = dmaengine_prep_slave_sg(channel,
				       ctx->device->dma.sg,
				       ctx->device->dma.sg_len,
				       DMA_MEM_TO_DEV,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(ctx->device->dev,
			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
			  struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);

	if (error) {
		dev_dbg(ctx->device->dev,
			"%s: hash_set_dma_transfer() failed\n", __func__);
		return error;
	}

	return len;
}
/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data:	Structure for the hash device.
 * @zero_hash:		Buffer to return the empty message digest.
 * @zero_hash_size:	Hash size of the empty message digest.
 * @zero_digest:	True if a pre-calculated digest was returned.
 */
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	/* The caller is responsible for ctx != NULL. */

	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &sha1_zero_message_hash[0],
			       SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
			memcpy(zero_hash, &sha256_zero_message_hash[0],
			       SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
				__func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
				       SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
				       SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
					__func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev,
				"%s: Continue hash calculation, since hmac key available\n",
				__func__);
		}
	}
out:

	return ret;
}
/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data:	Structure for the hash device.
 * @save_device_state:	If true, saves the current hw state.
 *
 * This function requests that power (regulator) and clock be disabled, and
 * can also save the current hw state.
 */
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}
/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data:		Structure for the hash device.
 * @restore_device_state:	If true, restores a previously saved hw state.
 *
 * This function requests that power (regulator) and clock be enabled, and
 * can also restore a previously saved hw state.
 */
static int hash_enable_power(struct hash_device_data *device_data,
			     bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "%s: regulator_enable() failed!\n",
				__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
			/* Do not let regulator_disable() mask the error. */
			regulator_disable(device_data->regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state && restore_device_state) {
		device_data->restore_dev_state = false;
		hash_resume_state(device_data, &device_data->state);
	}

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}
/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @ctx:		Structure for the hash context.
 * @device_data:	Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to the
 * caller.
 * Note! The caller needs to release the device by calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;  /* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
					struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * Number of available devices, which are contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data:	Structure for the hash device.
 * @key:		Key to be written.
 * @keylen:		The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW register, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}
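	/*
	 * For illustration: with a 7-byte key the word loop above streams
	 * one full little-endian word (key[0..3]) to HASH_DIN, and the tail
	 * loop packs the rest as word = key[6] << 16 | key[5] << 8 | key[4],
	 * so the key reaches the hardware in little-endian word order.
	 */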
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data:	Structure for the hash device.
 * @ctx:		The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
			struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
			__func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}
/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg:		Scatterlist.
 * @size:	Size in bytes.
 * @aligned:	True if sg data aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* hash_set_dma_transfer will align last nent */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}
/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg:		Scatterlist.
 * @datasize:	Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since the DMA engine
 * only accepts data sizes that are a whole number of words.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	/* Need to include at least one nent, else error */
	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}
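
/*
 * Example of the rule enforced above: in a two-entry scatterlist the first
 * entry must have both an aligned offset and an aligned length, while the
 * last entry only needs an aligned offset (hash_set_dma_transfer() pads the
 * final length). Any non-final entry with an unaligned length pushes the
 * request back to CPU mode.
 */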
/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;

	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false; /* Don't use DMA */

			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
				 __func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
			    hash_dma_valid_data(req->src, req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
					 __func__,
					 HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}

	return 0;
}
/**
 * hash_processblock - This function processes a single block of 512 bits
 * (64 bytes), word aligned, starting at message.
 * @device_data:	Structure for the hash device.
 * @message:		Block (512 bits) of message to be written to
 *			the HASH hardware.
 * @length:		Message length in bytes (a whole number of words).
 */
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}
/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data:	Structure for the hash device.
 * @message:		Last word of a message.
 * @index_bytes:	The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%u\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%u\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
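
/*
 * Worked example: index_bytes = 7 makes the main loop write one full word,
 * leaving index_bytes = 3; the partially valid last word is then written and
 * NBLW is programmed to 24, so the hardware only consumes 24 bits of that
 * final word before padding and starting the digest calculation (DCAL).
 */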
/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx:	Hash context.
 * @incr:	Length of the message part just processed.
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}
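
/*
 * The message length is a 64-bit count kept as two 32-bit words. For
 * example, low_word = 0xfffffffc and incr = 8 gives low_word = 4; since
 * 4 < 8 the wrap is detected and high_word is incremented.
 */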
/**
 * hash_setconfiguration - Sets the required configuration for the hash
 * hardware.
 * @device_data:	Structure for the hash device.
 * @config:		Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
	 * to be written to HASH_DIN is considered as 32 bits.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit. This bit selects between HASH or HMAC mode for the
	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Truncate key to blocksize */
			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {	/* Wrong hash mode */
		ret = -EPERM;
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
	}
	return ret;
}
/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare it to compute the message digest of a new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE);
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer
			 */
			if ((0 == (((u32)data_buffer) % 4)) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			ret = hash_save_state(device_data,
					&device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}
/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req: The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
				__func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_setconfiguration() failed!\n",
				__func__);
			goto out;
		}

		/* Enable DMA input */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32 */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/* Store the nents in the dma struct. */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (ctx->device->dma.nents < 1) {
		dev_err(device_data->dev, "%s: ctx->device->dma.nents < 1\n",
			__func__);
		ret = ctx->device->dma.nents ? ctx->device->dma.nents :
					       -EINVAL;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
			__func__);
		ret = bytes_written;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/* Allocated in setkey, and only used in HMAC. */
	kfree(ctx->key);
	ctx->key = NULL;

	return ret;
}
/**
 * hash_hw_final - The final hash calculation function.
 * @req: The hash request for the job.
 */
static int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_resume_state() failed!\n", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;
		/*
		 * Use a pre-calculated empty message digest
		 * (workaround since hw returns zeroes, hw bug!?)
		 */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
					       &zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
		    zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev,
				"%s: HMAC zero msg with key, continue...\n",
				__func__);
		} else {
			dev_err(device_data->dev,
				"%s: ret=%d, or wrong digest size? %s\n",
				__func__, ret,
				zero_hash_size == ctx->digestsize ?
				"true" : "false");
			/* Return error */
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
			__func__);
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev,
				"%s: init_hash_hw() failed!\n", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/* Allocated in setkey, and only used in HMAC. */
	kfree(ctx->key);
	ctx->key = NULL;

	return ret;
}
/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 * the message.
 * @req:	The hash request for the job.
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Check if ctx->state.length + msg_length overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_process_data() failed!\n",
				__func__);
			goto out;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}
/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
		      const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members */
	if (device_state->index > HASH_BLOCK_SIZE ||
	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare it to compute the message digest of a new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		writel_relaxed(device_state->csr[count],
			       &device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}
/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
		    struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -ENOTSUPP;
	}

	/*
	 * Make sure that there isn't any ongoing digest calculation (DCAL)
	 * in the hardware before saving the state.
	 */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}
/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data:	Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Checking Peripheral Ids */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
		return 0;
	}

	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
	return -ENOTSUPP;
}
/**
 * hash_get_digest - Gets the digest.
 * @device_data:	Pointer to the device structure.
 * @digest:		User allocated byte array for the calculated digest.
 * @algorithm:		The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
		     u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
			__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
		__func__, (u32) digest);

	/* Copy result into digest array */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
	}
}
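
/*
 * The HX result registers hold the digest as big-endian 32-bit words, so
 * the byte extraction above is equivalent to the shorter idiom (a sketch,
 * using put_unaligned_be32() from <asm/unaligned.h>):
 *
 *	put_unaligned_be32(temp_hx_val, &digest[count * 4]);
 */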
/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA, all data will be passed to DMA in final */

	if (ret)
		pr_err("%s: hash_hw_update() failed!\n", __func__);

	return ret;
}
/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug("%s: data size: %d\n", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret)
		pr_err("%s: hash_hw/dma_final() failed\n", __func__);

	return ret;
}
static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Freed in final. */
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err("%s: Failed to allocate ctx->key for %d\n",
		       __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}
static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int ahash_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
				struct hash_algo_template,
				hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;

	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}
static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
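
/*
 * For reference, these algorithms are reachable from userspace through the
 * kernel's AF_ALG socket interface. A minimal sketch (assuming
 * CONFIG_CRYPTO_USER_API_HASH is enabled and that this driver wins the
 * priority-based algorithm selection):
 *
 *	#include <linux/if_alg.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type = "hash",
 *		.salg_name = "sha256",		// or "hmac(sha256)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	// For HMAC, set the key first:
 *	// setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
 *	int opfd = accept(tfmfd, NULL, 0);
 *	write(opfd, data, datalen);
 *	unsigned char digest[32];
 *	read(opfd, digest, sizeof(digest));
 */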
/**
 * ahash_algs_register_all - Registers all supported ahash algorithms.
 * @device_data:	Structure for the hash device.
 */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "%s: alg registration failed\n",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}
/**
 * ahash_algs_unregister_all - Unregister the supported ahash algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}
/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
	if (!device_data) {
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	device_data->phybase = res->start;
	device_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(device_data->base)) {
		dev_err(dev, "%s: ioremap() failed!\n", __func__);
		ret = PTR_ERR(device_data->base);
		goto out;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "%s: clk_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
		goto out_power;
	}
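
	/*
	 * hash_mode is a driver-wide setting (presumably defined earlier in
	 * this file); when it selects DMA operation, a DMA channel is set up
	 * here so request data can reach the HASH block without CPU copies.
	 */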
	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);
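
	/*
	 * driver_data.device_allocation is a counting semaphore initialised
	 * to zero in ux500_hash_mod_init(); every probed device up()s it
	 * once, and consumers down() it to claim a free device off the list.
	 */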
	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out:
	return ret;
}
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}
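
	/*
	 * Take one free-device slot from the allocation semaphore; if none
	 * is available every device is currently claimed, so the removal
	 * must be refused.
	 */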
	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}
/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device)
		 * Need to set this to a non-NULL (dummy) value to mark the
		 * device as taken and avoid its use in case of a context
		 * switch.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
#ifdef CONFIG_PM_SLEEP
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev: Device to suspend.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);
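
	/*
	 * temp_ctx starts at NULL, so ++temp_ctx yields the same dummy
	 * non-NULL value that current_ctx++ produced above; the comparison
	 * below therefore tests whether the device was idle and has just
	 * been reserved by this suspend call.
	 */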
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power() failed\n", __func__);

	return ret;
}
/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @dev: Device to resume.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}
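
	/*
	 * If current_ctx still holds the dummy value set at suspend time,
	 * the device was idle: clear the marker and return its slot to the
	 * allocation semaphore. Otherwise a real context owns the device
	 * and its power must be restored.
	 */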
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
#endif
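
/*
 * SIMPLE_DEV_PM_OPS only plugs the callbacks into the dev_pm_ops when
 * CONFIG_PM_SLEEP is enabled, matching the #ifdef guard around
 * ux500_hash_suspend()/ux500_hash_resume() above.
 */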
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);

static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};

/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}

/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");