/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) "hashX hashX: " fmt

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
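
/*
 * Illustrative only: with this file built as a module (assuming a module
 * name of ux500_hash; the actual name is set by the build system, not
 * here), DMA mode would be selected at load time with something like:
 *
 *	modprobe ux500_hash hash_mode=1
 */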
/**
 * Pre-calculated empty message digests.
 */
static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09
};

static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};

/* HMAC-SHA1, no key */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
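
/*
 * These match the standard digests of the empty message (SHA-1/SHA-256
 * of "", and HMAC over "" with a zero-length key). They can be
 * cross-checked from userspace, e.g. (illustrative):
 *
 *	printf "" | sha1sum
 *	printf "" | openssl dgst -sha256 -hmac ""
 */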

/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list: A list of registered devices to choose from.
 * @device_allocation: A semaphore initialized with number of devices.
 */
struct hash_driver_data {
	struct klist device_list;
	struct semaphore device_allocation;
};

static struct hash_driver_data driver_data;

/* Declaration of functions */
/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message
 * @index_bytes: The number of bytes in the last message
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);

/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data: Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * hash_get_device_data.
	 */
	up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 16,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);
	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = data;

	complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;
	dma_cookie_t cookie;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
			__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
					     ctx->device->dma.sg,
					     ctx->device->dma.nents,
					     direction);
	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
		__func__);
	desc = dmaengine_prep_slave_sg(channel,
				       ctx->device->dma.sg,
				       ctx->device->dma.sg_len,
				       direction,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(ctx->device->dev,
			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;
	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
			  struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);

	if (error) {
		dev_dbg(ctx->device->dev,
			"%s: hash_set_dma_transfer() failed\n", __func__);
		return error;
	}

	return len;
}

/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data: Structure for the hash device.
 * @zero_hash: Buffer to return the empty message digest.
 * @zero_hash_size: Hash size of the empty message digest.
 * @zero_digest: True if zero_digest returned.
 */
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	/*
	 * Caller responsible for ctx != NULL.
	 */
	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha1[0],
			       SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 ==
			   ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha256[0],
			       SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
				__func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
				       SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
				       SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
					__func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev,
				"%s: Continue hash calculation, since hmac key available\n",
				__func__);
		}
	}
out:
	return ret;
}

/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data: Structure for the hash device.
 * @save_device_state: If true, saves the current hw state.
 *
 * This function requests disabling power (regulator) and clock,
 * and can also save the current hw state.
 */
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data: Structure for the hash device.
 * @restore_device_state: If true, restores a previously saved hw state.
 *
 * This function requests enabling power (regulator) and clock,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(struct hash_device_data *device_data,
			     bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "%s: regulator_enable() failed!\n",
				__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
			ret = regulator_disable(
					device_data->regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state) {
		if (restore_device_state) {
			device_data->restore_dev_state = false;
			hash_resume_state(device_data, &device_data->state);
		}
	}

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @ctx: Structure for the hash context.
 * @device_data: Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! The caller needs to release the device, calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret; /* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
				struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * Number of available devices, which are contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
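
/*
 * A minimal usage sketch (illustrative, mirroring what the final
 * functions below actually do): every successful call must be paired
 * with release_hash_device(), which performs the matching up().
 *
 *	struct hash_device_data *device_data;
 *	int ret = hash_get_device_data(ctx, &device_data);
 *	if (ret)
 *		return ret;
 *	...use device_data...
 *	release_hash_device(device_data);
 */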

/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data: Structure for the hash device.
 * @key: Key to be written.
 * @keylen: The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW register, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
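	/*
	 * The tail is packed little-endian into a single 32-bit word,
	 * building from the last byte down: a 2-byte tail {0xAA, 0xBB}
	 * yields word = 0xAA | (0xBB << 8).
	 */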
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}

/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data: Structure for the hash device.
 * @ctx: The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
			struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
			__func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}

/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg: Scatterlist.
 * @size: Size in bytes.
 * @aligned: True if sg data aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* hash_set_dma_transfer will align last nent */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}
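
/*
 * For example: a two-entry scatterlist of 64 + 64 bytes queried with
 * size = 128 returns nents = 2 with *aligned = true; if size exceeds
 * the total scatterlist length the function returns -EFAULT instead.
 */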

/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg: Scatterlist.
 * @datasize: Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since dma
 * only accepts datasizes of even wordsize.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	/* Need to include at least one nent, else error */
	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}

/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;
	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false; /* Don't use DMA */

			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
				 __func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
			    hash_dma_valid_data(req->src, req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
					 __func__,
					 HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}
	return 0;
}

/**
 * hash_processblock - This function processes a single block of 512 bits
 * (64 bytes), word aligned, starting at message.
 * @device_data: Structure for the hash device.
 * @message: Block (512 bits) of message to be written to
 * the HASH hardware.
 * @length: Message length in bytes.
 */
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;
	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}

/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message.
 * @index_bytes: The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
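
/*
 * After the main loop above, index_bytes is 0..3, so NBLW ends up as 0,
 * 8, 16 or 24: e.g. a 3-byte tail feeds one final word to DIN and
 * programs NBLW = 24, telling the hardware how many bits of that word
 * are valid.
 */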

/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx: Hash context
 * @incr: Length of message processed already
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}
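
/*
 * The message length is a 64-bit counter kept as two u32 words; the
 * unsigned wrap check above implements the carry: with low_word =
 * 0xffffffff and incr = 1, low_word becomes 0 (< incr), so high_word
 * is incremented.
 */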

/**
 * hash_setconfiguration - Sets the required configuration for the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @config: Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
	 * to be written to HASH_DIN is considered as 32 bits.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256.
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit. This bit selects between HASH or HMAC mode for the
	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Truncate key to blocksize */
			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {	/* Wrong hash mode */
		ret = -EPERM;
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
	}
	return ret;
}

/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare the HASH accelerator to compute the message digest of a
	 * new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE / sizeof(u32));
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer
			 */
			if ((0 == (((u32)data_buffer) % 4)) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			ret = hash_save_state(device_data,
					&device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE / sizeof(u32));
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}

/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req: The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);
		if (ret) {
			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
				__func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_setconfiguration() failed!\n",
				__func__);
			goto out;
		}

		/* Enable DMA input */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32 */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/*
	 * Store the nents in the dma struct. Note that hash_get_nents()
	 * returns -EFAULT on failure, so the check must catch negative
	 * values as well as zero.
	 */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (ctx->device->dma.nents < 1) {
		dev_err(device_data->dev, "%s: hash_get_nents() failed!\n",
			__func__);
		ret = -EFAULT;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
			__func__);
		ret = bytes_written;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

/**
 * hash_hw_final - The final hash calculation function.
 * @req: The hash request for the job.
 */
static int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);
		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_resume_state() failed!\n", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;
		/*
		 * Use a pre-calculated empty message digest
		 * (workaround since hw returns zeroes, hw bug!?)
		 */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
					       &zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
		    zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev,
				"%s: HMAC zero msg with key, continue...\n",
				__func__);
		} else {
			dev_err(device_data->dev,
				"%s: ret=%d, or wrong digest size? %s\n",
				__func__, ret,
				zero_hash_size == ctx->digestsize ?
				"true" : "false");
			/* Return error */
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
			__func__);
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev,
				"%s: init_hash_hw() failed!\n", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 * the message.
 * @req: The hash request containing the message to be hashed (caller
 * allocated).
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Check if ctx->state.length + msg_length overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);
		if (ret) {
			dev_err(device_data->dev, "%s: hash_process_data() failed!\n",
				__func__);
			goto out;
		}
		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}

/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data: Pointer to the device structure.
 * @device_state: The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
		      const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members */
	if (device_state->index > HASH_BLOCK_SIZE ||
	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare the HASH accelerator to compute the message digest of a
	 * new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;
		writel_relaxed(device_state->csr[count],
			       &device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}

/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data: Pointer to the device structure.
 * @device_state: The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
		    struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -ENOTSUPP;
	}

	/*
	 * Write dummy value to force digest intermediate calculation. This
	 * actually makes sure that there isn't any ongoing calculation in the
	 * hardware.
	 */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;
		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}
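
/*
 * hash_save_state() and hash_resume_state() are symmetric: both walk
 * the same context/state registers, and both stop at csr[36] in plain
 * HASH mode, since the guard above implies the remaining registers are
 * only relevant for HMAC state.
 */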

/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data: Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Checking Peripheral Ids */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
		return 0;
	}

	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
	return -ENOTSUPP;
}

/**
 * hash_get_digest - Gets the digest.
 * @device_data: Pointer to the device structure.
 * @digest: User allocated byte array for the calculated digest.
 * @algorithm: The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
		     u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
			__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
		__func__, (u32) digest);

	/* Copy result into digest array */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
	}
}
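
/*
 * Each HX register is unpacked big-endian, as SHA-1/SHA-256 digests
 * are defined: a register value of 0x01020304 lands in the output
 * array as the byte sequence 01 02 03 04.
 */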

/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA, all data will be passed to DMA in final */

	if (ret) {
		pr_err("%s: hash_hw_update() failed!\n", __func__);
	}

	return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug("%s: data size: %d\n", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret) {
		pr_err("%s: hash_hw/dma_final() failed\n", __func__);
	}

	return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/*
	 * Freed in final.
	 */
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err("%s: Failed to allocate ctx->key for %d\n",
		       __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}

struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
				struct hash_algo_template,
				hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;
	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}

static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
					      CRYPTO_ALG_ASYNC),
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
					      CRYPTO_ALG_ASYNC),
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
					      CRYPTO_ALG_ASYNC),
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
					      CRYPTO_ALG_ASYNC),
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
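
/*
 * Once registered, these transforms are reached through the generic
 * crypto API by cra_name. A minimal sketch of a caller (illustrative,
 * not part of this driver; error handling and async completion
 * trimmed; a real caller must handle -EINPROGRESS/-EBUSY from the
 * async API):
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 result[SHA256_DIGEST_SIZE];
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_crypt(req, &sg, result, len);
 *	crypto_ahash_digest(req);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */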
/**
 * ahash_algs_register_all - Register all supported hash algorithms.
 * @device_data: Structure for the hash device.
 */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "%s: alg registration failed\n",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	/* Roll back only the algorithms registered before the failure. */
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}
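
/*
 * Note on the rollback above: only the first "count" entries had been
 * registered when the failure hit, so only those are unregistered.
 * Newer kernels provide crypto_register_ahashes()/crypto_unregister_ahashes()
 * which implement the same register-all-or-roll-back pattern, but they take
 * a flat array of struct ahash_alg, so the template wrapper used here would
 * have to be flattened before they could be used.
 */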
/**
 * ahash_algs_unregister_all - Unregister all registered hash algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}
/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	/* Probe runs in process context, so GFP_KERNEL suffices here. */
	device_data = kzalloc(sizeof(*device_data), GFP_KERNEL);
	if (!device_data) {
		ret = -ENOMEM;
		goto out;
	}
	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
		ret = -ENODEV;
		goto out_kfree;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL) {
		dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__);
		ret = -EBUSY;
		goto out_kfree;
	}

	device_data->phybase = res->start;
	device_data->base = ioremap(res->start, resource_size(res));
	if (!device_data->base) {
		dev_err(dev, "%s: ioremap() failed!\n", __func__);
		ret = -ENOMEM;
		goto out_free_mem;
	}

	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out_unmap;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "%s: clk_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
		goto out_clk;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_clk:
	clk_put(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out_unmap:
	iounmap(device_data->base);

out_free_mem:
	release_mem_region(res->start, resource_size(res));

out_kfree:
	kfree(device_data);
out:
	return ret;
}
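
/*
 * The unwind ladder above must release resources in exactly the reverse
 * order of acquisition. For comparison, a sketch of how the same
 * acquisitions could use managed (devm_*) helpers, which make most of the
 * labels unnecessary because the driver core releases the resources on
 * probe failure or device removal (illustrative only; the rest of the
 * driver would need matching changes):
 *
 *	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	device_data->base = devm_ioremap_resource(dev, res);
 *	device_data->regulator = devm_regulator_get(dev, "v-ape");
 *	device_data->clk = devm_clk_get(dev, NULL);
 */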
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct resource *res;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	clk_put(device_data->clk);
	regulator_put(device_data->regulator);
	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	kfree(device_data);

	return 0;
}
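
/*
 * The list_empty() check above matters on platforms with more than one
 * hash block: the algorithms stay registered until the last device is
 * removed, since any remaining device can still service requests.
 */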
/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct resource *res = NULL;
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device.)
		 * Set current_ctx to a non-NULL dummy value so the device
		 * cannot be allocated from another context while we shut
		 * down.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
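
/*
 * Shutdown, suspend and resume all rely on the same convention:
 * current_ctx == NULL means "device free", and incrementing the NULL
 * pointer yields a well-known dummy value meaning "claimed for power
 * management, no real context attached". In plain C the dummy is simply:
 *
 *	struct hash_ctx *dummy = NULL;
 *	dummy++;	// in practice (struct hash_ctx *)sizeof(struct hash_ctx)
 *
 * which is why the suspend/resume handlers below can recreate it with
 * ++temp_ctx and compare it against current_ctx.
 */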
#ifdef CONFIG_PM_SLEEP
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev: Device to suspend.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/* ++temp_ctx recreates the dummy value set above. */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power() failed!\n", __func__);

	return ret;
}
/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @dev: Device to resume.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	/* If the device still holds the suspend-time dummy context, release it. */
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
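
/*
 * Not in the original listing: exporting the OF match table lets the
 * module be autoloaded from the devicetree compatible string instead of
 * relying on the "hash1" platform-bus name alone.
 */
MODULE_DEVICE_TABLE(of, ux500_hash_match);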
static struct platform_driver hash_driver = {
	.probe  = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name  = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};
/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}
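
/*
 * The device_allocation semaphore doubles as a counting pool: it starts
 * at 0, each successful probe does up() (adding one available device),
 * and any path that needs exclusive ownership of a device takes it with
 * a down variant first. A consumer would typically do something like:
 *
 *	if (down_interruptible(&driver_data.device_allocation))
 *		return -ERESTARTSYS;	// interrupted while waiting
 *	...claim a free device from driver_data.device_list...
 *	up(&driver_data.device_allocation);	// return it to the pool
 *
 * (Sketch only; the actual claim/release helpers live earlier in this
 * file.)
 */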
/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");