s5p-sss.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Cryptographic API.
  4. //
  5. // Support for Samsung S5PV210 and Exynos HW acceleration.
  6. //
  7. // Copyright (C) 2011 NetUP Inc. All rights reserved.
  8. // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
  9. //
  10. // Hash part based on omap-sham.c driver.
  11. #include <linux/clk.h>
  12. #include <linux/crypto.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/err.h>
  15. #include <linux/errno.h>
  16. #include <linux/init.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/io.h>
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/scatterlist.h>
  24. #include <crypto/ctr.h>
  25. #include <crypto/aes.h>
  26. #include <crypto/algapi.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <crypto/hash.h>
  29. #include <crypto/md5.h>
  30. #include <crypto/sha.h>
  31. #include <crypto/internal/hash.h>
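/*
 * _SBF(s, v) builds a multi-bit register field: value v shifted left by s
 * bits, e.g. SSS_AES_KEY_SIZE_192 = _SBF(4, 0x01) = 0x10.
 */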
  32. #define _SBF(s, v) ((v) << (s))
  33. /* Feed control registers */
  34. #define SSS_REG_FCINTSTAT 0x0000
  35. #define SSS_FCINTSTAT_HPARTINT BIT(7)
  36. #define SSS_FCINTSTAT_HDONEINT BIT(5)
  37. #define SSS_FCINTSTAT_BRDMAINT BIT(3)
  38. #define SSS_FCINTSTAT_BTDMAINT BIT(2)
  39. #define SSS_FCINTSTAT_HRDMAINT BIT(1)
  40. #define SSS_FCINTSTAT_PKDMAINT BIT(0)
  41. #define SSS_REG_FCINTENSET 0x0004
  42. #define SSS_FCINTENSET_HPARTINTENSET BIT(7)
  43. #define SSS_FCINTENSET_HDONEINTENSET BIT(5)
  44. #define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
  45. #define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
  46. #define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
  47. #define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
  48. #define SSS_REG_FCINTENCLR 0x0008
  49. #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
  50. #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
  51. #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
  52. #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
  53. #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
  54. #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
  55. #define SSS_REG_FCINTPEND 0x000C
  56. #define SSS_FCINTPEND_HPARTINTP BIT(7)
  57. #define SSS_FCINTPEND_HDONEINTP BIT(5)
  58. #define SSS_FCINTPEND_BRDMAINTP BIT(3)
  59. #define SSS_FCINTPEND_BTDMAINTP BIT(2)
  60. #define SSS_FCINTPEND_HRDMAINTP BIT(1)
  61. #define SSS_FCINTPEND_PKDMAINTP BIT(0)
  62. #define SSS_REG_FCFIFOSTAT 0x0010
  63. #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
  64. #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
  65. #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
  66. #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
  67. #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
  68. #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
  69. #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
  70. #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
  71. #define SSS_REG_FCFIFOCTRL 0x0014
  72. #define SSS_FCFIFOCTRL_DESSEL BIT(2)
  73. #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
  74. #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
  75. #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
  76. #define SSS_HASHIN_MASK _SBF(0, 0x03)
  77. #define SSS_REG_FCBRDMAS 0x0020
  78. #define SSS_REG_FCBRDMAL 0x0024
  79. #define SSS_REG_FCBRDMAC 0x0028
  80. #define SSS_FCBRDMAC_BYTESWAP BIT(1)
  81. #define SSS_FCBRDMAC_FLUSH BIT(0)
  82. #define SSS_REG_FCBTDMAS 0x0030
  83. #define SSS_REG_FCBTDMAL 0x0034
  84. #define SSS_REG_FCBTDMAC 0x0038
  85. #define SSS_FCBTDMAC_BYTESWAP BIT(1)
  86. #define SSS_FCBTDMAC_FLUSH BIT(0)
  87. #define SSS_REG_FCHRDMAS 0x0040
  88. #define SSS_REG_FCHRDMAL 0x0044
  89. #define SSS_REG_FCHRDMAC 0x0048
  90. #define SSS_FCHRDMAC_BYTESWAP BIT(1)
  91. #define SSS_FCHRDMAC_FLUSH BIT(0)
  92. #define SSS_REG_FCPKDMAS 0x0050
  93. #define SSS_REG_FCPKDMAL 0x0054
  94. #define SSS_REG_FCPKDMAC 0x0058
  95. #define SSS_FCPKDMAC_BYTESWAP BIT(3)
  96. #define SSS_FCPKDMAC_DESCEND BIT(2)
  97. #define SSS_FCPKDMAC_TRANSMIT BIT(1)
  98. #define SSS_FCPKDMAC_FLUSH BIT(0)
  99. #define SSS_REG_FCPKDMAO 0x005C
  100. /* AES registers */
  101. #define SSS_REG_AES_CONTROL 0x00
  102. #define SSS_AES_BYTESWAP_DI BIT(11)
  103. #define SSS_AES_BYTESWAP_DO BIT(10)
  104. #define SSS_AES_BYTESWAP_IV BIT(9)
  105. #define SSS_AES_BYTESWAP_CNT BIT(8)
  106. #define SSS_AES_BYTESWAP_KEY BIT(7)
  107. #define SSS_AES_KEY_CHANGE_MODE BIT(6)
  108. #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
  109. #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
  110. #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
  111. #define SSS_AES_FIFO_MODE BIT(3)
  112. #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
  113. #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
  114. #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
  115. #define SSS_AES_MODE_DECRYPT BIT(0)
  116. #define SSS_REG_AES_STATUS 0x04
  117. #define SSS_AES_BUSY BIT(2)
  118. #define SSS_AES_INPUT_READY BIT(1)
  119. #define SSS_AES_OUTPUT_READY BIT(0)
  120. #define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
  121. #define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
  122. #define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
  123. #define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
  124. #define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
  125. #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
  126. #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
  127. #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
  128. #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
  129. #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
  130. SSS_AES_REG(dev, reg))
  131. /* HW engine modes */
  132. #define FLAGS_AES_DECRYPT BIT(0)
  133. #define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
  134. #define FLAGS_AES_CBC _SBF(1, 0x01)
  135. #define FLAGS_AES_CTR _SBF(1, 0x02)
  136. #define AES_KEY_LEN 16
  137. #define CRYPTO_QUEUE_LEN 1
  138. /* HASH registers */
  139. #define SSS_REG_HASH_CTRL 0x00
  140. #define SSS_HASH_USER_IV_EN BIT(5)
  141. #define SSS_HASH_INIT_BIT BIT(4)
  142. #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
  143. #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
  144. #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
  145. #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
  146. #define SSS_REG_HASH_CTRL_PAUSE 0x04
  147. #define SSS_HASH_PAUSE BIT(0)
  148. #define SSS_REG_HASH_CTRL_FIFO 0x08
  149. #define SSS_HASH_FIFO_MODE_DMA BIT(0)
  150. #define SSS_HASH_FIFO_MODE_CPU 0
  151. #define SSS_REG_HASH_CTRL_SWAP 0x0C
  152. #define SSS_HASH_BYTESWAP_DI BIT(3)
  153. #define SSS_HASH_BYTESWAP_DO BIT(2)
  154. #define SSS_HASH_BYTESWAP_IV BIT(1)
  155. #define SSS_HASH_BYTESWAP_KEY BIT(0)
  156. #define SSS_REG_HASH_STATUS 0x10
  157. #define SSS_HASH_STATUS_MSG_DONE BIT(6)
  158. #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
  159. #define SSS_HASH_STATUS_BUFFER_READY BIT(0)
  160. #define SSS_REG_HASH_MSG_SIZE_LOW 0x20
  161. #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
  162. #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
  163. #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
  164. #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
  165. #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
  166. #define HASH_BLOCK_SIZE 64
  167. #define HASH_REG_SIZEOF 4
  168. #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
  169. #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
  170. #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
  171. /*
  172. * HASH bit numbers, used by the device; set in dev->hash_flags with
  173. * set_bit()/clear_bit() and tested with test_bit(), to keep the HASH
  174. * state BUSY or FREE, or to signal state from the irq_handler to the
  175. * hash_tasklet. The SGS bits track memory allocated for the scatterlist.
  176. */
  177. #define HASH_FLAGS_BUSY 0
  178. #define HASH_FLAGS_FINAL 1
  179. #define HASH_FLAGS_DMA_ACTIVE 2
  180. #define HASH_FLAGS_OUTPUT_READY 3
  181. #define HASH_FLAGS_DMA_READY 4
  182. #define HASH_FLAGS_SGS_COPIED 5
  183. #define HASH_FLAGS_SGS_ALLOCED 6
  184. /* HASH HW constants */
  185. #define BUFLEN HASH_BLOCK_SIZE
  186. #define SSS_HASH_DMA_LEN_ALIGN 8
  187. #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
  188. #define SSS_HASH_QUEUE_LENGTH 10
  189. /**
  190. * struct samsung_aes_variant - platform specific SSS driver data
  191. * @aes_offset: AES register offset from SSS module's base.
  192. * @hash_offset: HASH register offset from SSS module's base.
  193. *
  194. * Specifies platform specific configuration of SSS module.
  195. * Note: A structure for driver-specific platform data is used to allow
  196. * future expansion of its usage.
  197. */
  198. struct samsung_aes_variant {
  199. unsigned int aes_offset;
  200. unsigned int hash_offset;
  201. };
  202. struct s5p_aes_reqctx {
  203. unsigned long mode;
  204. };
  205. struct s5p_aes_ctx {
  206. struct s5p_aes_dev *dev;
  207. uint8_t aes_key[AES_MAX_KEY_SIZE];
  208. uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
  209. int keylen;
  210. };
  211. /**
  212. * struct s5p_aes_dev - Crypto device state container
  213. * @dev: Associated device
  214. * @clk: Clock for accessing hardware
  215. * @ioaddr: Mapped IO memory region
  216. * @aes_ioaddr: Per-variant offset for AES block IO memory
  217. * @irq_fc: Feed control interrupt line
  218. * @req: Crypto request currently handled by the device
  219. * @ctx: Configuration for currently handled crypto request
  220. * @sg_src: Scatter list with source data for currently handled block
  221. * in device. This is DMA-mapped into device.
  222. * @sg_dst: Scatter list with destination data for currently handled block
  223. * in device. This is DMA-mapped into device.
  224. * @sg_src_cpy: In case of unaligned access, copied scatter list
  225. * with source data.
  226. * @sg_dst_cpy: In case of unaligned access, copied scatter list
  227. * with destination data.
  228. * @tasklet: New request scheduling job
  229. * @queue: Crypto queue
  230. * @busy: Indicates whether the device is currently handling some request
  231. * thus it uses some of the fields from this state, like:
  232. * req, ctx, sg_src/dst (and copies). This essentially
  233. * protects against concurrent access to these fields.
  234. * @lock: Lock for protecting both access to device hardware registers
  235. * and fields related to current request (including the busy field).
  236. * @res: Resources for hash.
  237. * @io_hash_base: Per-variant offset for HASH block IO memory.
  238. * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
  239. * variable.
  240. * @hash_flags: Flags for current HASH op.
  241. * @hash_queue: Async hash queue.
  242. * @hash_tasklet: New HASH request scheduling job.
  243. * @xmit_buf: Buffer for current HASH request transfer into SSS block.
  244. * @hash_req: Current request being sent to the SSS HASH block.
  245. * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
  246. * @hash_sg_cnt: Counter for hash_sg_iter.
  247. *
  248. * @use_hash: true if HASH algs enabled
  249. */
  250. struct s5p_aes_dev {
  251. struct device *dev;
  252. struct clk *clk;
  253. void __iomem *ioaddr;
  254. void __iomem *aes_ioaddr;
  255. int irq_fc;
  256. struct ablkcipher_request *req;
  257. struct s5p_aes_ctx *ctx;
  258. struct scatterlist *sg_src;
  259. struct scatterlist *sg_dst;
  260. struct scatterlist *sg_src_cpy;
  261. struct scatterlist *sg_dst_cpy;
  262. struct tasklet_struct tasklet;
  263. struct crypto_queue queue;
  264. bool busy;
  265. spinlock_t lock;
  266. struct resource *res;
  267. void __iomem *io_hash_base;
  268. spinlock_t hash_lock; /* protect hash_ vars */
  269. unsigned long hash_flags;
  270. struct crypto_queue hash_queue;
  271. struct tasklet_struct hash_tasklet;
  272. u8 xmit_buf[BUFLEN];
  273. struct ahash_request *hash_req;
  274. struct scatterlist *hash_sg_iter;
  275. unsigned int hash_sg_cnt;
  276. bool use_hash;
  277. };
  278. /**
  279. * struct s5p_hash_reqctx - HASH request context
  280. * @dd: Associated device
  281. * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
  282. * @digcnt: Number of bytes processed by HW (without buffer[] ones)
  283. * @digest: Digest message or IV for partial result
  284. * @nregs: Number of HW registers for digest or IV read/write
  285. * @engine: Bits for selecting type of HASH in SSS block
  286. * @sg: sg for DMA transfer
  287. * @sg_len: Length of sg for DMA transfer
  288. * @sgl[]: sg for joining buffer and req->src scatterlist
  289. * @skip: Skip offset in req->src for current op
  290. * @total: Total number of bytes for current request
  291. * @finup: Keep state for finup or final.
  292. * @error: Keep track of error.
  293. * @bufcnt: Number of bytes held in buffer[]
  294. * @buffer[]: For byte(s) from end of req->src in UPDATE op
  295. */
  296. struct s5p_hash_reqctx {
  297. struct s5p_aes_dev *dd;
  298. bool op_update;
  299. u64 digcnt;
  300. u8 digest[SHA256_DIGEST_SIZE];
  301. unsigned int nregs; /* digest_size / sizeof(reg) */
  302. u32 engine;
  303. struct scatterlist *sg;
  304. unsigned int sg_len;
  305. struct scatterlist sgl[2];
  306. unsigned int skip;
  307. unsigned int total;
  308. bool finup;
  309. bool error;
  310. u32 bufcnt;
  311. u8 buffer[0];
  312. };
  313. /**
  314. * struct s5p_hash_ctx - HASH transformation context
  315. * @dd: Associated device
  316. * @flags: Bits for algorithm HASH.
  317. * @fallback: Software transformation for zero message or size < BUFLEN.
  318. */
  319. struct s5p_hash_ctx {
  320. struct s5p_aes_dev *dd;
  321. unsigned long flags;
  322. struct crypto_shash *fallback;
  323. };
  324. static const struct samsung_aes_variant s5p_aes_data = {
  325. .aes_offset = 0x4000,
  326. .hash_offset = 0x6000,
  327. };
  328. static const struct samsung_aes_variant exynos_aes_data = {
  329. .aes_offset = 0x200,
  330. .hash_offset = 0x400,
  331. };
  332. static const struct of_device_id s5p_sss_dt_match[] = {
  333. {
  334. .compatible = "samsung,s5pv210-secss",
  335. .data = &s5p_aes_data,
  336. },
  337. {
  338. .compatible = "samsung,exynos4210-secss",
  339. .data = &exynos_aes_data,
  340. },
  341. { },
  342. };
  343. MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
  344. static inline const struct samsung_aes_variant *find_s5p_sss_version
  345. (const struct platform_device *pdev)
  346. {
  347. if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
  348. const struct of_device_id *match;
  349. match = of_match_node(s5p_sss_dt_match,
  350. pdev->dev.of_node);
  351. return (const struct samsung_aes_variant *)match->data;
  352. }
  353. return (const struct samsung_aes_variant *)
  354. platform_get_device_id(pdev)->driver_data;
  355. }
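/*
 * Single SSS device instance; assigned at platform-device probe time and
 * read by the hash tfm init path (s5p_hash_cra_init_alg) via tctx->dd.
 */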
  356. static struct s5p_aes_dev *s5p_dev;
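/*
 * Program the feed-control block RX/TX DMA address and length registers.
 * As noted in the interrupt handler below, writing the length register
 * starts the DMA transfer immediately.
 */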
  357. static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
  358. const struct scatterlist *sg)
  359. {
  360. SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
  361. SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
  362. }
  363. static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
  364. const struct scatterlist *sg)
  365. {
  366. SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
  367. SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
  368. }
  369. static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
  370. {
  371. int len;
  372. if (!*sg)
  373. return;
  374. len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
  375. free_pages((unsigned long)sg_virt(*sg), get_order(len));
  376. kfree(*sg);
  377. *sg = NULL;
  378. }
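/*
 * Copy between a linear buffer and a scatterlist: out == 0 copies from the
 * scatterlist into buf, out == 1 copies from buf into the scatterlist
 * (used for bouncing unaligned AES data).
 */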
  379. static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
  380. unsigned int nbytes, int out)
  381. {
  382. struct scatter_walk walk;
  383. if (!nbytes)
  384. return;
  385. scatterwalk_start(&walk, sg);
  386. scatterwalk_copychunks(buf, &walk, nbytes, out);
  387. scatterwalk_done(&walk, out, 0);
  388. }
  389. static void s5p_sg_done(struct s5p_aes_dev *dev)
  390. {
  391. if (dev->sg_dst_cpy) {
  392. dev_dbg(dev->dev,
  393. "Copying %d bytes of output data back to original place\n",
  394. dev->req->nbytes);
  395. s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
  396. dev->req->nbytes, 1);
  397. }
  398. s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
  399. s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
  400. }
  401. /* Calls the completion. Cannot be called with dev->lock held. */
  402. static void s5p_aes_complete(struct ablkcipher_request *req, int err)
  403. {
  404. req->base.complete(&req->base, err);
  405. }
  406. static void s5p_unset_outdata(struct s5p_aes_dev *dev)
  407. {
  408. dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
  409. }
  410. static void s5p_unset_indata(struct s5p_aes_dev *dev)
  411. {
  412. dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
  413. }
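/*
 * Allocate a contiguous bounce buffer (request length rounded up to
 * AES_BLOCK_SIZE), copy the data from src into it and wrap it in a
 * single-entry scatterlist at *dst so it can be DMA-mapped.
 */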
  414. static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
  415. struct scatterlist **dst)
  416. {
  417. void *pages;
  418. int len;
  419. *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
  420. if (!*dst)
  421. return -ENOMEM;
  422. len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
  423. pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
  424. if (!pages) {
  425. kfree(*dst);
  426. *dst = NULL;
  427. return -ENOMEM;
  428. }
  429. s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
  430. sg_init_table(*dst, 1);
  431. sg_set_buf(*dst, pages, len);
  432. return 0;
  433. }
  434. static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
  435. {
  436. int err;
  437. if (!sg->length) {
  438. err = -EINVAL;
  439. goto exit;
  440. }
  441. err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
  442. if (!err) {
  443. err = -ENOMEM;
  444. goto exit;
  445. }
  446. dev->sg_dst = sg;
  447. err = 0;
  448. exit:
  449. return err;
  450. }
  451. static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
  452. {
  453. int err;
  454. if (!sg->length) {
  455. err = -EINVAL;
  456. goto exit;
  457. }
  458. err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
  459. if (!err) {
  460. err = -ENOMEM;
  461. goto exit;
  462. }
  463. dev->sg_src = sg;
  464. err = 0;
  465. exit:
  466. return err;
  467. }
  468. /*
  469. * Returns -ERRNO on error (mapping of new data failed).
  470. * On success returns:
  471. * - 0 if there is no more data,
  472. * - 1 if new transmitting (output) data is ready and its address+length
  473. * have to be written to device (by calling s5p_set_dma_outdata()).
  474. */
  475. static int s5p_aes_tx(struct s5p_aes_dev *dev)
  476. {
  477. int ret = 0;
  478. s5p_unset_outdata(dev);
  479. if (!sg_is_last(dev->sg_dst)) {
  480. ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
  481. if (!ret)
  482. ret = 1;
  483. }
  484. return ret;
  485. }
  486. /*
  487. * Returns -ERRNO on error (mapping of new data failed).
  488. * On success returns:
  489. * - 0 if there is no more data,
  490. * - 1 if new receiving (input) data is ready and its address+length
  491. * have to be written to device (by calling s5p_set_dma_indata()).
  492. */
  493. static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
  494. {
  495. int ret = 0;
  496. s5p_unset_indata(dev);
  497. if (!sg_is_last(dev->sg_src)) {
  498. ret = s5p_set_indata(dev, sg_next(dev->sg_src));
  499. if (!ret)
  500. ret = 1;
  501. }
  502. return ret;
  503. }
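/* Accessors for the HASH block registers, located at the per-variant hash offset */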
  504. static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
  505. {
  506. return __raw_readl(dd->io_hash_base + offset);
  507. }
  508. static inline void s5p_hash_write(struct s5p_aes_dev *dd,
  509. u32 offset, u32 value)
  510. {
  511. __raw_writel(value, dd->io_hash_base + offset);
  512. }
  513. /**
  514. * s5p_set_dma_hashdata() - start DMA with sg
  515. * @dev: device
  516. * @sg: scatterlist ready to DMA transmit
  517. */
  518. static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
  519. const struct scatterlist *sg)
  520. {
  521. dev->hash_sg_cnt--;
  522. SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
  523. SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
  524. }
  525. /**
  526. * s5p_hash_rx() - get next hash_sg_iter
  527. * @dev: device
  528. *
  529. * Return:
  530. * 2 if there is no more data and it is UPDATE op
  531. * 1 if new receiving (input) data is ready and can be written to device
  532. * 0 if there is no more data and it is FINAL op
  533. */
  534. static int s5p_hash_rx(struct s5p_aes_dev *dev)
  535. {
  536. if (dev->hash_sg_cnt > 0) {
  537. dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
  538. return 1;
  539. }
  540. set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
  541. if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
  542. return 0;
  543. return 2;
  544. }
  545. static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
  546. {
  547. struct platform_device *pdev = dev_id;
  548. struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
  549. struct ablkcipher_request *req;
  550. int err_dma_tx = 0;
  551. int err_dma_rx = 0;
  552. int err_dma_hx = 0;
  553. bool tx_end = false;
  554. bool hx_end = false;
  555. unsigned long flags;
  556. uint32_t status;
  557. u32 st_bits;
  558. int err;
  559. spin_lock_irqsave(&dev->lock, flags);
  560. /*
  561. * Handle rx or tx interrupt. If there is still data (scatterlist did not
  562. * reach end), then map next scatterlist entry.
  563. * In case of such mapping error, s5p_aes_complete() should be called.
  564. *
  565. * If there is no more data in tx scatter list, call s5p_aes_complete()
  566. * and schedule new tasklet.
  567. *
  568. * Handle hx interrupt. If there is still data map next entry.
  569. */
  570. status = SSS_READ(dev, FCINTSTAT);
  571. if (status & SSS_FCINTSTAT_BRDMAINT)
  572. err_dma_rx = s5p_aes_rx(dev);
  573. if (status & SSS_FCINTSTAT_BTDMAINT) {
  574. if (sg_is_last(dev->sg_dst))
  575. tx_end = true;
  576. err_dma_tx = s5p_aes_tx(dev);
  577. }
  578. if (status & SSS_FCINTSTAT_HRDMAINT)
  579. err_dma_hx = s5p_hash_rx(dev);
  580. st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
  581. SSS_FCINTSTAT_HRDMAINT);
  582. /* clear DMA bits */
  583. SSS_WRITE(dev, FCINTPEND, st_bits);
  584. /* clear HASH irq bits */
  585. if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
  586. /* cannot have both HPART and HDONE */
  587. if (status & SSS_FCINTSTAT_HPARTINT)
  588. st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
  589. if (status & SSS_FCINTSTAT_HDONEINT)
  590. st_bits = SSS_HASH_STATUS_MSG_DONE;
  591. set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
  592. s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
  593. hx_end = true;
  594. /* when DONE or PART, do not handle HASH DMA */
  595. err_dma_hx = 0;
  596. }
  597. if (err_dma_rx < 0) {
  598. err = err_dma_rx;
  599. goto error;
  600. }
  601. if (err_dma_tx < 0) {
  602. err = err_dma_tx;
  603. goto error;
  604. }
  605. if (tx_end) {
  606. s5p_sg_done(dev);
  607. if (err_dma_hx == 1)
  608. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  609. spin_unlock_irqrestore(&dev->lock, flags);
  610. s5p_aes_complete(dev->req, 0);
  611. /* Device is still busy */
  612. tasklet_schedule(&dev->tasklet);
  613. } else {
  614. /*
  615. * Writing length of DMA block (either receiving or
  616. * transmitting) will start the operation immediately, so this
  617. * should be done at the end (even after clearing pending
  618. * interrupts to not miss the interrupt).
  619. */
  620. if (err_dma_tx == 1)
  621. s5p_set_dma_outdata(dev, dev->sg_dst);
  622. if (err_dma_rx == 1)
  623. s5p_set_dma_indata(dev, dev->sg_src);
  624. if (err_dma_hx == 1)
  625. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  626. spin_unlock_irqrestore(&dev->lock, flags);
  627. }
  628. goto hash_irq_end;
  629. error:
  630. s5p_sg_done(dev);
  631. dev->busy = false;
  632. req = dev->req;
  633. if (err_dma_hx == 1)
  634. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  635. spin_unlock_irqrestore(&dev->lock, flags);
  636. s5p_aes_complete(req, err);
  637. hash_irq_end:
  638. /*
  639. * Note about else if:
  640. * when hash_sg_iter reaches the end and it's an UPDATE op,
  641. * issue SSS_HASH_PAUSE and wait for HPART irq
  642. */
  643. if (hx_end)
  644. tasklet_schedule(&dev->hash_tasklet);
  645. else if (err_dma_hx == 2)
  646. s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
  647. SSS_HASH_PAUSE);
  648. return IRQ_HANDLED;
  649. }
  650. /**
  651. * s5p_hash_read_msg() - read message or IV from HW
  652. * @req: AHASH request
  653. */
  654. static void s5p_hash_read_msg(struct ahash_request *req)
  655. {
  656. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  657. struct s5p_aes_dev *dd = ctx->dd;
  658. u32 *hash = (u32 *)ctx->digest;
  659. unsigned int i;
  660. for (i = 0; i < ctx->nregs; i++)
  661. hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
  662. }
  663. /**
  664. * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
  665. * @dd: device
  666. * @ctx: request context
  667. */
  668. static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
  669. const struct s5p_hash_reqctx *ctx)
  670. {
  671. const u32 *hash = (const u32 *)ctx->digest;
  672. unsigned int i;
  673. for (i = 0; i < ctx->nregs; i++)
  674. s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
  675. }
  676. /**
  677. * s5p_hash_write_iv() - write IV for next partial/finup op.
  678. * @req: AHASH request
  679. */
  680. static void s5p_hash_write_iv(struct ahash_request *req)
  681. {
  682. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  683. s5p_hash_write_ctx_iv(ctx->dd, ctx);
  684. }
  685. /**
  686. * s5p_hash_copy_result() - copy digest into req->result
  687. * @req: AHASH request
  688. */
  689. static void s5p_hash_copy_result(struct ahash_request *req)
  690. {
  691. const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  692. if (!req->result)
  693. return;
  694. memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
  695. }
  696. /**
  697. * s5p_hash_dma_flush() - flush HASH DMA
  698. * @dev: secss device
  699. */
  700. static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
  701. {
  702. SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
  703. }
  704. /**
  705. * s5p_hash_dma_enable() - enable DMA mode for HASH
  706. * @dev: secss device
  707. *
  708. * enable DMA mode for HASH
  709. */
  710. static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
  711. {
  712. s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
  713. }
  714. /**
  715. * s5p_hash_irq_disable() - disable irq HASH signals
  716. * @dev: secss device
  717. * @flags: bitfield with irq's to be disabled
  718. */
  719. static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
  720. {
  721. SSS_WRITE(dev, FCINTENCLR, flags);
  722. }
  723. /**
  724. * s5p_hash_irq_enable() - enable irq signals
  725. * @dev: secss device
  726. * @flags: bitfield with irq's to be enabled
  727. */
  728. static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
  729. {
  730. SSS_WRITE(dev, FCINTENSET, flags);
  731. }
  732. /**
  733. * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
  734. * @dev: secss device
  735. * @hashflow: HASH stream flow with/without crypto AES/DES
  736. */
  737. static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
  738. {
  739. unsigned long flags;
  740. u32 flow;
  741. spin_lock_irqsave(&dev->lock, flags);
  742. flow = SSS_READ(dev, FCFIFOCTRL);
  743. flow &= ~SSS_HASHIN_MASK;
  744. flow |= hashflow;
  745. SSS_WRITE(dev, FCFIFOCTRL, flow);
  746. spin_unlock_irqrestore(&dev->lock, flags);
  747. }
  748. /**
  749. * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
  750. * @dev: secss device
  751. * @hashflow: HASH stream flow with/without AES/DES
  752. *
  753. * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
  754. * enable HASH irq's HRDMA, HDONE, HPART
  755. */
  756. static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
  757. {
  758. s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
  759. SSS_FCINTENCLR_HDONEINTENCLR |
  760. SSS_FCINTENCLR_HPARTINTENCLR);
  761. s5p_hash_dma_flush(dev);
  762. s5p_hash_dma_enable(dev);
  763. s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
  764. s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
  765. SSS_FCINTENSET_HDONEINTENSET |
  766. SSS_FCINTENSET_HPARTINTENSET);
  767. }
  768. /**
  769. * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
  770. * @dd: secss device
  771. * @length: length for request
  772. * @final: true if final op
  773. *
  774. * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
  775. * after previous updates, fill up IV words. For final, calculate and set
  776. * lengths for HASH so SecSS can finalize the hash. For partial, set the SSS
  777. * HASH length to 2^63 so it will never be reached, and set prelow and
  778. * prehigh to zero.
  779. *
  780. * This function does not start DMA transfer.
  781. */
  782. static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
  783. bool final)
  784. {
  785. struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  786. u32 prelow, prehigh, low, high;
  787. u32 configflags, swapflags;
  788. u64 tmplen;
  789. configflags = ctx->engine | SSS_HASH_INIT_BIT;
  790. if (likely(ctx->digcnt)) {
  791. s5p_hash_write_ctx_iv(dd, ctx);
  792. configflags |= SSS_HASH_USER_IV_EN;
  793. }
  794. if (final) {
  795. /* number of bytes for last part */
  796. low = length;
  797. high = 0;
  798. /* total number of bits prev hashed */
  799. tmplen = ctx->digcnt * 8;
  800. prelow = (u32)tmplen;
  801. prehigh = (u32)(tmplen >> 32);
  802. } else {
  803. prelow = 0;
  804. prehigh = 0;
  805. low = 0;
  806. high = BIT(31);
  807. }
  808. swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
  809. SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
  810. s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
  811. s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
  812. s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
  813. s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
  814. s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
  815. s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
  816. }
  817. /**
  818. * s5p_hash_xmit_dma() - start DMA hash processing
  819. * @dd: secss device
  820. * @length: length for request
  821. * @final: true if final op
  822. *
  823. * Update digcnt here, as it is needed for finup/final op.
  824. */
  825. static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
  826. bool final)
  827. {
  828. struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  829. unsigned int cnt;
  830. cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
  831. if (!cnt) {
  832. dev_err(dd->dev, "dma_map_sg error\n");
  833. ctx->error = true;
  834. return -EINVAL;
  835. }
  836. set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
  837. dd->hash_sg_iter = ctx->sg;
  838. dd->hash_sg_cnt = cnt;
  839. s5p_hash_write_ctrl(dd, length, final);
  840. ctx->digcnt += length;
  841. ctx->total -= length;
  842. /* catch last interrupt */
  843. if (final)
  844. set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
  845. s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
  846. return -EINPROGRESS;
  847. }
  848. /**
  849. * s5p_hash_copy_sgs() - copy request's bytes into new buffer
  850. * @ctx: request context
  851. * @sg: source scatterlist request
  852. * @new_len: number of bytes to process from sg
  853. *
  854. * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
  855. * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
  856. * with allocated buffer.
  857. *
  858. * Set bit in dd->hash_flags so we can free it after irq ends processing.
  859. */
  860. static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
  861. struct scatterlist *sg, unsigned int new_len)
  862. {
  863. unsigned int pages, len;
  864. void *buf;
  865. len = new_len + ctx->bufcnt;
  866. pages = get_order(len);
  867. buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
  868. if (!buf) {
  869. dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
  870. ctx->error = true;
  871. return -ENOMEM;
  872. }
  873. if (ctx->bufcnt)
  874. memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
  875. scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
  876. new_len, 0);
  877. sg_init_table(ctx->sgl, 1);
  878. sg_set_buf(ctx->sgl, buf, len);
  879. ctx->sg = ctx->sgl;
  880. ctx->sg_len = 1;
  881. ctx->bufcnt = 0;
  882. ctx->skip = 0;
  883. set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
  884. return 0;
  885. }
  886. /**
  887. * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
  888. * @ctx: request context
  889. * @sg: source scatterlist request
  890. * @new_len: number of bytes to process from sg
  891. *
  892. * Allocate new scatterlist table, copy data for HASH into it. If there was
  893. * xmit_buf filled, prepare it first, then copy page, length and offset from
  894. * source sg into it, adjusting begin and/or end for skip offset and
  895. * hash_later value.
  896. *
  897. * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
  898. * it after irq ends processing.
  899. */
  900. static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
  901. struct scatterlist *sg, unsigned int new_len)
  902. {
  903. unsigned int skip = ctx->skip, n = sg_nents(sg);
  904. struct scatterlist *tmp;
  905. unsigned int len;
  906. if (ctx->bufcnt)
  907. n++;
  908. ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
  909. if (!ctx->sg) {
  910. ctx->error = true;
  911. return -ENOMEM;
  912. }
  913. sg_init_table(ctx->sg, n);
  914. tmp = ctx->sg;
  915. ctx->sg_len = 0;
  916. if (ctx->bufcnt) {
  917. sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
  918. tmp = sg_next(tmp);
  919. ctx->sg_len++;
  920. }
  921. while (sg && skip >= sg->length) {
  922. skip -= sg->length;
  923. sg = sg_next(sg);
  924. }
  925. while (sg && new_len) {
  926. len = sg->length - skip;
  927. if (new_len < len)
  928. len = new_len;
  929. new_len -= len;
  930. sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
  931. skip = 0;
  932. if (new_len <= 0)
  933. sg_mark_end(tmp);
  934. tmp = sg_next(tmp);
  935. ctx->sg_len++;
  936. sg = sg_next(sg);
  937. }
  938. set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
  939. return 0;
  940. }
  941. /**
  942. * s5p_hash_prepare_sgs() - prepare sg for processing
  943. * @ctx: request context
  944. * @sg: source scatterlist request
  945. * @nbytes: number of bytes to process from sg
  946. * @final: final flag
  947. *
  948. * Check two conditions: (1) the buffers in sg hold BUFLEN-aligned data, and
  949. * (2) the sg table has well-aligned elements (list_ok). If either check
  950. * fails, then either (1) allocate a new buffer with s5p_hash_copy_sgs(),
  951. * copy the data into it and prepare the request in sgl, or (2) allocate a
  952. * new sg table with s5p_hash_copy_sg_lists() and prepare its sg elements.
  953. *
  954. * For digest or finup all conditions can be good, and we may not need any
  955. * fixes.
  956. */
  957. static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
  958. struct scatterlist *sg,
  959. unsigned int new_len, bool final)
  960. {
  961. unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
  962. bool aligned = true, list_ok = true;
  963. struct scatterlist *sg_tmp = sg;
  964. if (!sg || !sg->length || !new_len)
  965. return 0;
  966. if (skip || !final)
  967. list_ok = false;
  968. while (nbytes > 0 && sg_tmp) {
  969. n++;
  970. if (skip >= sg_tmp->length) {
  971. skip -= sg_tmp->length;
  972. if (!sg_tmp->length) {
  973. aligned = false;
  974. break;
  975. }
  976. } else {
  977. if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
  978. aligned = false;
  979. break;
  980. }
  981. if (nbytes < sg_tmp->length - skip) {
  982. list_ok = false;
  983. break;
  984. }
  985. nbytes -= sg_tmp->length - skip;
  986. skip = 0;
  987. }
  988. sg_tmp = sg_next(sg_tmp);
  989. }
  990. if (!aligned)
  991. return s5p_hash_copy_sgs(ctx, sg, new_len);
  992. else if (!list_ok)
  993. return s5p_hash_copy_sg_lists(ctx, sg, new_len);
  994. /*
  995. * Have aligned data from previous operation and/or current
  996. * Note: will enter here only if (digest or finup) and aligned
  997. */
  998. if (ctx->bufcnt) {
  999. ctx->sg_len = n;
  1000. sg_init_table(ctx->sgl, 2);
  1001. sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
  1002. sg_chain(ctx->sgl, 2, sg);
  1003. ctx->sg = ctx->sgl;
  1004. ctx->sg_len++;
  1005. } else {
  1006. ctx->sg = sg;
  1007. ctx->sg_len = n;
  1008. }
  1009. return 0;
  1010. }
  1011. /**
  1012. * s5p_hash_prepare_request() - prepare request for processing
  1013. * @req: AHASH request
  1014. * @update: true if UPDATE op
  1015. *
  1016. * Note 1: we can have update flag _and_ final flag at the same time.
  1017. * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
  1018. * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
  1019. * we have final op
  1020. */
  1021. static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
  1022. {
  1023. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1024. bool final = ctx->finup;
  1025. int xmit_len, hash_later, nbytes;
  1026. int ret;
  1027. if (update)
  1028. nbytes = req->nbytes;
  1029. else
  1030. nbytes = 0;
  1031. ctx->total = nbytes + ctx->bufcnt;
  1032. if (!ctx->total)
  1033. return 0;
  1034. if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
  1035. /* bytes left from previous request, so fill up to BUFLEN */
  1036. int len = BUFLEN - ctx->bufcnt % BUFLEN;
  1037. if (len > nbytes)
  1038. len = nbytes;
  1039. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1040. 0, len, 0);
  1041. ctx->bufcnt += len;
  1042. nbytes -= len;
  1043. ctx->skip = len;
  1044. } else {
  1045. ctx->skip = 0;
  1046. }
  1047. if (ctx->bufcnt)
  1048. memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
  1049. xmit_len = ctx->total;
  1050. if (final) {
  1051. hash_later = 0;
  1052. } else {
  1053. if (IS_ALIGNED(xmit_len, BUFLEN))
  1054. xmit_len -= BUFLEN;
  1055. else
  1056. xmit_len -= xmit_len & (BUFLEN - 1);
  1057. hash_later = ctx->total - xmit_len;
  1058. /* copy hash_later bytes from end of req->src */
  1059. /* previous bytes are in xmit_buf, so no overwrite */
  1060. scatterwalk_map_and_copy(ctx->buffer, req->src,
  1061. req->nbytes - hash_later,
  1062. hash_later, 0);
  1063. }
  1064. if (xmit_len > BUFLEN) {
  1065. ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
  1066. final);
  1067. if (ret)
  1068. return ret;
  1069. } else {
  1070. /* have buffered data only */
  1071. if (unlikely(!ctx->bufcnt)) {
  1072. /* first update didn't fill up buffer */
  1073. scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
  1074. 0, xmit_len, 0);
  1075. }
  1076. sg_init_table(ctx->sgl, 1);
  1077. sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
  1078. ctx->sg = ctx->sgl;
  1079. ctx->sg_len = 1;
  1080. }
  1081. ctx->bufcnt = hash_later;
  1082. if (!final)
  1083. ctx->total = xmit_len;
  1084. return 0;
  1085. }
  1086. /**
  1087. * s5p_hash_update_dma_stop() - unmap DMA
  1088. * @dd: secss device
  1089. *
  1090. * Unmap scatterlist ctx->sg.
  1091. */
  1092. static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
  1093. {
  1094. const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  1095. dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
  1096. clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
  1097. }
  1098. /**
  1099. * s5p_hash_finish() - copy calculated digest to crypto layer
  1100. * @req: AHASH request
  1101. */
  1102. static void s5p_hash_finish(struct ahash_request *req)
  1103. {
  1104. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1105. struct s5p_aes_dev *dd = ctx->dd;
  1106. if (ctx->digcnt)
  1107. s5p_hash_copy_result(req);
  1108. dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
  1109. }
  1110. /**
  1111. * s5p_hash_finish_req() - finish request
  1112. * @req: AHASH request
  1113. * @err: error
  1114. */
  1115. static void s5p_hash_finish_req(struct ahash_request *req, int err)
  1116. {
  1117. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1118. struct s5p_aes_dev *dd = ctx->dd;
  1119. unsigned long flags;
  1120. if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
  1121. free_pages((unsigned long)sg_virt(ctx->sg),
  1122. get_order(ctx->sg->length));
  1123. if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
  1124. kfree(ctx->sg);
  1125. ctx->sg = NULL;
  1126. dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
  1127. BIT(HASH_FLAGS_SGS_COPIED));
  1128. if (!err && !ctx->error) {
  1129. s5p_hash_read_msg(req);
  1130. if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
  1131. s5p_hash_finish(req);
  1132. } else {
  1133. ctx->error = true;
  1134. }
  1135. spin_lock_irqsave(&dd->hash_lock, flags);
  1136. dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
  1137. BIT(HASH_FLAGS_DMA_READY) |
  1138. BIT(HASH_FLAGS_OUTPUT_READY));
  1139. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1140. if (req->base.complete)
  1141. req->base.complete(&req->base, err);
  1142. }
  1143. /**
  1144. * s5p_hash_handle_queue() - handle hash queue
  1145. * @dd: device s5p_aes_dev
  1146. * @req: AHASH request
  1147. *
  1148. * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not
  1149. * set on the device, then process the first request from dd->hash_queue.
  1150. *
  1151. * Returns: see s5p_hash_final below.
  1152. */
  1153. static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
  1154. struct ahash_request *req)
  1155. {
  1156. struct crypto_async_request *async_req, *backlog;
  1157. struct s5p_hash_reqctx *ctx;
  1158. unsigned long flags;
  1159. int err = 0, ret = 0;
  1160. retry:
  1161. spin_lock_irqsave(&dd->hash_lock, flags);
  1162. if (req)
  1163. ret = ahash_enqueue_request(&dd->hash_queue, req);
  1164. if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
  1165. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1166. return ret;
  1167. }
  1168. backlog = crypto_get_backlog(&dd->hash_queue);
  1169. async_req = crypto_dequeue_request(&dd->hash_queue);
  1170. if (async_req)
  1171. set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
  1172. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1173. if (!async_req)
  1174. return ret;
  1175. if (backlog)
  1176. backlog->complete(backlog, -EINPROGRESS);
  1177. req = ahash_request_cast(async_req);
  1178. dd->hash_req = req;
  1179. ctx = ahash_request_ctx(req);
  1180. err = s5p_hash_prepare_request(req, ctx->op_update);
  1181. if (err || !ctx->total)
  1182. goto out;
  1183. dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
  1184. ctx->op_update, req->nbytes);
  1185. s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
  1186. if (ctx->digcnt)
  1187. s5p_hash_write_iv(req); /* restore hash IV */
  1188. if (ctx->op_update) { /* HASH_OP_UPDATE */
  1189. err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
  1190. if (err != -EINPROGRESS && ctx->finup && !ctx->error)
  1191. /* no final() after finup() */
  1192. err = s5p_hash_xmit_dma(dd, ctx->total, true);
  1193. } else { /* HASH_OP_FINAL */
  1194. err = s5p_hash_xmit_dma(dd, ctx->total, true);
  1195. }
  1196. out:
  1197. if (err != -EINPROGRESS) {
  1198. /* hash_tasklet_cb will not finish it, so do it here */
  1199. s5p_hash_finish_req(req, err);
  1200. req = NULL;
  1201. /*
  1202. * Execute next request immediately if there is anything
  1203. * in queue.
  1204. */
  1205. goto retry;
  1206. }
  1207. return ret;
  1208. }
  1209. /**
  1210. * s5p_hash_tasklet_cb() - hash tasklet
  1211. * @data: ptr to s5p_aes_dev
  1212. */
  1213. static void s5p_hash_tasklet_cb(unsigned long data)
  1214. {
  1215. struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
  1216. if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
  1217. s5p_hash_handle_queue(dd, NULL);
  1218. return;
  1219. }
  1220. if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
  1221. if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
  1222. &dd->hash_flags)) {
  1223. s5p_hash_update_dma_stop(dd);
  1224. }
  1225. if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
  1226. &dd->hash_flags)) {
  1227. /* hash or semi-hash ready */
  1228. clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
  1229. goto finish;
  1230. }
  1231. }
  1232. return;
  1233. finish:
  1234. /* finish current request */
  1235. s5p_hash_finish_req(dd->hash_req, 0);
  1236. /* If we are not busy, process next req */
  1237. if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
  1238. s5p_hash_handle_queue(dd, NULL);
  1239. }
  1240. /**
  1241. * s5p_hash_enqueue() - enqueue request
  1242. * @req: AHASH request
  1243. * @op: operation UPDATE (true) or FINAL (false)
  1244. *
  1245. * Returns: see s5p_hash_final below.
  1246. */
  1247. static int s5p_hash_enqueue(struct ahash_request *req, bool op)
  1248. {
  1249. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1250. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1251. ctx->op_update = op;
  1252. return s5p_hash_handle_queue(tctx->dd, req);
  1253. }
  1254. /**
  1255. * s5p_hash_update() - process the hash input data
  1256. * @req: AHASH request
  1257. *
  1258. * If request will fit in buffer, copy it and return immediately
  1259. * else enqueue it with OP_UPDATE.
  1260. *
  1261. * Returns: see s5p_hash_final below.
  1262. */
  1263. static int s5p_hash_update(struct ahash_request *req)
  1264. {
  1265. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1266. if (!req->nbytes)
  1267. return 0;
  1268. if (ctx->bufcnt + req->nbytes <= BUFLEN) {
  1269. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1270. 0, req->nbytes, 0);
  1271. ctx->bufcnt += req->nbytes;
  1272. return 0;
  1273. }
  1274. return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
  1275. }
  1276. /**
  1277. * s5p_hash_shash_digest() - calculate shash digest
  1278. * @tfm: crypto transformation
  1279. * @flags: tfm flags
  1280. * @data: input data
  1281. * @len: length of data
  1282. * @out: output buffer
  1283. */
  1284. static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
  1285. const u8 *data, unsigned int len, u8 *out)
  1286. {
  1287. SHASH_DESC_ON_STACK(shash, tfm);
  1288. shash->tfm = tfm;
  1289. shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
  1290. return crypto_shash_digest(shash, data, len, out);
  1291. }
  1292. /**
  1293. * s5p_hash_final_shash() - calculate shash digest
  1294. * @req: AHASH request
  1295. */
  1296. static int s5p_hash_final_shash(struct ahash_request *req)
  1297. {
  1298. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1299. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1300. return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
  1301. ctx->buffer, ctx->bufcnt, req->result);
  1302. }
  1303. /**
  1304. * s5p_hash_final() - close up hash and calculate digest
  1305. * @req: AHASH request
  1306. *
  1307. * Note: in final, req->src does not contain any data, and req->nbytes can
  1308. * be non-zero.
  1309. *
  1310. * If there were no input data processed yet and the buffered hash data is
  1311. * less than BUFLEN (64) then calculate the final hash immediately by using
  1312. * SW algorithm fallback.
  1313. *
  1314. * Otherwise enqueues the current AHASH request with OP_FINAL operation op
  1315. * and finalize the hash message in HW. Note that if digcnt != 0 then there
  1316. * was a previous update op, so there are always some buffered bytes in
  1317. * ctx->buffer, which means that ctx->bufcnt != 0.
  1318. *
  1319. * Returns:
  1320. * 0 if the request has been processed immediately,
  1321. * -EINPROGRESS if the operation has been queued for later execution or is set
  1322. * to processing by HW,
  1323. * -EBUSY if queue is full and request should be resubmitted later,
  1324. * other negative values denote an error.
  1325. */
  1326. static int s5p_hash_final(struct ahash_request *req)
  1327. {
  1328. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1329. ctx->finup = true;
  1330. if (ctx->error)
  1331. return -EINVAL; /* uncompleted hash is not needed */
  1332. if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
  1333. return s5p_hash_final_shash(req);
  1334. return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
  1335. }
/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed (except when it returned -EINPROGRESS), or to
	 * calculate the digest for a small amount of buffered data.
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}

/**
 * s5p_hash_digest - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

/**
 * s5p_hash_cra_init_alg - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;
	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}

/**
 * s5p_hash_cra_init - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit - exit crypto tfm
 * @tfm: crypto transformation
 *
 * Free the allocated fallback.
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

/**
 * s5p_hash_export - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}

static struct ahash_alg algs_sha1_md5_sha256[] = {
	{
		.init = s5p_hash_init,
		.update = s5p_hash_update,
		.final = s5p_hash_final,
		.finup = s5p_hash_finup,
		.digest = s5p_hash_digest,
		.export = s5p_hash_export,
		.import = s5p_hash_import,
		.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "exynos-sha1",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = HASH_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct s5p_hash_ctx),
			.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
			.cra_module = THIS_MODULE,
			.cra_init = s5p_hash_cra_init,
			.cra_exit = s5p_hash_cra_exit,
		}
	},
	{
		.init = s5p_hash_init,
		.update = s5p_hash_update,
		.final = s5p_hash_final,
		.finup = s5p_hash_finup,
		.digest = s5p_hash_digest,
		.export = s5p_hash_export,
		.import = s5p_hash_import,
		.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "md5",
			.cra_driver_name = "exynos-md5",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = HASH_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct s5p_hash_ctx),
			.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
			.cra_module = THIS_MODULE,
			.cra_init = s5p_hash_cra_init,
			.cra_exit = s5p_hash_cra_exit,
		}
	},
	{
		.init = s5p_hash_init,
		.update = s5p_hash_update,
		.final = s5p_hash_final,
		.finup = s5p_hash_finup,
		.digest = s5p_hash_digest,
		.export = s5p_hash_export,
		.import = s5p_hash_import,
		.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "exynos-sha256",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = HASH_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct s5p_hash_ctx),
			.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
			.cra_module = THIS_MODULE,
			.cra_init = s5p_hash_cra_init,
			.cra_exit = s5p_hash_cra_exit,
		}
	}
};

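/**
 * s5p_set_aes() - write the AES IV and key into the SSS registers
 * @dev: device which holds the AES register mapping
 * @key: AES key
 * @iv: initialization vector, may be NULL (ECB mode)
 * @keylen: key length in bytes
 *
 * The key is written at an SSS_REG_AES_KEY_DATA() offset that depends on the
 * key length, so shorter keys start at a higher key data register.
 */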
static void s5p_set_aes(struct s5p_aes_dev *dev,
			const uint8_t *key, const uint8_t *iv,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

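/**
 * s5p_is_sg_aligned() - check scatterlist alignment
 * @sg: scatterlist to check
 *
 * Returns true only if every entry length is a multiple of AES_BLOCK_SIZE.
 */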
static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

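/**
 * s5p_set_indata_start() - prepare the source scatterlist for DMA
 * @dev: device
 * @req: cipher request
 *
 * If any source entry is not block aligned, the data is first copied into a
 * newly allocated aligned buffer (dev->sg_src_cpy) before being mapped.
 */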
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

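/**
 * s5p_set_outdata_start() - prepare the destination scatterlist for DMA
 * @dev: device
 * @req: cipher request
 *
 * As for the source, unaligned destination entries are replaced by an
 * aligned copy in dev->sg_dst_cpy before being mapped.
 */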
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

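/**
 * s5p_aes_crypt_start() - configure the AES engine and start DMA
 * @dev: device
 * @mode: FLAGS_AES_* bits of the current request
 *
 * Builds the AES_CONTROL word (direction, chaining mode, key size, FIFO mode
 * and byte swapping), maps the source and destination scatterlists, programs
 * the key and IV and enables the feed control interrupts. On setup failure
 * the request is completed with the error code.
 */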
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;
	u8 *iv;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->info;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = req->info;
	} else {
		iv = NULL; /* AES_ECB */
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI |
		       SSS_AES_BYTESWAP_DO |
		       SSS_AES_BYTESWAP_IV |
		       SSS_AES_BYTESWAP_KEY |
		       SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}

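/**
 * s5p_tasklet_cb() - dequeue and start the next queued AES request
 * @data: device, passed as unsigned long from tasklet_init()
 */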
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

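/**
 * s5p_aes_handle_req() - enqueue an AES request and schedule the tasklet
 * @dev: device
 * @req: cipher request
 *
 * Returns the value of ablkcipher_enqueue_request(); the tasklet is
 * scheduled only if the device was not already busy.
 */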
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

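/**
 * s5p_aes_crypt() - common entry point for the encrypt/decrypt callbacks
 * @req: cipher request
 * @mode: FLAGS_AES_* bits selecting direction and chaining mode
 *
 * The hardware works on whole AES blocks, so requests whose length is not a
 * multiple of AES_BLOCK_SIZE are rejected with -EINVAL.
 */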
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

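/**
 * s5p_aes_setkey() - store the AES key in the transformation context
 * @cipher: cipher transformation
 * @key: key material
 * @keylen: key length in bytes, must be 16, 24 or 32
 *
 * The key is only cached here; it is written to the hardware by
 * s5p_set_aes() when the request is actually processed.
 */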
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

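/**
 * s5p_aes_cra_init() - init AES tfm, set the request context size and device
 * @tfm: crypto transformation
 */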
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_ecb_encrypt,
			.decrypt = s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_cbc_encrypt,
			.decrypt = s5p_aes_cbc_decrypt,
		}
	},
};

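/**
 * s5p_aes_probe() - probe the SSS device
 * @pdev: platform device
 *
 * Maps the registers (optionally extended to cover the HASH block), enables
 * the "secss" clock, requests the feed control interrupt and registers the
 * AES algorithms, plus the hash algorithms when the HASH block is usable.
 */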
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/*
	 * Note: HASH and PRNG use the same registers in secss, so avoid
	 * overwriting each other. HASH is therefore dropped when
	 * CONFIG_EXYNOS_RNG is enabled in the config. The HASH registers need
	 * a larger resource size in secss; the current one describes only
	 * AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

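/**
 * s5p_aes_remove() - unregister algorithms and release resources
 * @pdev: platform device
 */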
static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe = s5p_aes_probe,
	.remove = s5p_aes_remove,
	.driver = {
		.name = "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");