safexcel.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

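/*
 * Initialize the EIP197 transform record cache (TRC): clear the record
 * entries and the hash table, link the records into a free chain and
 * program the cache geometry for the detected engine version.
 */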
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
        u32 val, htable_offset;
        int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

        if (priv->version == EIP197B) {
                cs_rc_max = EIP197B_CS_RC_MAX;
                cs_ht_wc = EIP197B_CS_HT_WC;
                cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
                cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
        } else {
                cs_rc_max = EIP197D_CS_RC_MAX;
                cs_ht_wc = EIP197D_CS_HT_WC;
                cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
                cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
        }

        /* Enable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        val |= EIP197_TRC_ENABLE_0;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Clear all ECC errors */
        writel(0, priv->base + EIP197_TRC_ECCCTRL);

        /*
         * Make sure the cache memory is accessible by taking the record
         * cache into reset.
         */
        val = readl(priv->base + EIP197_TRC_PARAMS);
        val |= EIP197_TRC_PARAMS_SW_RESET;
        val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
        writel(val, priv->base + EIP197_TRC_PARAMS);

        /* Clear all records */
        for (i = 0; i < cs_rc_max; i++) {
                u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

                writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
                       EIP197_CS_RC_PREV(EIP197_RC_NULL),
                       priv->base + offset);

                val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
                if (i == 0)
                        val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
                else if (i == cs_rc_max - 1)
                        val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
                writel(val, priv->base + offset + sizeof(u32));
        }

        /* Clear the hash table entries */
        htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
        for (i = 0; i < cs_ht_wc; i++)
                writel(GENMASK(29, 0),
                       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

        /* Disable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Write head and tail pointers of the record free chain */
        val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
              EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
        writel(val, priv->base + EIP197_TRC_FREECHAIN);

        /* Configure the record cache #1 */
        val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
              EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
        writel(val, priv->base + EIP197_TRC_PARAMS2);

        /* Configure the record cache #2 */
        val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
              EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
              EIP197_TRC_PARAMS_HTABLE_SZ(2);
        writel(val, priv->base + EIP197_TRC_PARAMS);
}

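/*
 * Copy one firmware image into the program RAM of an internal classification
 * engine of processing engine 'pe', holding the engine in reset while its
 * program memory is written and releasing it afterwards.
 */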
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
                                  const struct firmware *fw, int pe, u32 ctrl,
                                  u32 prog_en)
{
        const u32 *data = (const u32 *)fw->data;
        u32 val;
        int i;

        /* Reset the engine to make its program memory accessible */
        writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
               EIP197_PE(priv) + ctrl);

        /* Enable access to the program memory */
        writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

        /* Write the firmware */
        for (i = 0; i < fw->size / sizeof(u32); i++)
                writel(be32_to_cpu(data[i]),
                       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

        /* Disable access to the program memory */
        writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

        /* Release engine from reset */
        val = readl(EIP197_PE(priv) + ctrl);
        val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
        writel(val, EIP197_PE(priv) + ctrl);
}

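/*
 * Request the two classification firmware images (ifpp.bin and ipue.bin) and
 * load them into every processing engine. EIP97 engines need no firmware and
 * return early.
 */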
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
        const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
        const struct firmware *fw[FW_NB];
        char fw_path[31], *dir = NULL;
        int i, j, ret = 0, pe;
        u32 val;

        switch (priv->version) {
        case EIP197B:
                dir = "eip197b";
                break;
        case EIP197D:
                dir = "eip197d";
                break;
        default:
                /* No firmware is required */
                return 0;
        }

        for (i = 0; i < FW_NB; i++) {
                snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
                ret = request_firmware(&fw[i], fw_path, priv->dev);
                if (ret) {
                        if (priv->version != EIP197B)
                                goto release_fw;

                        /* Fall back to the old firmware location for the
                         * EIP197b.
                         */
                        ret = request_firmware(&fw[i], fw_name[i], priv->dev);
                        if (ret) {
                                dev_err(priv->dev,
                                        "Failed to request firmware %s (%d)\n",
                                        fw_name[i], ret);
                                goto release_fw;
                        }
                }
        }

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Clear the scratchpad memory */
                val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
                val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
                       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
                       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
                       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
                writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

                memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
                          EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

                eip197_write_firmware(priv, fw[FW_IFPP], pe,
                                      EIP197_PE_ICE_FPP_CTRL(pe),
                                      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

                eip197_write_firmware(priv, fw[FW_IPUE], pe,
                                      EIP197_PE_ICE_PUE_CTRL(pe),
                                      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
        }

release_fw:
        for (j = 0; j < i; j++)
                release_firmware(fw[j]);

        return ret;
}

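/*
 * Program the command descriptor rings: base addresses, descriptor size and
 * offset, fetch configuration and DMA cache attributes, then clear any
 * pending ring interrupts.
 */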
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, cd_size_rnd, val;
        int i;

        hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].cdr.base_dma),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].cdr.base_dma),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
                       priv->config.cd_size,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.cd_offset),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(5, 0),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
        }

        return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, rd_size_rnd, val;
        int i;

        hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].rdr.base_dma),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].rdr.base_dma),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
                       priv->config.rd_size,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.rd_offset),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
                writel(val,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(7, 0),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

                /* enable ring interrupt */
                val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
                val |= EIP197_RDR_IRQ(i);
                writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
        }

        return 0;
}

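/*
 * One-time hardware initialization: configure byte swapping and bus master
 * settings, reset and set up the data fetch/store engines and processing
 * engines, prepare the command and result descriptor rings, and (on EIP197)
 * initialize the record cache and load the classification firmware.
 */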
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
        u32 version, val;
        int i, ret, pe;

        /* Determine endianness and configure byte swap */
        version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
        val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

        if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
                val |= EIP197_MST_CTRL_BYTE_SWAP;
        else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
                val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

        /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
        if (priv->version == EIP197B || priv->version == EIP197D)
                val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

        writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

        /* Configure wr/rd cache values */
        writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
               EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
               EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

        /* Interrupts reset */

        /* Disable all global interrupts */
        writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

        /* Clear any pending interrupt */
        writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

        /* Processing Engine configuration */
        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Data Fetch Engine configuration */

                /* Reset all DFE threads */
                writel(EIP197_DxE_THR_CTRL_RESET_PE,
                       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                if (priv->version == EIP197B || priv->version == EIP197D) {
                        /* Reset HIA input interface arbiter */
                        writel(EIP197_HIA_RA_PE_CTRL_RESET,
                               EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
                }

                /* DMA transfer size to use */
                val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
                val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
                       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
                val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
                       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
                val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
                val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
                writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

                /* Take the DFE threads out of reset */
                writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                /* Configure the processing engine thresholds */
                writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
                       EIP197_PE_IN_xBUF_THRES_MAX(9),
                       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
                writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
                       EIP197_PE_IN_xBUF_THRES_MAX(7),
                       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

                if (priv->version == EIP197B || priv->version == EIP197D) {
                        /* enable HIA input interface arbiter and rings */
                        writel(EIP197_HIA_RA_PE_CTRL_EN |
                               GENMASK(priv->config.rings - 1, 0),
                               EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
                }

                /* Data Store Engine configuration */

                /* Reset all DSE threads */
                writel(EIP197_DxE_THR_CTRL_RESET_PE,
                       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

                /* Wait for all DSE threads to complete */
                while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
                        GENMASK(15, 12)) != GENMASK(15, 12))
                        ;

                /* DMA transfer size to use */
                val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
                val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
                       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
                val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
                val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
                /* FIXME: instability issues can occur for EIP97, but disabling
                 * this impacts performance.
                 */
                if (priv->version == EIP197B || priv->version == EIP197D)
                        val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
                writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

                /* Take the DSE threads out of reset */
                writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

                /* Configure the processing engine thresholds */
                writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
                       EIP197_PE_OUT_DBUF_THRES_MAX(8),
                       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

                /* Processing Engine configuration */

                /* H/W capabilities selection */
                val = EIP197_FUNCTION_RSVD;
                val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
                val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
                val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
                val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
                val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
                val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
                val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
                val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
                writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
        }

        /* Command Descriptor Rings prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Clear interrupts for this ring */
                writel(GENMASK(31, 0),
                       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

                /* Disable external triggering */
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

                writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Result Descriptor Ring prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Disable external triggering */
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

                /* Ring size */
                writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
        }

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Enable command descriptor rings */
                writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
                       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                /* Enable result descriptor rings */
                writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
                       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
        }

        /* Clear any HIA interrupt */
        writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

        if (priv->version == EIP197B || priv->version == EIP197D) {
                eip197_trc_cache_init(priv);

                ret = eip197_load_firmwares(priv);
                if (ret)
                        return ret;
        }

        safexcel_hw_setup_cdesc_rings(priv);
        safexcel_hw_setup_rdesc_rings(priv);

        return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
                                       int ring)
{
        int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

        if (!coal)
                return;

        /* Configure when we want an interrupt */
        writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
               EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
               EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

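/*
 * Drain the per-ring crypto request queue: resume any request left over from
 * a previous, resource-starved pass, push as many requests as possible to
 * the engine via the context's send() callback, then tell the CDR and RDR
 * how many descriptors were prepared.
 */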
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
        struct crypto_async_request *req, *backlog;
        struct safexcel_context *ctx;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

        /* If a request wasn't properly dequeued because of a lack of resources,
         * process it first.
         */
        req = priv->ring[ring].req;
        backlog = priv->ring[ring].backlog;
        if (req)
                goto handle_req;

        while (true) {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);

                if (!req) {
                        priv->ring[ring].req = NULL;
                        priv->ring[ring].backlog = NULL;
                        goto finalize;
                }

handle_req:
                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, &commands, &results);
                if (ret)
                        goto request_failed;

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                /* In case the send() helper did not issue any command to push
                 * to the engine because the input data was cached, continue to
                 * dequeue other requests as this is valid and not an error.
                 */
                if (!commands && !results)
                        continue;

                cdesc += commands;
                rdesc += results;
                nreq++;
        }

request_failed:
        /* Not enough resources to handle all the requests. Bail out and save
         * the request and the backlog for the next dequeue call (per-ring).
         */
        priv->ring[ring].req = req;
        priv->ring[ring].backlog = backlog;

finalize:
        if (!nreq)
                return;

        spin_lock_bh(&priv->ring[ring].lock);

        priv->ring[ring].requests += nreq;

        if (!priv->ring[ring].busy) {
                safexcel_try_push_requests(priv, ring);
                priv->ring[ring].busy = true;
        }

        spin_unlock_bh(&priv->ring[ring].lock);

        /* let the RDR know we have pending descriptors */
        writel((rdesc * priv->config.rd_offset) << 2,
               EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

        /* let the CDR know we have pending descriptors */
        writel((cdesc * priv->config.cd_offset) << 2,
               EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
                                       struct safexcel_result_desc *rdesc)
{
        if (likely(!rdesc->result_data.error_code))
                return 0;

        if (rdesc->result_data.error_code & 0x407f) {
                /* Fatal error (bits 0-6, 14) */
                dev_err(priv->dev,
                        "cipher: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                return -EIO;
        } else if (rdesc->result_data.error_code == BIT(9)) {
                /* Authentication failed */
                return -EBADMSG;
        }

        /* All other non-fatal errors */
        return -EINVAL;
}

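/*
 * Book-keeping helpers that associate a result descriptor slot with the
 * crypto request it belongs to, so the IRQ thread can retrieve the request
 * when the corresponding result comes back.
 */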
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
                                 int ring,
                                 struct safexcel_result_desc *rdesc,
                                 struct crypto_async_request *req)
{
        int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

        priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
        int i = safexcel_ring_first_rdr_index(priv, ring);

        return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
        struct safexcel_command_desc *cdesc;

        /* Acknowledge the command descriptors */
        do {
                cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
                if (IS_ERR(cdesc)) {
                        dev_err(priv->dev,
                                "Could not retrieve the command descriptor\n");
                        return;
                }
        } while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_inv_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

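/*
 * Queue an invalidation request for a context record: one command descriptor
 * carrying the CONTEXT_CONTROL_INV_TR opcode and one matching result
 * descriptor, used when a transform's cached context must be dropped by the
 * engine.
 */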
int safexcel_invalidate_cache(struct crypto_async_request *async,
                              struct safexcel_crypto_priv *priv,
                              dma_addr_t ctxr_dma, int ring)
{
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        int ret = 0;

        /* Prepare command descriptor */
        cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
        if (IS_ERR(cdesc))
                return PTR_ERR(cdesc);

        cdesc->control_data.type = EIP197_TYPE_EXTENDED;
        cdesc->control_data.options = 0;
        cdesc->control_data.refresh = 0;
        cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

        /* Prepare result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        safexcel_rdr_req_set(priv, ring, rdesc, async);

        return ret;

cdesc_rollback:
        safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        return ret;
}

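/*
 * Threaded-IRQ bottom half: walk the processed packets reported by the RDR,
 * let each context's handle_result() consume its descriptors, complete the
 * requests, acknowledge the processed counts and rearm the interrupt
 * threshold for whatever is still in flight.
 */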
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
                                                     int ring)
{
        struct crypto_async_request *req;
        struct safexcel_context *ctx;
        int ret, i, nreq, ndesc, tot_descs, handled = 0;
        bool should_complete;

handle_results:
        tot_descs = 0;

        nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
        nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
        nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
        if (!nreq)
                goto requests_left;

        for (i = 0; i < nreq; i++) {
                req = safexcel_rdr_req_get(priv, ring);

                ctx = crypto_tfm_ctx(req->tfm);
                ndesc = ctx->handle_result(priv, ring, req,
                                           &should_complete, &ret);
                if (ndesc < 0) {
                        dev_err(priv->dev, "failed to handle result (%d)", ndesc);
                        goto acknowledge;
                }

                if (should_complete) {
                        local_bh_disable();
                        req->complete(req, ret);
                        local_bh_enable();
                }

                tot_descs += ndesc;
                handled++;
        }

acknowledge:
        if (i) {
                writel(EIP197_xDR_PROC_xD_PKT(i) |
                       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
                       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
        }

        /* If the number of requests overflowed the counter, try to process
         * more requests.
         */
        if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
                goto handle_results;

requests_left:
        spin_lock_bh(&priv->ring[ring].lock);

        priv->ring[ring].requests -= handled;
        safexcel_try_push_requests(priv, ring);

        if (!priv->ring[ring].requests)
                priv->ring[ring].busy = false;

        spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
        struct safexcel_work_data *data =
                container_of(work, struct safexcel_work_data, work);

        safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
        struct safexcel_crypto_priv *priv;
        int ring;
};

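/*
 * Hard IRQ handler for a ring: check the per-ring AIC status, report fatal
 * RDR errors, and wake the IRQ thread when the processed-packet threshold
 * has been reached.
 */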
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring, rc = IRQ_NONE;
        u32 status, stat;

        status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
        if (!status)
                return rc;

        /* RDR interrupts */
        if (status & EIP197_RDR_IRQ(ring)) {
                stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

                if (unlikely(stat & EIP197_xDR_ERR)) {
                        /*
                         * Fatal error, the RDR is unusable and must be
                         * reinitialized. This should not happen under
                         * normal circumstances.
                         */
                        dev_err(priv->dev, "RDR: fatal error.");
                } else if (likely(stat & EIP197_xDR_THRESH)) {
                        rc = IRQ_WAKE_THREAD;
                }

                /* ACK the interrupts */
                writel(stat & 0xff,
                       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
        }

        /* ACK the interrupts */
        writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

        return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring;

        safexcel_handle_result_descriptor(priv, ring);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
                                     irq_handler_t handler,
                                     irq_handler_t threaded_handler,
                                     struct safexcel_ring_irq_data *ring_irq_priv)
{
        int ret, irq = platform_get_irq_byname(pdev, name);

        if (irq < 0) {
                dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
                return irq;
        }

        ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
                                        threaded_handler, IRQF_ONESHOT,
                                        dev_name(&pdev->dev), ring_irq_priv);
        if (ret) {
                dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
                return ret;
        }

        return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
        &safexcel_alg_ecb_des,
        &safexcel_alg_cbc_des,
        &safexcel_alg_ecb_des3_ede,
        &safexcel_alg_cbc_des3_ede,
        &safexcel_alg_ecb_aes,
        &safexcel_alg_cbc_aes,
        &safexcel_alg_md5,
        &safexcel_alg_sha1,
        &safexcel_alg_sha224,
        &safexcel_alg_sha256,
        &safexcel_alg_sha384,
        &safexcel_alg_sha512,
        &safexcel_alg_hmac_md5,
        &safexcel_alg_hmac_sha1,
        &safexcel_alg_hmac_sha224,
        &safexcel_alg_hmac_sha256,
        &safexcel_alg_hmac_sha384,
        &safexcel_alg_hmac_sha512,
        &safexcel_alg_authenc_hmac_sha1_cbc_aes,
        &safexcel_alg_authenc_hmac_sha224_cbc_aes,
        &safexcel_alg_authenc_hmac_sha256_cbc_aes,
        &safexcel_alg_authenc_hmac_sha384_cbc_aes,
        &safexcel_alg_authenc_hmac_sha512_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
        int i, j, ret = 0;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                safexcel_algs[i]->priv = priv;

                if (!(safexcel_algs[i]->engines & priv->version))
                        continue;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
                else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
                else
                        ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

                if (ret)
                        goto fail;
        }

        return 0;

fail:
        for (j = 0; j < i; j++) {
                if (!(safexcel_algs[j]->engines & priv->version))
                        continue;

                if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
                else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
                else
                        crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
        }

        return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                if (!(safexcel_algs[i]->engines & priv->version))
                        continue;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
                else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
                else
                        crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
        }
}

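/*
 * Read the engine's HIA_OPTIONS register to discover how many processing
 * engines and rings are available, and derive the command/result descriptor
 * sizes and the data-width-aligned offsets used when programming the rings.
 */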
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
        u32 val, mask = 0;

        val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

        /* Read number of PEs from the engine */
        switch (priv->version) {
        case EIP197B:
        case EIP197D:
                mask = EIP197_N_PES_MASK;
                break;
        default:
                mask = EIP97_N_PES_MASK;
        }
        priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

        val = (val & GENMASK(27, 25)) >> 25;
        mask = BIT(val) - 1;

        val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
        priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

        priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
        priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

        priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
        priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
        struct safexcel_register_offsets *offsets = &priv->offsets;

        switch (priv->version) {
        case EIP197B:
        case EIP197D:
                offsets->hia_aic = EIP197_HIA_AIC_BASE;
                offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
                offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
                offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
                offsets->hia_dfe = EIP197_HIA_DFE_BASE;
                offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
                offsets->hia_dse = EIP197_HIA_DSE_BASE;
                offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
                offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
                offsets->pe = EIP197_PE_BASE;
                break;
        case EIP97IES:
                offsets->hia_aic = EIP97_HIA_AIC_BASE;
                offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
                offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
                offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
                offsets->hia_dfe = EIP97_HIA_DFE_BASE;
                offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
                offsets->hia_dse = EIP97_HIA_DSE_BASE;
                offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
                offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
                offsets->pe = EIP97_PE_BASE;
                break;
        }
}

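/*
 * Probe: map the registers, enable the (optional) core and register clocks,
 * set up the DMA mask and context record pool, allocate the per-ring state
 * (descriptor rings, IRQs, workqueues), initialize the hardware and finally
 * register the supported algorithms with the crypto API.
 */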
static int safexcel_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct safexcel_crypto_priv *priv;
        int i, ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;
        priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

        if (priv->version == EIP197B || priv->version == EIP197D)
                priv->flags |= EIP197_TRC_CACHE;

        safexcel_init_register_offsets(priv);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->base)) {
                dev_err(dev, "failed to get resource\n");
                return PTR_ERR(priv->base);
        }

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        ret = PTR_ERR_OR_ZERO(priv->clk);
        /* The clock isn't mandatory */
        if (ret != -ENOENT) {
                if (ret)
                        return ret;

                ret = clk_prepare_enable(priv->clk);
                if (ret) {
                        dev_err(dev, "unable to enable clk (%d)\n", ret);
                        return ret;
                }
        }

        priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
        ret = PTR_ERR_OR_ZERO(priv->reg_clk);
        /* The clock isn't mandatory */
        if (ret != -ENOENT) {
                if (ret)
                        goto err_core_clk;

                ret = clk_prepare_enable(priv->reg_clk);
                if (ret) {
                        dev_err(dev, "unable to enable reg clk (%d)\n", ret);
                        goto err_core_clk;
                }
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                goto err_reg_clk;

        priv->context_pool = dmam_pool_create("safexcel-context", dev,
                                              sizeof(struct safexcel_context_record),
                                              1, 0);
        if (!priv->context_pool) {
                ret = -ENOMEM;
                goto err_reg_clk;
        }

        safexcel_configure(priv);

        priv->ring = devm_kcalloc(dev, priv->config.rings,
                                  sizeof(*priv->ring),
                                  GFP_KERNEL);
        if (!priv->ring) {
                ret = -ENOMEM;
                goto err_reg_clk;
        }

        for (i = 0; i < priv->config.rings; i++) {
                char irq_name[6] = {0}; /* "ringX\0" */
                char wq_name[9] = {0}; /* "wq_ringX\0" */
                int irq;
                struct safexcel_ring_irq_data *ring_irq;

                ret = safexcel_init_ring_descriptors(priv,
                                                     &priv->ring[i].cdr,
                                                     &priv->ring[i].rdr);
                if (ret)
                        goto err_reg_clk;

                priv->ring[i].rdr_req = devm_kcalloc(dev,
                                                     EIP197_DEFAULT_RING_SIZE,
                                                     sizeof(priv->ring[i].rdr_req),
                                                     GFP_KERNEL);
                if (!priv->ring[i].rdr_req) {
                        ret = -ENOMEM;
                        goto err_reg_clk;
                }

                ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
                if (!ring_irq) {
                        ret = -ENOMEM;
                        goto err_reg_clk;
                }

                ring_irq->priv = priv;
                ring_irq->ring = i;

                snprintf(irq_name, 6, "ring%d", i);
                irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
                                                safexcel_irq_ring_thread,
                                                ring_irq);
                if (irq < 0) {
                        ret = irq;
                        goto err_reg_clk;
                }

                priv->ring[i].work_data.priv = priv;
                priv->ring[i].work_data.ring = i;
                INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

                snprintf(wq_name, 9, "wq_ring%d", i);
                priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
                if (!priv->ring[i].workqueue) {
                        ret = -ENOMEM;
                        goto err_reg_clk;
                }

                priv->ring[i].requests = 0;
                priv->ring[i].busy = false;

                crypto_init_queue(&priv->ring[i].queue,
                                  EIP197_DEFAULT_RING_SIZE);

                spin_lock_init(&priv->ring[i].lock);
                spin_lock_init(&priv->ring[i].queue_lock);
        }

        platform_set_drvdata(pdev, priv);
        atomic_set(&priv->ring_used, 0);

        ret = safexcel_hw_init(priv);
        if (ret) {
                dev_err(dev, "EIP h/w init failed (%d)\n", ret);
                goto err_reg_clk;
        }

        ret = safexcel_register_algorithms(priv);
        if (ret) {
                dev_err(dev, "Failed to register algorithms (%d)\n", ret);
                goto err_reg_clk;
        }

        return 0;

err_reg_clk:
        clk_disable_unprepare(priv->reg_clk);
err_core_clk:
        clk_disable_unprepare(priv->clk);
        return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < priv->config.rings; i++) {
                /* clear any pending interrupt */
                writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
                writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

                /* Reset the CDR base address */
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                /* Reset the RDR base address */
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
        }
}

static int safexcel_remove(struct platform_device *pdev)
{
        struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
        int i;

        safexcel_unregister_algorithms(priv);
        safexcel_hw_reset_rings(priv);

        clk_disable_unprepare(priv->clk);

        for (i = 0; i < priv->config.rings; i++)
                destroy_workqueue(priv->ring[i].workqueue);

        return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
        {
                .compatible = "inside-secure,safexcel-eip97ies",
                .data = (void *)EIP97IES,
        },
        {
                .compatible = "inside-secure,safexcel-eip197b",
                .data = (void *)EIP197B,
        },
        {
                .compatible = "inside-secure,safexcel-eip197d",
                .data = (void *)EIP197D,
        },
        {
                /* Deprecated. Kept for backward compatibility. */
                .compatible = "inside-secure,safexcel-eip97",
                .data = (void *)EIP97IES,
        },
        {
                /* Deprecated. Kept for backward compatibility. */
                .compatible = "inside-secure,safexcel-eip197",
                .data = (void *)EIP197B,
        },
        {},
};

static struct platform_driver crypto_safexcel = {
        .probe = safexcel_probe,
        .remove = safexcel_remove,
        .driver = {
                .name = "crypto-safexcel",
                .of_match_table = safexcel_of_match_table,
        },
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");