ctrl.c

/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
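
/*
 * Tear-down path: shuts down each registered job ring and disposes its IRQ
 * mapping, removes the debugfs tree, unmaps the controller register region
 * and frees the private data allocated at probe time.
 */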
static int caam_remove(struct platform_device *pdev)
{
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_drv_private_jr *jrpriv;
        struct caam_full __iomem *topregs;
        int ring, ret = 0;

        ctrldev = &pdev->dev;
        ctrlpriv = dev_get_drvdata(ctrldev);
        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;

        /* shut down JobRs */
        for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
                ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
                jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
                irq_dispose_mapping(jrpriv->irq);
        }

        /* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

        /* Unmap controller region */
        iounmap(&topregs->ctrl);

        kfree(ctrlpriv->jrdev);
        kfree(ctrlpriv);

        return ret;
}

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
        int d, ring, rspec;
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_full __iomem *topregs;
        struct caam_drv_private *ctrlpriv;
        struct caam_perfmon *perfmon;
        struct caam_deco **deco;
        u32 deconum;

        ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;

        dev = &pdev->dev;
        dev_set_drvdata(dev, ctrlpriv);
        ctrlpriv->pdev = pdev;
        nprop = pdev->dev.of_node;

        /* Get configuration properties from device tree */
        /* First, get register page */
        ctrl = of_iomap(nprop, 0);
        if (ctrl == NULL) {
                dev_err(dev, "caam: of_iomap() failed\n");
                kfree(ctrlpriv);
                return -ENOMEM;
        }
        ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;

        /* topregs used to derive pointers to CAAM sub-blocks only */
        topregs = (struct caam_full __iomem *)ctrl;

        /* Get the IRQ of the controller (for security violations only) */
        ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);

        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * 36-bit pointers in master configuration register
         */
        setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
                  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));

        if (sizeof(dma_addr_t) == sizeof(u64))
                dma_set_mask(dev, DMA_BIT_MASK(36));
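
        /*
         * Note: DMA_BIT_MASK(36) matches the 36-bit (long) pointer format
         * selected above via MCFGR_LONG_PTR, keeping DMA addresses handed
         * to the block within the range it can represent.
         */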

        /* Find out how many DECOs are present */
        deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
                   CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;

        ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
                                 GFP_KERNEL);
        if (!ctrlpriv->deco) {
                iounmap(&topregs->ctrl);
                kfree(ctrlpriv);
                return -ENOMEM;
        }

        deco = (struct caam_deco __force **)&topregs->deco;
        for (d = 0; d < deconum; d++)
                ctrlpriv->deco[d] = deco[d];
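
        /*
         * ctrlpriv->deco now caches a pointer into the mapped register
         * space for each DECO, so individual DECOs can be addressed later
         * without recomputing offsets from topregs.
         */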

        /*
         * Detect and enable JobRs
         * First, find out how many ring spec'ed, allocate references
         * for all, then go probe each one.
         */
        rspec = 0;
        for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
                rspec++;
        ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
        if (ctrlpriv->jrdev == NULL) {
                kfree(ctrlpriv->deco);
                iounmap(&topregs->ctrl);
                kfree(ctrlpriv);
                return -ENOMEM;
        }

        ring = 0;
        ctrlpriv->total_jobrs = 0;
        for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
                caam_jr_probe(pdev, np, ring);
                ctrlpriv->total_jobrs++;
                ring++;
        }

        /* Check to see if QI present. If so, enable */
        ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
                                  CTPR_QI_MASK);
        if (ctrlpriv->qi_present) {
                ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
                /* This is all that's required to physically enable QI */
                wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
        }
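
        /*
         * QI is the CAAM queue interface; setting QICTL_DQEN lets the block
         * dequeue requests through that interface in addition to (or instead
         * of) the job rings.
         */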

        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
                caam_remove(pdev);
                return -ENOMEM;
        }
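
        /*
         * caam_remove() is reused above to unwind what has been set up so
         * far: it shuts down any probed job rings, unmaps the controller
         * registers and frees the private data.
         */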

        /* NOTE: RTIC detection ought to go here, around Si time */

        /* Initialize queue allocator lock */
        spin_lock_init(&ctrlpriv->jr_alloc_lock);

        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx\n",
                 rd_reg64(&topregs->ctrl.perfmon.caam_id));
        dev_info(dev, "job rings = %d, qi = %d\n",
                 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

#ifdef CONFIG_DEBUG_FS
        /*
         * FIXME: needs better naming distinction, as some amalgamation of
         * "caam" and nprop->full_name. The OF name isn't distinctive,
         * but does separate instances
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

        ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
        ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
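
        /*
         * Everything created below is a read-only view onto registers in the
         * performance-monitor page (counters, fault status) or onto the
         * covering-key blobs, exposed under caam/ctl in the debugfs mount
         * (typically /sys/kernel/debug).
         */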

        /* Controller-level - performance monitor counters */
        ctrlpriv->ctl_rq_dequeued =
                debugfs_create_u64("rq_dequeued",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->req_dequeued);
        ctrlpriv->ctl_ob_enc_req =
                debugfs_create_u64("ob_rq_encrypted",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_enc_req);
        ctrlpriv->ctl_ib_dec_req =
                debugfs_create_u64("ib_rq_decrypted",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_dec_req);
        ctrlpriv->ctl_ob_enc_bytes =
                debugfs_create_u64("ob_bytes_encrypted",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
        ctrlpriv->ctl_ob_prot_bytes =
                debugfs_create_u64("ob_bytes_protected",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
        ctrlpriv->ctl_ib_dec_bytes =
                debugfs_create_u64("ib_bytes_decrypted",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
        ctrlpriv->ctl_ib_valid_bytes =
                debugfs_create_u64("ib_bytes_validated",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_valid_bytes);

        /* Controller level - global status values */
        ctrlpriv->ctl_faultaddr =
                debugfs_create_u64("fault_addr",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->faultaddr);
        ctrlpriv->ctl_faultdetail =
                debugfs_create_u32("fault_detail",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->faultdetail);
        ctrlpriv->ctl_faultstatus =
                debugfs_create_u32("fault_status",
                                   S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->status);

        /* Internal covering keys (useful in non-secure mode only) */
        ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_kek = debugfs_create_blob("kek",
                                                S_IFCHR | S_IRUSR |
                                                S_IRGRP | S_IROTH,
                                                ctrlpriv->ctl,
                                                &ctrlpriv->ctl_kek_wrap);

        ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
                                                 S_IFCHR | S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tkek_wrap);

        ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
                                                 S_IFCHR | S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tdsk_wrap);
#endif
        return 0;
}

static struct of_device_id caam_match[] = {
        {
                .compatible = "fsl,sec-v4.0",
        },
        {},
};
MODULE_DEVICE_TABLE(of, caam_match);
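
/*
 * The driver binds to controller nodes with compatible "fsl,sec-v4.0"; the
 * job rings are then located separately in caam_probe() via their own
 * "fsl,sec-v4.0-job-ring" compatible. Illustrative device-tree fragment
 * (addresses, sizes and interrupt specifiers below are placeholders, not
 * taken from any particular SoC):
 *
 *      crypto@300000 {
 *              compatible = "fsl,sec-v4.0";
 *              reg = <0x300000 0x10000>;
 *              interrupts = <92 2>;
 *
 *              jr@1000 {
 *                      compatible = "fsl,sec-v4.0-job-ring";
 *                      reg = <0x1000 0x1000>;
 *                      interrupts = <88 2>;
 *              };
 *      };
 */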

static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
                .owner = THIS_MODULE,
                .of_match_table = caam_match,
        },
        .probe       = caam_probe,
        .remove      = __devexit_p(caam_remove),
};

static int __init caam_base_init(void)
{
        return platform_driver_register(&caam_driver);
}

static void __exit caam_base_exit(void)
{
        platform_driver_unregister(&caam_driver);
}

module_init(caam_base_init);
module_exit(caam_base_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");