qcom-ebi2.c

/*
 * Qualcomm External Bus Interface 2 (EBI2) driver
 * an older version of the Qualcomm Parallel Interface Controller (QPIC)
 *
 * Copyright (C) 2016 Linaro Ltd.
 *
 * Author: Linus Walleij <linus.walleij@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 * See the device tree bindings for this block for more details on the
 * hardware.
 */
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
/*
 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 */
#define EBI2_CS0_ENABLE_MASK (BIT(0) | BIT(1))
#define EBI2_CS1_ENABLE_MASK (BIT(2) | BIT(3))
#define EBI2_CS2_ENABLE_MASK BIT(4)
#define EBI2_CS3_ENABLE_MASK BIT(5)
#define EBI2_CS4_ENABLE_MASK (BIT(6) | BIT(7))
#define EBI2_CS5_ENABLE_MASK (BIT(8) | BIT(9))
#define EBI2_CSN_MASK GENMASK(9, 0)

#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
/*
 * SLOW CSn CFG
 *
 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc), this is the time
 *             the memory continues to drive the data bus after OE is
 *             de-asserted. Inserted when reading one CS and switching to
 *             another CS, or a read followed by a write on the same CS.
 *             Valid values 0 thru 15.
 * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted
 *             after every write, minimum 1. The data out is driven from the
 *             time WE is asserted until CS is de-asserted. With a hold of 1,
 *             the CS stays active for 1 extra cycle etc. Valid values 0
 *             thru 15.
 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the
 *             first write to a page or burst memory
 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the
 *             first read to a page or burst memory
 * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1
 *             cycle, so 1 thru 16 cycles.
 * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1
 *             cycle, so 1 thru 16 cycles.
 */
#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
#define EBI2_XMEM_CS5_SLOW_CFG 0x001C

#define EBI2_XMEM_RECOVERY_SHIFT 28
#define EBI2_XMEM_WR_HOLD_SHIFT 24
#define EBI2_XMEM_WR_DELTA_SHIFT 16
#define EBI2_XMEM_RD_DELTA_SHIFT 8
#define EBI2_XMEM_WR_WAIT_SHIFT 4
#define EBI2_XMEM_RD_WAIT_SHIFT 0
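
/*
 * Worked example (illustrative only, the timing values are hypothetical):
 * a device needing 2 recovery cycles, a write hold of 1, and 4 wait cycles
 * on both reads and writes would be programmed as
 *
 *   (2 << EBI2_XMEM_RECOVERY_SHIFT) | (1 << EBI2_XMEM_WR_HOLD_SHIFT) |
 *   (4 << EBI2_XMEM_WR_WAIT_SHIFT)  | (4 << EBI2_XMEM_RD_WAIT_SHIFT)
 *   = 0x21000044
 *
 * which is the kind of SLOW CFG word qcom_ebi2_setup_chipselect() below
 * composes from the corresponding qcom,xmem-* device tree properties.
 */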
/*
 * FAST CSn CFG
 * Bits 31-28: ?
 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 *             transfer. For a single read transfer this will be the time
 *             from CS assertion to OE assertion.
 * Bits 23-18: ?
 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 *             assertion, with respect to the cycle where ADV is asserted.
 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 * Bit 5:      ADDR_HOLD_ENA, the address is held for an extra cycle to meet
 *             hold time requirements with ADV assertion.
 *
 * The manual mentions "write precharge cycles" and "precharge cycles".
 * We have not been able to figure out which bit fields these correspond to
 * in the hardware, or what valid values exist. The current hypothesis is that
 * this is something just used on the FAST chip selects. There is also a "byte
 * device enable" flag somewhere for 8bit memories.
 */
#define EBI2_XMEM_CS0_FAST_CFG 0x0028
#define EBI2_XMEM_CS1_FAST_CFG 0x002C
#define EBI2_XMEM_CS2_FAST_CFG 0x0030
#define EBI2_XMEM_CS3_FAST_CFG 0x0034
#define EBI2_XMEM_CS4_FAST_CFG 0x0038
#define EBI2_XMEM_CS5_FAST_CFG 0x003C

#define EBI2_XMEM_RD_HOLD_SHIFT 24
#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
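
/*
 * Illustrative example (hypothetical values): a read hold of 2 cycles,
 * 1 cycle of ADV-to-OE recovery and address hold enabled would give
 *
 *   (2 << EBI2_XMEM_RD_HOLD_SHIFT) |
 *   (1 << EBI2_XMEM_ADV_OE_RECOVERY_SHIFT) |
 *   BIT(EBI2_XMEM_ADDR_HOLD_ENA_SHIFT)
 *   = 0x02010020
 */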
/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEMC slow CS config
 * @fast_cfg: offset to XMEMC fast CS config
 */
struct cs_data {
        u32 enable_mask;
        u16 slow_cfg;
        u16 fast_cfg;
};
static const struct cs_data cs_info[] = {
        {
                /* CS0 */
                .enable_mask = EBI2_CS0_ENABLE_MASK,
                .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
                .fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
        },
        {
                /* CS1 */
                .enable_mask = EBI2_CS1_ENABLE_MASK,
                .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
                .fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
        },
        {
                /* CS2 */
                .enable_mask = EBI2_CS2_ENABLE_MASK,
                .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
                .fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
        },
        {
                /* CS3 */
                .enable_mask = EBI2_CS3_ENABLE_MASK,
                .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
                .fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
        },
        {
                /* CS4 */
                .enable_mask = EBI2_CS4_ENABLE_MASK,
                .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
                .fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
        },
        {
                /* CS5 */
                .enable_mask = EBI2_CS5_ENABLE_MASK,
                .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
                .fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
        },
};
/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property
 * @slowreg: true if this property is in the SLOW CS config register,
 *           else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 *         property
 */
struct ebi2_xmem_prop {
        const char *prop;
        u32 max;
        bool slowreg;
        u16 shift;
};
static const struct ebi2_xmem_prop xmem_props[] = {
        {
                .prop = "qcom,xmem-recovery-cycles",
                .max = 15,
                .slowreg = true,
                .shift = EBI2_XMEM_RECOVERY_SHIFT,
        },
        {
                .prop = "qcom,xmem-write-hold-cycles",
                .max = 15,
                .slowreg = true,
                .shift = EBI2_XMEM_WR_HOLD_SHIFT,
        },
        {
                .prop = "qcom,xmem-write-delta-cycles",
                .max = 255,
                .slowreg = true,
                .shift = EBI2_XMEM_WR_DELTA_SHIFT,
        },
        {
                .prop = "qcom,xmem-read-delta-cycles",
                .max = 255,
                .slowreg = true,
                .shift = EBI2_XMEM_RD_DELTA_SHIFT,
        },
        {
                .prop = "qcom,xmem-write-wait-cycles",
                .max = 15,
                .slowreg = true,
                .shift = EBI2_XMEM_WR_WAIT_SHIFT,
        },
        {
                .prop = "qcom,xmem-read-wait-cycles",
                .max = 15,
                .slowreg = true,
                .shift = EBI2_XMEM_RD_WAIT_SHIFT,
        },
        {
                .prop = "qcom,xmem-address-hold-enable",
                .max = 1, /* boolean prop */
                .slowreg = false,
                .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
        },
        {
                .prop = "qcom,xmem-adv-to-oe-recovery-cycles",
                .max = 3,
                .slowreg = false,
                .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
        },
        {
                .prop = "qcom,xmem-read-hold-cycles",
                .max = 15,
                .slowreg = false,
                .shift = EBI2_XMEM_RD_HOLD_SHIFT,
        },
};
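
/*
 * Illustrative device tree fragment (node name and values are hypothetical,
 * and any further cells or properties required by the full binding are
 * omitted), showing how the properties above are consumed per chipselect
 * child node; only the first "reg" cell is read as the chipselect index:
 *
 *   flash@0 {
 *           reg = <0>; // chipselect index, 0..5
 *           qcom,xmem-recovery-cycles = <2>;
 *           qcom,xmem-write-hold-cycles = <1>;
 *           qcom,xmem-write-wait-cycles = <4>;
 *           qcom,xmem-read-wait-cycles = <4>;
 *           qcom,xmem-address-hold-enable = <1>;
 *   };
 */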
static void qcom_ebi2_setup_chipselect(struct device_node *np,
                                       struct device *dev,
                                       void __iomem *ebi2_base,
                                       void __iomem *ebi2_xmem,
                                       u32 csindex)
{
        const struct cs_data *csd;
        u32 slowcfg, fastcfg;
        u32 val;
        int ret;
        int i;

        csd = &cs_info[csindex];
        val = readl(ebi2_base);
        val |= csd->enable_mask;
        writel(val, ebi2_base);
        dev_dbg(dev, "enabled CS%u\n", csindex);

        /* Next set up the XMEMC */
        slowcfg = 0;
        fastcfg = 0;

        for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
                const struct ebi2_xmem_prop *xp = &xmem_props[i];

                /* All are regular u32 values */
                ret = of_property_read_u32(np, xp->prop, &val);
                if (ret) {
                        dev_dbg(dev, "could not read %s for CS%u\n",
                                xp->prop, csindex);
                        continue;
                }

                /* First check boolean props */
                if (xp->max == 1 && val) {
                        if (xp->slowreg)
                                slowcfg |= BIT(xp->shift);
                        else
                                fastcfg |= BIT(xp->shift);
                        dev_dbg(dev, "set %s flag\n", xp->prop);
                        continue;
                }

                /* We're dealing with a u32 */
                if (val > xp->max) {
                        dev_err(dev,
                                "too high value for %s: %u, capped at %u\n",
                                xp->prop, val, xp->max);
                        val = xp->max;
                }
                if (xp->slowreg)
                        slowcfg |= (val << xp->shift);
                else
                        fastcfg |= (val << xp->shift);
                dev_dbg(dev, "set %s to %u\n", xp->prop, val);
        }

        dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
                 csindex, slowcfg, fastcfg);

        if (slowcfg)
                writel(slowcfg, ebi2_xmem + csd->slow_cfg);
        if (fastcfg)
                writel(fastcfg, ebi2_xmem + csd->fast_cfg);
}
static int qcom_ebi2_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct device_node *child;
        struct device *dev = &pdev->dev;
        struct resource *res;
        void __iomem *ebi2_base;
        void __iomem *ebi2_xmem;
        struct clk *ebi2xclk;
        struct clk *ebi2clk;
        bool have_children = false;
        u32 val;
        int ret;

        ebi2xclk = devm_clk_get(dev, "ebi2x");
        if (IS_ERR(ebi2xclk))
                return PTR_ERR(ebi2xclk);

        ret = clk_prepare_enable(ebi2xclk);
        if (ret) {
                dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
                return ret;
        }

        ebi2clk = devm_clk_get(dev, "ebi2");
        if (IS_ERR(ebi2clk)) {
                ret = PTR_ERR(ebi2clk);
                goto err_disable_2x_clk;
        }

        ret = clk_prepare_enable(ebi2clk);
        if (ret) {
                dev_err(dev, "could not enable EBI2 clk (%d)\n", ret);
                goto err_disable_2x_clk;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ebi2_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(ebi2_base)) {
                ret = PTR_ERR(ebi2_base);
                goto err_disable_clk;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        ebi2_xmem = devm_ioremap_resource(dev, res);
        if (IS_ERR(ebi2_xmem)) {
                ret = PTR_ERR(ebi2_xmem);
                goto err_disable_clk;
        }

        /* Allegedly this turns the power save mode off */
        writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);

        /* Disable all chipselects */
        val = readl(ebi2_base);
        val &= ~EBI2_CSN_MASK;
        writel(val, ebi2_base);

        /* Walk over the child nodes and see what chipselects we use */
        for_each_available_child_of_node(np, child) {
                u32 csindex;

                /* Figure out the chipselect */
                ret = of_property_read_u32(child, "reg", &csindex);
                if (ret) {
                        of_node_put(child);
                        goto err_disable_clk;
                }

                if (csindex > 5) {
                        dev_err(dev,
                                "invalid chipselect %u, we only support 0-5\n",
                                csindex);
                        continue;
                }

                qcom_ebi2_setup_chipselect(child,
                                           dev,
                                           ebi2_base,
                                           ebi2_xmem,
                                           csindex);

                /* We have at least one child */
                have_children = true;
        }

        if (have_children)
                return of_platform_default_populate(np, NULL, dev);
        return 0;

err_disable_clk:
        clk_disable_unprepare(ebi2clk);
err_disable_2x_clk:
        clk_disable_unprepare(ebi2xclk);

        return ret;
}
static const struct of_device_id qcom_ebi2_of_match[] = {
        { .compatible = "qcom,msm8660-ebi2", },
        { .compatible = "qcom,apq8060-ebi2", },
        { }
};

static struct platform_driver qcom_ebi2_driver = {
        .probe = qcom_ebi2_probe,
        .driver = {
                .name = "qcom-ebi2",
                .of_match_table = qcom_ebi2_of_match,
        },
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
MODULE_LICENSE("GPL");