pm-arm.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839
  1. /*
  2. * ARM-specific support for Broadcom STB S2/S3/S5 power management
  3. *
  4. * S2: clock gate CPUs and as many peripherals as possible
* S3: power off all of the chip except the Always ON (AON) island; keep DDR in
* self-refresh
  7. * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
  8. * treat this mode like a soft power-off, with wakeup allowed from AON
  9. *
  10. * Copyright © 2014-2017 Broadcom
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License version 2 as
  14. * published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. */
  21. #define pr_fmt(fmt) "brcmstb-pm: " fmt
  22. #include <linux/bitops.h>
  23. #include <linux/compiler.h>
  24. #include <linux/delay.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/err.h>
  27. #include <linux/init.h>
  28. #include <linux/io.h>
  29. #include <linux/ioport.h>
  30. #include <linux/kconfig.h>
  31. #include <linux/kernel.h>
  32. #include <linux/memblock.h>
  33. #include <linux/module.h>
  34. #include <linux/notifier.h>
  35. #include <linux/of.h>
  36. #include <linux/of_address.h>
  37. #include <linux/platform_device.h>
  38. #include <linux/pm.h>
  39. #include <linux/printk.h>
  40. #include <linux/proc_fs.h>
  41. #include <linux/sizes.h>
  42. #include <linux/slab.h>
  43. #include <linux/sort.h>
  44. #include <linux/suspend.h>
  45. #include <linux/types.h>
  46. #include <linux/uaccess.h>
  47. #include <linux/soc/brcmstb/brcmstb.h>
  48. #include <asm/fncpy.h>
  49. #include <asm/setup.h>
  50. #include <asm/suspend.h>
  51. #include "pm.h"
  52. #include "aon_defs.h"
/* Offset of the pad-control register within each DDR SHIM-PHY block */
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0 */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1 */
#define	PWRDWN_SEQ_NO_SEQUENCING	0
#define	PWRDWN_SEQ_HOLD_CHANNEL		1
#define	PWRDWN_SEQ_RESET_PLL		2
#define	PWRDWN_SEQ_POWERDOWN_PLL	3

/* S3 power-down sequence field within SHIMPHY_DDR_PAD_CNTRL */
#define	SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define	SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

/* Bits in the DDR PHY standby-control registers */
#define	DDR_FORCE_CKE_RST_N		BIT(3)
#define	DDR_PHY_RST_N			BIT(2)
#define	DDR_PHY_CKE			BIT(1)

/* Sentinel offset meaning "this PHY has no channel B" */
#define	DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3

/* Per-memory-controller register mappings */
struct brcmstb_memc {
	void __iomem *ddr_phy_base;	/* DDR PHY control registers */
	void __iomem *ddr_shimphy_base;	/* DDR SHIM-PHY registers */
	void __iomem *ddr_ctrl;		/* DDR sequencer/control registers */
};

/* Driver-wide state, filled in by brcmstb_pm_probe() */
struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;	/* Always-ON control block */
	void __iomem *aon_sram;		/* Always-ON scratch SRAM */
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;	/* executable on-chip SRAM for S2 */
	size_t boot_sram_len;

	bool support_warm_boot;		/* S3 (warm boot) supported? */
	size_t pll_status_offset;	/* PLL status reg offset in PHY 0 */
	int num_memc;			/* number of populated memcs[] */

	struct brcmstb_s3_params *s3_params;	/* handed to bootloader */
	dma_addr_t s3_params_pa;		/* its DMA address */
	int s3entry_method;		/* 0 or 1, per DDR PHY revision */
	u32 warm_boot_offset;		/* WARM_BOOT reg offset in ddr_ctrl */
	u32 phy_a_standby_ctrl_offs;	/* channel A standby-control offset */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
	bool needs_ddr_pad;		/* SHIM-PHY pad programming needed? */
	struct platform_device *pdev;
};

/* Commands accepted by the Boot Security Processor (BSP) mailbox */
enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

#define PM_INITIATE		0x01	/* "go" bit in AON_CTRL_PM_INITIATE */
#define PM_INITIATE_SUCCESS	0x00	/* BSP status: command succeeded */
#define PM_INITIATE_FAIL	0xfe	/* BSP status: command failed */

static struct brcmstb_pm_control ctrl;

/* SRAM-resident copy of brcmstb_pm_do_s2(), set up by brcmstb_pm_s2() */
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);
  104. static int brcmstb_init_sram(struct device_node *dn)
  105. {
  106. void __iomem *sram;
  107. struct resource res;
  108. int ret;
  109. ret = of_address_to_resource(dn, 0, &res);
  110. if (ret)
  111. return ret;
  112. /* Uncached, executable remapping of SRAM */
  113. sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
  114. if (!sram)
  115. return -ENOMEM;
  116. ctrl.boot_sram = sram;
  117. ctrl.boot_sram_len = resource_size(&res);
  118. return 0;
  119. }
/* Device-tree match table for the on-chip boot SRAM node */
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};
/*
 * Issue @cmd to the Boot Security Processor (BSP) through the AON
 * PM_INITIATE mailbox register and wait for the BSP to acknowledge it.
 *
 * Returns 0 when the BSP reports PM_INITIATE_SUCCESS, non-zero on any
 * other status or on timeout.
 */
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	/* Clear the mailbox; read back so the write posts before "go" */
	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	/* Poll until the BSP clears PM_INITIATE, or give up after ~1s */
	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	/* Low byte of the mailbox holds the BSP status code */
	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}
/*
 * Perform the v1 BSP power handshake: clear the host-ready bit in
 * HOST_MISC_CMDS, then ask the BSP to stop clocks.
 *
 * Returns the (non-zero) status from do_bsp_initiate_command() on failure,
 * 0 on success.
 */
static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	/* Read back so the write posts before the BSP command is issued */
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);

	return ret;
}
/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every memory controller:
 * keep the bits selected by @mask, then OR in @value. Callers pass the
 * complement of the field mask (e.g. ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK) so
 * that the field is cleared before @value is inserted.
 *
 * No-op on chips whose DDR sequencer does not need pad programming.
 */
static inline void shimphy_set(u32 value, u32 mask)
{
	int i;

	if (!ctrl.needs_ddr_pad)
		return;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
		tmp = value | (tmp & mask);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
	}
	wmb(); /* Complete sequence in order. */
}
  190. static inline void ddr_ctrl_set(bool warmboot)
  191. {
  192. int i;
  193. for (i = 0; i < ctrl.num_memc; i++) {
  194. u32 tmp;
  195. tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
  196. ctrl.warm_boot_offset);
  197. if (warmboot)
  198. tmp |= 1;
  199. else
  200. tmp &= ~1; /* Cold boot */
  201. writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
  202. ctrl.warm_boot_offset);
  203. }
  204. /* Complete sequence in order */
  205. wmb();
  206. }
/*
 * S3 entry, method #0: gate the DDR PLL and enable the pad PLL sequence
 * in every SHIM-PHY (all other pad-control bits are overwritten since the
 * preserve-mask is all-ones).
 */
static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}
/* S3 entry, method #1: program the pad power-down sequence, then warm boot */
static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
	ddr_ctrl_set(true);
}
  225. static inline void s5entry_method1(void)
  226. {
  227. int i;
  228. /*
  229. * S5 Entry Sequence
  230. * -----------------
  231. * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
  232. * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
  233. * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
  234. * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
  235. */
  236. shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
  237. SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
  238. ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
  239. ddr_ctrl_set(false);
  240. for (i = 0; i < ctrl.num_memc; i++) {
  241. u32 tmp;
  242. /* Step 3: Channel A (RST_N = CKE = 0) */
  243. tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
  244. ctrl.phy_a_standby_ctrl_offs);
  245. tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
  246. writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
  247. ctrl.phy_a_standby_ctrl_offs);
  248. /* Step 3: Channel B? */
  249. if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
  250. tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
  251. ctrl.phy_b_standby_ctrl_offs);
  252. tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
  253. writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
  254. ctrl.phy_b_standby_ctrl_offs);
  255. }
  256. }
  257. /* Must complete */
  258. wmb();
  259. }
/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	/* S5 via method 1 needs the DDR PHY power-down sequence first */
	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	/*
	 * Older (method-0) sequencing needs a second write with PM_PWR_DOWN
	 * set; the read-backs post each write before the next step.
	 */
	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	/* Wait for interrupt; the PMSM powers us down from here */
	wfi();
}
/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		/* Method-1 chips: run the pad/warm-boot sequence by hand */
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}
  298. static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
  299. {
  300. unsigned int size = ALIGN(len, FNCPY_ALIGN);
  301. if (ctrl.boot_sram_len < size) {
  302. pr_err("standby code will not fit in SRAM\n");
  303. return NULL;
  304. }
  305. return fncpy(ctrl.boot_sram, fn, size);
  306. }
/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	/* Copy the S2 routine into SRAM and call the SRAM copy */
	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
				     ctrl.memcs[0].ddr_phy_base +
				     ctrl.pll_status_offset);
}
/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static
 * because it is called by name from the inline asm in brcmstb_pm_do_s3().
 *
 * Fills in the S3 parameter block, asks the BSP to load a key, publishes
 * the resume address and parameters through AON SRAM, then powers down via
 * the s3entry method configured for this chip. Returns a negative errno
 * on failure; on success execution resumes via cpu_resume_arm.
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is at the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	/* Preserve only the bootloader-reserved flag bits */
	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/* Load random / fixed key */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	/* Publish the magic, then the parameter-block address, in AON SRAM */
	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}
/*
 * cpu_suspend() callback: switch to the SRAM stack at @sp, run
 * brcmstb_pm_s3_finish() there, then restore the original stack.
 * Returns brcmstb_pm_s3_finish()'s return value (taken from r0).
 */
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"		/* save the current stack */
		"mov	sp, %[new]\n"		/* switch to SRAM stack */
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"		/* capture return value */
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"		/* restore original stack */
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}
  400. static int brcmstb_pm_s3(void)
  401. {
  402. void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;
  403. return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
  404. }
  405. static int brcmstb_pm_standby(bool deep_standby)
  406. {
  407. int ret;
  408. if (brcmstb_pm_handshake())
  409. return -EIO;
  410. if (deep_standby)
  411. ret = brcmstb_pm_s3();
  412. else
  413. ret = brcmstb_pm_s2();
  414. if (ret)
  415. pr_err("%s: standby failed\n", __func__);
  416. return ret;
  417. }
  418. static int brcmstb_pm_enter(suspend_state_t state)
  419. {
  420. int ret = -EINVAL;
  421. switch (state) {
  422. case PM_SUSPEND_STANDBY:
  423. ret = brcmstb_pm_standby(false);
  424. break;
  425. case PM_SUSPEND_MEM:
  426. ret = brcmstb_pm_standby(true);
  427. break;
  428. }
  429. return ret;
  430. }
  431. static int brcmstb_pm_valid(suspend_state_t state)
  432. {
  433. switch (state) {
  434. case PM_SUSPEND_STANDBY:
  435. return true;
  436. case PM_SUSPEND_MEM:
  437. return ctrl.support_warm_boot;
  438. default:
  439. return false;
  440. }
  441. }
/* Suspend callbacks registered via suspend_set_ops() in probe */
static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

/* Match table for the Always-ON control block (also the driver binding) */
static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};

/* Per-DDR-PHY-revision register layout and capabilities */
struct ddr_phy_ofdata {
	bool supports_warm_boot;	/* can this PHY do S3? */
	size_t pll_status_offset;	/* PLL status register offset */
	int s3entry_method;		/* 0 or 1 */
	u32 warm_boot_offset;		/* WARM_BOOT register offset */
	u32 phy_a_standby_ctrl_offs;	/* channel A standby control */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
};
/* DDR PHY v71.1: method 1, single-channel */
static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

/* DDR PHY v72.0: method 1, dual-channel */
static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

/* DDR PHY v225.1: method 0, no warm boot (S2 only) */
static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

/* DDR PHY v240.1: method 0, warm boot supported */
static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

/* Match table mapping DDR PHY compatibles to their layout data */
static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};
/* Per-DDR-sequencer-revision data */
struct ddr_seq_ofdata {
	bool needs_ddr_pad;	/* SHIM-PHY pad programming required? */
	u32 warm_boot_offset;	/* overrides the PHY value when non-zero */
};

/* Rev b.2.2+ sequencers: no pad programming, fixed warm-boot offset */
static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

/* Older sequencers: pad programming needed, offset comes from the PHY */
static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

/* Match table for the DDR SHIM-PHY nodes */
static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

/* Match table mapping memory-controller compatibles to sequencer data */
static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};
  550. static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
  551. int index, const void **ofdata)
  552. {
  553. struct device_node *dn;
  554. const struct of_device_id *match;
  555. dn = of_find_matching_node_and_match(NULL, matches, &match);
  556. if (!dn)
  557. return ERR_PTR(-EINVAL);
  558. if (ofdata)
  559. *ofdata = match->data;
  560. return of_io_request_and_map(dn, index, dn->full_name);
  561. }
/*
 * Panic notifier: stamp a magic value into AON SRAM so the bootloader can
 * tell the next boot follows a kernel panic. Always returns NOTIFY_DONE.
 */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

/* Registered on panic_notifier_list by brcmstb_pm_probe() */
static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};
  571. static int brcmstb_pm_probe(struct platform_device *pdev)
  572. {
  573. const struct ddr_phy_ofdata *ddr_phy_data;
  574. const struct ddr_seq_ofdata *ddr_seq_data;
  575. const struct of_device_id *of_id = NULL;
  576. struct device_node *dn;
  577. void __iomem *base;
  578. int ret, i;
  579. /* AON ctrl registers */
  580. base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
  581. if (IS_ERR(base)) {
  582. pr_err("error mapping AON_CTRL\n");
  583. return PTR_ERR(base);
  584. }
  585. ctrl.aon_ctrl_base = base;
  586. /* AON SRAM registers */
  587. base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
  588. if (IS_ERR(base)) {
  589. /* Assume standard offset */
  590. ctrl.aon_sram = ctrl.aon_ctrl_base +
  591. AON_CTRL_SYSTEM_DATA_RAM_OFS;
  592. } else {
  593. ctrl.aon_sram = base;
  594. }
  595. writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
  596. /* DDR PHY registers */
  597. base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
  598. (const void **)&ddr_phy_data);
  599. if (IS_ERR(base)) {
  600. pr_err("error mapping DDR PHY\n");
  601. return PTR_ERR(base);
  602. }
  603. ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
  604. ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
  605. /* Only need DDR PHY 0 for now? */
  606. ctrl.memcs[0].ddr_phy_base = base;
  607. ctrl.s3entry_method = ddr_phy_data->s3entry_method;
  608. ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
  609. ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
  610. /*
  611. * Slightly grosss to use the phy ver to get a memc,
  612. * offset but that is the only versioned things so far
  613. * we can test for.
  614. */
  615. ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
  616. /* DDR SHIM-PHY registers */
  617. for_each_matching_node(dn, ddr_shimphy_dt_ids) {
  618. i = ctrl.num_memc;
  619. if (i >= MAX_NUM_MEMC) {
  620. pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
  621. break;
  622. }
  623. base = of_io_request_and_map(dn, 0, dn->full_name);
  624. if (IS_ERR(base)) {
  625. if (!ctrl.support_warm_boot)
  626. break;
  627. pr_err("error mapping DDR SHIMPHY %d\n", i);
  628. return PTR_ERR(base);
  629. }
  630. ctrl.memcs[i].ddr_shimphy_base = base;
  631. ctrl.num_memc++;
  632. }
  633. /* Sequencer DRAM Param and Control Registers */
  634. i = 0;
  635. for_each_matching_node(dn, brcmstb_memc_of_match) {
  636. base = of_iomap(dn, 0);
  637. if (!base) {
  638. pr_err("error mapping DDR Sequencer %d\n", i);
  639. return -ENOMEM;
  640. }
  641. of_id = of_match_node(brcmstb_memc_of_match, dn);
  642. if (!of_id) {
  643. iounmap(base);
  644. return -EINVAL;
  645. }
  646. ddr_seq_data = of_id->data;
  647. ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
  648. /* Adjust warm boot offset based on the DDR sequencer */
  649. if (ddr_seq_data->warm_boot_offset)
  650. ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;
  651. ctrl.memcs[i].ddr_ctrl = base;
  652. i++;
  653. }
  654. pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
  655. ctrl.support_warm_boot, ctrl.s3entry_method,
  656. ctrl.warm_boot_offset);
  657. dn = of_find_matching_node(NULL, sram_dt_ids);
  658. if (!dn) {
  659. pr_err("SRAM not found\n");
  660. return -EINVAL;
  661. }
  662. ret = brcmstb_init_sram(dn);
  663. if (ret) {
  664. pr_err("error setting up SRAM for PM\n");
  665. return ret;
  666. }
  667. ctrl.pdev = pdev;
  668. ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
  669. if (!ctrl.s3_params)
  670. return -ENOMEM;
  671. ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
  672. sizeof(*ctrl.s3_params),
  673. DMA_TO_DEVICE);
  674. if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
  675. pr_err("error mapping DMA memory\n");
  676. ret = -ENOMEM;
  677. goto out;
  678. }
  679. atomic_notifier_chain_register(&panic_notifier_list,
  680. &brcmstb_pm_panic_nb);
  681. pm_power_off = brcmstb_pm_poweroff;
  682. suspend_set_ops(&brcmstb_pm_ops);
  683. return 0;
  684. out:
  685. kfree(ctrl.s3_params);
  686. pr_warn("PM: initialization failed with code %d\n", ret);
  687. return ret;
  688. }
/*
 * Driver binds on the AON control node; probe is supplied separately via
 * platform_driver_probe() since it is only expected to run once.
 */
static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);