qcom_q6v5_mss.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Qualcomm self-authenticating modem subsystem remoteproc driver
  4. *
  5. * Copyright (C) 2016 Linaro Ltd.
  6. * Copyright (C) 2014 Sony Mobile Communications AB
  7. * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/kernel.h>
  14. #include <linux/mfd/syscon.h>
  15. #include <linux/module.h>
  16. #include <linux/of_address.h>
  17. #include <linux/of_device.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/pm_domain.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/regmap.h>
  22. #include <linux/regulator/consumer.h>
  23. #include <linux/remoteproc.h>
  24. #include <linux/reset.h>
  25. #include <linux/soc/qcom/mdt_loader.h>
  26. #include <linux/iopoll.h>
  27. #include "remoteproc_internal.h"
  28. #include "qcom_common.h"
  29. #include "qcom_q6v5.h"
  30. #include <linux/qcom_scm.h>
  31. #define MPSS_CRASH_REASON_SMEM 421
  32. /* RMB Status Register Values */
  33. #define RMB_PBL_SUCCESS 0x1
  34. #define RMB_MBA_XPU_UNLOCKED 0x1
  35. #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
  36. #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
  37. #define RMB_MBA_AUTH_COMPLETE 0x4
  38. /* PBL/MBA interface registers */
  39. #define RMB_MBA_IMAGE_REG 0x00
  40. #define RMB_PBL_STATUS_REG 0x04
  41. #define RMB_MBA_COMMAND_REG 0x08
  42. #define RMB_MBA_STATUS_REG 0x0C
  43. #define RMB_PMI_META_DATA_REG 0x10
  44. #define RMB_PMI_CODE_START_REG 0x14
  45. #define RMB_PMI_CODE_LENGTH_REG 0x18
  46. #define RMB_MBA_MSS_STATUS 0x40
  47. #define RMB_MBA_ALT_RESET 0x44
  48. #define RMB_CMD_META_DATA_READY 0x1
  49. #define RMB_CMD_LOAD_READY 0x2
  50. /* QDSP6SS Register Offsets */
  51. #define QDSP6SS_RESET_REG 0x014
  52. #define QDSP6SS_GFMUX_CTL_REG 0x020
  53. #define QDSP6SS_PWR_CTL_REG 0x030
  54. #define QDSP6SS_MEM_PWR_CTL 0x0B0
  55. #define QDSP6SS_STRAP_ACC 0x110
  56. /* AXI Halt Register Offsets */
  57. #define AXI_HALTREQ_REG 0x0
  58. #define AXI_HALTACK_REG 0x4
  59. #define AXI_IDLE_REG 0x8
  60. #define HALT_ACK_TIMEOUT_MS 100
  61. /* QDSP6SS_RESET */
  62. #define Q6SS_STOP_CORE BIT(0)
  63. #define Q6SS_CORE_ARES BIT(1)
  64. #define Q6SS_BUS_ARES_ENABLE BIT(2)
  65. /* QDSP6SS_GFMUX_CTL */
  66. #define Q6SS_CLK_ENABLE BIT(1)
  67. /* QDSP6SS_PWR_CTL */
  68. #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
  69. #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
  70. #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
  71. #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
  72. #define Q6SS_ETB_SLP_NRET_N BIT(17)
  73. #define Q6SS_L2DATA_STBY_N BIT(18)
  74. #define Q6SS_SLP_RET_N BIT(19)
  75. #define Q6SS_CLAMP_IO BIT(20)
  76. #define QDSS_BHS_ON BIT(21)
  77. #define QDSS_LDO_BYP BIT(22)
  78. /* QDSP6v56 parameters */
  79. #define QDSP6v56_LDO_BYP BIT(25)
  80. #define QDSP6v56_BHS_ON BIT(24)
  81. #define QDSP6v56_CLAMP_WL BIT(21)
  82. #define QDSP6v56_CLAMP_QMC_MEM BIT(22)
  83. #define HALT_CHECK_MAX_LOOPS 200
  84. #define QDSP6SS_XO_CBCR 0x0038
  85. #define QDSP6SS_ACC_OVERRIDE_VAL 0x20
  86. /* QDSP6v65 parameters */
  87. #define QDSP6SS_SLEEP 0x3C
  88. #define QDSP6SS_BOOT_CORE_START 0x400
  89. #define QDSP6SS_BOOT_CMD 0x404
  90. #define SLEEP_CHECK_MAX_LOOPS 200
  91. #define BOOT_FSM_TIMEOUT 10000
/*
 * Runtime handle and requested operating point for one regulator supply.
 * A uV/uA value of 0 means "don't program it" (see q6v5_regulator_enable()).
 */
struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};
/*
 * Static, per-SoC description of one regulator requirement.
 * Arrays of these are terminated by an entry with a NULL supply name
 * (see the loop condition in q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};
/* Per-SoC static configuration, selected via the of_device match data */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;	/* NULL-terminated supply lists */
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;			/* NULL-terminated clock name lists */
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;			/* NULL-terminated power-domain lists */
	char **proxy_pd_names;
	int version;				/* one of the MSS_* enum values */
	bool need_mem_protection;		/* gate SCM memory-ownership calls */
	bool has_alt_reset;			/* use the PDC-assisted reset sequence */
};
/* Runtime state of one MSS/Q6 remoteproc instance */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	/* Register spaces: QDSP6SS block and the relay message buffer (RMB) */
	void __iomem *reg_base;
	void __iomem *rmb_base;

	/* syscon and per-port offsets for the AXI halt registers */
	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	/* Clock/power-domain handles with their populated counts */
	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	/* Regulator handles with their populated counts */
	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;
	bool dump_mba_loaded;	/* MBA currently loaded for the coredump path */
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA and MPSS carveouts: physical address, CPU mapping, size */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;

	bool need_mem_protection;	/* gate q6v5_xfer_mem_ownership() SCM calls */
	bool has_alt_reset;		/* PDC-assisted reset sequence in use */
	int mpss_perm;			/* current VMID ownership bitmaps of the carveouts */
	int mba_perm;
	const char *hexagon_mdt_image;	/* MPSS firmware (mdt) name */
	int version;			/* one of the MSS_* enum values */
};
/* SoC generations with distinct Q6 bring-up sequences (see q6v5proc_reset()) */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_SDM845,
};
  169. static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
  170. const struct qcom_mss_reg_res *reg_res)
  171. {
  172. int rc;
  173. int i;
  174. if (!reg_res)
  175. return 0;
  176. for (i = 0; reg_res[i].supply; i++) {
  177. regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
  178. if (IS_ERR(regs[i].reg)) {
  179. rc = PTR_ERR(regs[i].reg);
  180. if (rc != -EPROBE_DEFER)
  181. dev_err(dev, "Failed to get %s\n regulator",
  182. reg_res[i].supply);
  183. return rc;
  184. }
  185. regs[i].uV = reg_res[i].uV;
  186. regs[i].uA = reg_res[i].uA;
  187. }
  188. return i;
  189. }
  190. static int q6v5_regulator_enable(struct q6v5 *qproc,
  191. struct reg_info *regs, int count)
  192. {
  193. int ret;
  194. int i;
  195. for (i = 0; i < count; i++) {
  196. if (regs[i].uV > 0) {
  197. ret = regulator_set_voltage(regs[i].reg,
  198. regs[i].uV, INT_MAX);
  199. if (ret) {
  200. dev_err(qproc->dev,
  201. "Failed to request voltage for %d.\n",
  202. i);
  203. goto err;
  204. }
  205. }
  206. if (regs[i].uA > 0) {
  207. ret = regulator_set_load(regs[i].reg,
  208. regs[i].uA);
  209. if (ret < 0) {
  210. dev_err(qproc->dev,
  211. "Failed to set regulator mode\n");
  212. goto err;
  213. }
  214. }
  215. ret = regulator_enable(regs[i].reg);
  216. if (ret) {
  217. dev_err(qproc->dev, "Regulator enable failed\n");
  218. goto err;
  219. }
  220. }
  221. return 0;
  222. err:
  223. for (; i >= 0; i--) {
  224. if (regs[i].uV > 0)
  225. regulator_set_voltage(regs[i].reg, 0, INT_MAX);
  226. if (regs[i].uA > 0)
  227. regulator_set_load(regs[i].reg, 0);
  228. regulator_disable(regs[i].reg);
  229. }
  230. return ret;
  231. }
  232. static void q6v5_regulator_disable(struct q6v5 *qproc,
  233. struct reg_info *regs, int count)
  234. {
  235. int i;
  236. for (i = 0; i < count; i++) {
  237. if (regs[i].uV > 0)
  238. regulator_set_voltage(regs[i].reg, 0, INT_MAX);
  239. if (regs[i].uA > 0)
  240. regulator_set_load(regs[i].reg, 0);
  241. regulator_disable(regs[i].reg);
  242. }
  243. }
  244. static int q6v5_clk_enable(struct device *dev,
  245. struct clk **clks, int count)
  246. {
  247. int rc;
  248. int i;
  249. for (i = 0; i < count; i++) {
  250. rc = clk_prepare_enable(clks[i]);
  251. if (rc) {
  252. dev_err(dev, "Clock enable failed\n");
  253. goto err;
  254. }
  255. }
  256. return 0;
  257. err:
  258. for (i--; i >= 0; i--)
  259. clk_disable_unprepare(clks[i]);
  260. return rc;
  261. }
  262. static void q6v5_clk_disable(struct device *dev,
  263. struct clk **clks, int count)
  264. {
  265. int i;
  266. for (i = 0; i < count; i++)
  267. clk_disable_unprepare(clks[i]);
  268. }
  269. static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
  270. size_t pd_count)
  271. {
  272. int ret;
  273. int i;
  274. for (i = 0; i < pd_count; i++) {
  275. dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
  276. ret = pm_runtime_get_sync(pds[i]);
  277. if (ret < 0) {
  278. pm_runtime_put_noidle(pds[i]);
  279. dev_pm_genpd_set_performance_state(pds[i], 0);
  280. goto unroll_pd_votes;
  281. }
  282. }
  283. return 0;
  284. unroll_pd_votes:
  285. for (i--; i >= 0; i--) {
  286. dev_pm_genpd_set_performance_state(pds[i], 0);
  287. pm_runtime_put(pds[i]);
  288. }
  289. return ret;
  290. };
  291. static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
  292. size_t pd_count)
  293. {
  294. int i;
  295. for (i = 0; i < pd_count; i++) {
  296. dev_pm_genpd_set_performance_state(pds[i], 0);
  297. pm_runtime_put(pds[i]);
  298. }
  299. }
  300. static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
  301. bool remote_owner, phys_addr_t addr,
  302. size_t size)
  303. {
  304. struct qcom_scm_vmperm next;
  305. if (!qproc->need_mem_protection)
  306. return 0;
  307. if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
  308. return 0;
  309. if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
  310. return 0;
  311. next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
  312. next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
  313. return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
  314. current_perm, &next, 1);
  315. }
  316. static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
  317. {
  318. struct q6v5 *qproc = rproc->priv;
  319. /* MBA is restricted to a maximum size of 1M */
  320. if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
  321. dev_err(qproc->dev, "MBA firmware load failed\n");
  322. return -EINVAL;
  323. }
  324. memcpy(qproc->mba_region, fw->data, fw->size);
  325. return 0;
  326. }
  327. static int q6v5_reset_assert(struct q6v5 *qproc)
  328. {
  329. int ret;
  330. if (qproc->has_alt_reset) {
  331. reset_control_assert(qproc->pdc_reset);
  332. ret = reset_control_reset(qproc->mss_restart);
  333. reset_control_deassert(qproc->pdc_reset);
  334. } else {
  335. ret = reset_control_assert(qproc->mss_restart);
  336. }
  337. return ret;
  338. }
  339. static int q6v5_reset_deassert(struct q6v5 *qproc)
  340. {
  341. int ret;
  342. if (qproc->has_alt_reset) {
  343. reset_control_assert(qproc->pdc_reset);
  344. writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
  345. ret = reset_control_reset(qproc->mss_restart);
  346. writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
  347. reset_control_deassert(qproc->pdc_reset);
  348. } else {
  349. ret = reset_control_deassert(qproc->mss_restart);
  350. }
  351. return ret;
  352. }
  353. static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
  354. {
  355. unsigned long timeout;
  356. s32 val;
  357. timeout = jiffies + msecs_to_jiffies(ms);
  358. for (;;) {
  359. val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
  360. if (val)
  361. break;
  362. if (time_after(jiffies, timeout))
  363. return -ETIMEDOUT;
  364. msleep(1);
  365. }
  366. return val;
  367. }
  368. static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
  369. {
  370. unsigned long timeout;
  371. s32 val;
  372. timeout = jiffies + msecs_to_jiffies(ms);
  373. for (;;) {
  374. val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
  375. if (val < 0)
  376. break;
  377. if (!status && val)
  378. break;
  379. else if (status && val == status)
  380. break;
  381. if (time_after(jiffies, timeout))
  382. return -ETIMEDOUT;
  383. msleep(1);
  384. }
  385. return val;
  386. }
/*
 * Take the Hexagon core through its power-up and reset-release sequence,
 * then wait for the primary boot loader (PBL) to report success.
 *
 * The sequence depends on the QDSP6 generation: SDM845 uses the hardware
 * boot FSM, MSM8996 uses the v56 power-control layout, and everything else
 * uses the older layout.  Every write below programs QDSP6SS/RMB hardware
 * registers, so statement order is significant throughout.
 *
 * Returns 0 when PBL reports RMB_PBL_SUCCESS, negative errno otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & BIT(31)), 1,
					 SLEEP_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Wait for the FSM completion bit in the RMB MSS status */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back so the write has landed before the settle delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back so the write has landed before the settle delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
  525. static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
  526. struct regmap *halt_map,
  527. u32 offset)
  528. {
  529. unsigned long timeout;
  530. unsigned int val;
  531. int ret;
  532. /* Check if we're already idle */
  533. ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
  534. if (!ret && val)
  535. return;
  536. /* Assert halt request */
  537. regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
  538. /* Wait for halt */
  539. timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
  540. for (;;) {
  541. ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
  542. if (ret || val || time_after(jiffies, timeout))
  543. break;
  544. msleep(1);
  545. }
  546. ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
  547. if (ret || !val)
  548. dev_err(qproc->dev, "port failed halt\n");
  549. /* Clear halt request (port will remain halted until reset) */
  550. regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
  551. }
/*
 * Feed the MPSS firmware metadata (mdt headers plus hash segment) to the
 * MBA for authentication.
 *
 * The metadata is copied into a physically contiguous DMA buffer, ownership
 * of that buffer is handed to Q6 for the duration of the authentication,
 * and it is reclaimed and freed again before returning.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Hand the metadata address to the MBA and request authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}
  598. static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
  599. {
  600. if (phdr->p_type != PT_LOAD)
  601. return false;
  602. if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
  603. return false;
  604. if (!phdr->p_memsz)
  605. return false;
  606. return true;
  607. }
/*
 * Power up the subsystem and boot the modem boot authenticator (MBA).
 *
 * Resources are acquired in dependency order — active power domains, proxy
 * power domains, proxy supplies, proxy clocks, active supplies, reset
 * clocks, reset deassert, active clocks — then the MBA carveout is handed
 * to Q6 and the core is released from reset.  Any failure unwinds the
 * already-acquired resources in reverse order through the goto chain.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell PBL where the MBA image lives, then bring up the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Wait for the MBA to report it is up (any non-zero status) */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
/*
 * Undo q6v5_mba_load(): halt the AXI ports, power the subsystem back down
 * and reclaim ownership of the MBA memory region from Q6.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		/*
		 * NOTE(review): proxy votes are dropped here only when
		 * unprepare returns non-zero — presumably meaning the remote
		 * never signalled handover; confirm against qcom_q6v5.c.
		 */
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
  761. static int q6v5_reload_mba(struct rproc *rproc)
  762. {
  763. struct q6v5 *qproc = rproc->priv;
  764. const struct firmware *fw;
  765. int ret;
  766. ret = request_firmware(&fw, rproc->firmware, qproc->dev);
  767. if (ret < 0)
  768. return ret;
  769. q6v5_load(rproc, fw);
  770. ret = q6v5_mba_load(qproc);
  771. release_firmware(fw);
  772. return ret;
  773. }
  774. static int q6v5_mpss_load(struct q6v5 *qproc)
  775. {
  776. const struct elf32_phdr *phdrs;
  777. const struct elf32_phdr *phdr;
  778. const struct firmware *seg_fw;
  779. const struct firmware *fw;
  780. struct elf32_hdr *ehdr;
  781. phys_addr_t mpss_reloc;
  782. phys_addr_t boot_addr;
  783. phys_addr_t min_addr = PHYS_ADDR_MAX;
  784. phys_addr_t max_addr = 0;
  785. bool relocate = false;
  786. char *fw_name;
  787. size_t fw_name_len;
  788. ssize_t offset;
  789. size_t size = 0;
  790. void *ptr;
  791. int ret;
  792. int i;
  793. fw_name_len = strlen(qproc->hexagon_mdt_image);
  794. if (fw_name_len <= 4)
  795. return -EINVAL;
  796. fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
  797. if (!fw_name)
  798. return -ENOMEM;
  799. ret = request_firmware(&fw, fw_name, qproc->dev);
  800. if (ret < 0) {
  801. dev_err(qproc->dev, "unable to load %s\n", fw_name);
  802. goto out;
  803. }
  804. /* Initialize the RMB validator */
  805. writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
  806. ret = q6v5_mpss_init_image(qproc, fw);
  807. if (ret)
  808. goto release_firmware;
  809. ehdr = (struct elf32_hdr *)fw->data;
  810. phdrs = (struct elf32_phdr *)(ehdr + 1);
  811. for (i = 0; i < ehdr->e_phnum; i++) {
  812. phdr = &phdrs[i];
  813. if (!q6v5_phdr_valid(phdr))
  814. continue;
  815. if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
  816. relocate = true;
  817. if (phdr->p_paddr < min_addr)
  818. min_addr = phdr->p_paddr;
  819. if (phdr->p_paddr + phdr->p_memsz > max_addr)
  820. max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
  821. }
  822. /**
  823. * In case of a modem subsystem restart on secure devices, the modem
  824. * memory can be reclaimed only after MBA is loaded. For modem cold
  825. * boot this will be a nop
  826. */
  827. q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
  828. qproc->mpss_phys, qproc->mpss_size);
  829. mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
  830. qproc->mpss_reloc = mpss_reloc;
  831. /* Load firmware segments */
  832. for (i = 0; i < ehdr->e_phnum; i++) {
  833. phdr = &phdrs[i];
  834. if (!q6v5_phdr_valid(phdr))
  835. continue;
  836. offset = phdr->p_paddr - mpss_reloc;
  837. if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
  838. dev_err(qproc->dev, "segment outside memory range\n");
  839. ret = -EINVAL;
  840. goto release_firmware;
  841. }
  842. ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
  843. if (!ptr) {
  844. dev_err(qproc->dev,
  845. "unable to map memory region: %pa+%zx-%x\n",
  846. &qproc->mpss_phys, offset, phdr->p_memsz);
  847. goto release_firmware;
  848. }
  849. if (phdr->p_filesz && phdr->p_offset < fw->size) {
  850. /* Firmware is large enough to be non-split */
  851. if (phdr->p_offset + phdr->p_filesz > fw->size) {
  852. dev_err(qproc->dev,
  853. "failed to load segment %d from truncated file %s\n",
  854. i, fw_name);
  855. ret = -EINVAL;
  856. iounmap(ptr);
  857. goto release_firmware;
  858. }
  859. memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
  860. } else if (phdr->p_filesz) {
  861. /* Replace "xxx.xxx" with "xxx.bxx" */
  862. sprintf(fw_name + fw_name_len - 3, "b%02d", i);
  863. ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
  864. ptr, phdr->p_filesz);
  865. if (ret) {
  866. dev_err(qproc->dev, "failed to load %s\n", fw_name);
  867. iounmap(ptr);
  868. goto release_firmware;
  869. }
  870. release_firmware(seg_fw);
  871. }
  872. if (phdr->p_memsz > phdr->p_filesz) {
  873. memset(ptr + phdr->p_filesz, 0,
  874. phdr->p_memsz - phdr->p_filesz);
  875. }
  876. iounmap(ptr);
  877. size += phdr->p_memsz;
  878. }
  879. /* Transfer ownership of modem ddr region to q6 */
  880. ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
  881. qproc->mpss_phys, qproc->mpss_size);
  882. if (ret) {
  883. dev_err(qproc->dev,
  884. "assigning Q6 access to mpss memory failed: %d\n", ret);
  885. ret = -EAGAIN;
  886. goto release_firmware;
  887. }
  888. boot_addr = relocate ? qproc->mpss_phys : min_addr;
  889. writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
  890. writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
  891. writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
  892. ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
  893. if (ret == -ETIMEDOUT)
  894. dev_err(qproc->dev, "MPSS authentication timed out\n");
  895. else if (ret < 0)
  896. dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
  897. release_firmware:
  898. release_firmware(fw);
  899. out:
  900. kfree(fw_name);
  901. return ret < 0 ? ret : 0;
  902. }
/*
 * qcom_q6v5_dump_segment() - coredump copy callback for one MPSS segment
 * @rproc: remoteproc handle
 * @segment: segment descriptor; ->priv carries the segment index
 * @dest: destination buffer for the dump data
 *
 * If the MBA is not resident it is reloaded first and the MPSS region is
 * handed back to Linux so the memory can be read.  A segment that cannot be
 * mapped is filled with 0xff.  Once every registered segment has been copied
 * (tracked via dump_segment_mask vs dump_complete_mask) ownership is handed
 * back to Q6 and the MBA resources are reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv was stashed as the segment index at registration */
	unsigned long mask = BIT((unsigned long)segment->priv);
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);

	if (ptr) {
		memcpy(dest, ptr, segment->size);
		iounmap(ptr);
	} else {
		/* Unreadable segment: emit a recognizable fill pattern */
		memset(dest, 0xff, segment->size);
	}

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
  944. static int q6v5_start(struct rproc *rproc)
  945. {
  946. struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
  947. int xfermemop_ret;
  948. int ret;
  949. ret = q6v5_mba_load(qproc);
  950. if (ret)
  951. return ret;
  952. dev_info(qproc->dev, "MBA booted, loading mpss\n");
  953. ret = q6v5_mpss_load(qproc);
  954. if (ret)
  955. goto reclaim_mpss;
  956. ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
  957. if (ret == -ETIMEDOUT) {
  958. dev_err(qproc->dev, "start timed out\n");
  959. goto reclaim_mpss;
  960. }
  961. xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
  962. qproc->mba_phys,
  963. qproc->mba_size);
  964. if (xfermemop_ret)
  965. dev_err(qproc->dev,
  966. "Failed to reclaim mba buffer system may become unstable\n");
  967. /* Reset Dump Segment Mask */
  968. qproc->dump_segment_mask = 0;
  969. qproc->running = true;
  970. return 0;
  971. reclaim_mpss:
  972. q6v5_mba_reclaim(qproc);
  973. return ret;
  974. }
  975. static int q6v5_stop(struct rproc *rproc)
  976. {
  977. struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
  978. int ret;
  979. qproc->running = false;
  980. ret = qcom_q6v5_request_stop(&qproc->q6v5);
  981. if (ret == -ETIMEDOUT)
  982. dev_err(qproc->dev, "timed out on wait\n");
  983. q6v5_mba_reclaim(qproc);
  984. return 0;
  985. }
  986. static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
  987. {
  988. struct q6v5 *qproc = rproc->priv;
  989. int offset;
  990. offset = da - qproc->mpss_reloc;
  991. if (offset < 0 || offset + len > qproc->mpss_size)
  992. return NULL;
  993. return qproc->mpss_region + offset;
  994. }
  995. static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
  996. const struct firmware *mba_fw)
  997. {
  998. const struct firmware *fw;
  999. const struct elf32_phdr *phdrs;
  1000. const struct elf32_phdr *phdr;
  1001. const struct elf32_hdr *ehdr;
  1002. struct q6v5 *qproc = rproc->priv;
  1003. unsigned long i;
  1004. int ret;
  1005. ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
  1006. if (ret < 0) {
  1007. dev_err(qproc->dev, "unable to load %s\n",
  1008. qproc->hexagon_mdt_image);
  1009. return ret;
  1010. }
  1011. ehdr = (struct elf32_hdr *)fw->data;
  1012. phdrs = (struct elf32_phdr *)(ehdr + 1);
  1013. qproc->dump_complete_mask = 0;
  1014. for (i = 0; i < ehdr->e_phnum; i++) {
  1015. phdr = &phdrs[i];
  1016. if (!q6v5_phdr_valid(phdr))
  1017. continue;
  1018. ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
  1019. phdr->p_memsz,
  1020. qcom_q6v5_dump_segment,
  1021. (void *)i);
  1022. if (ret)
  1023. break;
  1024. qproc->dump_complete_mask |= BIT(i);
  1025. }
  1026. release_firmware(fw);
  1027. return ret;
  1028. }
/* remoteproc operations for the MSS Hexagon core */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
/*
 * qcom_msa_handover() - drop proxy votes once the modem takes over
 * @q6v5: common q6v5 state embedded in struct q6v5
 *
 * Called on the proxy-vote handover; releases the proxy clocks, regulators
 * and power domains that were only needed until the modem became
 * self-sufficient.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
  1045. static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
  1046. {
  1047. struct of_phandle_args args;
  1048. struct resource *res;
  1049. int ret;
  1050. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
  1051. qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
  1052. if (IS_ERR(qproc->reg_base))
  1053. return PTR_ERR(qproc->reg_base);
  1054. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
  1055. qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
  1056. if (IS_ERR(qproc->rmb_base))
  1057. return PTR_ERR(qproc->rmb_base);
  1058. ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
  1059. "qcom,halt-regs", 3, 0, &args);
  1060. if (ret < 0) {
  1061. dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
  1062. return -EINVAL;
  1063. }
  1064. qproc->halt_map = syscon_node_to_regmap(args.np);
  1065. of_node_put(args.np);
  1066. if (IS_ERR(qproc->halt_map))
  1067. return PTR_ERR(qproc->halt_map);
  1068. qproc->halt_q6 = args.args[0];
  1069. qproc->halt_modem = args.args[1];
  1070. qproc->halt_nc = args.args[2];
  1071. return 0;
  1072. }
  1073. static int q6v5_init_clocks(struct device *dev, struct clk **clks,
  1074. char **clk_names)
  1075. {
  1076. int i;
  1077. if (!clk_names)
  1078. return 0;
  1079. for (i = 0; clk_names[i]; i++) {
  1080. clks[i] = devm_clk_get(dev, clk_names[i]);
  1081. if (IS_ERR(clks[i])) {
  1082. int rc = PTR_ERR(clks[i]);
  1083. if (rc != -EPROBE_DEFER)
  1084. dev_err(dev, "Failed to get %s clock\n",
  1085. clk_names[i]);
  1086. return rc;
  1087. }
  1088. }
  1089. return i;
  1090. }
  1091. static int q6v5_pds_attach(struct device *dev, struct device **devs,
  1092. char **pd_names)
  1093. {
  1094. size_t num_pds = 0;
  1095. int ret;
  1096. int i;
  1097. if (!pd_names)
  1098. return 0;
  1099. while (pd_names[num_pds])
  1100. num_pds++;
  1101. for (i = 0; i < num_pds; i++) {
  1102. devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
  1103. if (IS_ERR_OR_NULL(devs[i])) {
  1104. ret = PTR_ERR(devs[i]) ? : -ENODATA;
  1105. goto unroll_attach;
  1106. }
  1107. }
  1108. return num_pds;
  1109. unroll_attach:
  1110. for (i--; i >= 0; i--)
  1111. dev_pm_domain_detach(devs[i], false);
  1112. return ret;
  1113. };
  1114. static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
  1115. size_t pd_count)
  1116. {
  1117. int i;
  1118. for (i = 0; i < pd_count; i++)
  1119. dev_pm_domain_detach(pds[i], false);
  1120. }
  1121. static int q6v5_init_reset(struct q6v5 *qproc)
  1122. {
  1123. qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
  1124. "mss_restart");
  1125. if (IS_ERR(qproc->mss_restart)) {
  1126. dev_err(qproc->dev, "failed to acquire mss restart\n");
  1127. return PTR_ERR(qproc->mss_restart);
  1128. }
  1129. if (qproc->has_alt_reset) {
  1130. qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
  1131. "pdc_reset");
  1132. if (IS_ERR(qproc->pdc_reset)) {
  1133. dev_err(qproc->dev, "failed to acquire pdc reset\n");
  1134. return PTR_ERR(qproc->pdc_reset);
  1135. }
  1136. }
  1137. return 0;
  1138. }
  1139. static int q6v5_alloc_memory_region(struct q6v5 *qproc)
  1140. {
  1141. struct device_node *child;
  1142. struct device_node *node;
  1143. struct resource r;
  1144. int ret;
  1145. child = of_get_child_by_name(qproc->dev->of_node, "mba");
  1146. node = of_parse_phandle(child, "memory-region", 0);
  1147. ret = of_address_to_resource(node, 0, &r);
  1148. if (ret) {
  1149. dev_err(qproc->dev, "unable to resolve mba region\n");
  1150. return ret;
  1151. }
  1152. of_node_put(node);
  1153. qproc->mba_phys = r.start;
  1154. qproc->mba_size = resource_size(&r);
  1155. qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
  1156. if (!qproc->mba_region) {
  1157. dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
  1158. &r.start, qproc->mba_size);
  1159. return -EBUSY;
  1160. }
  1161. child = of_get_child_by_name(qproc->dev->of_node, "mpss");
  1162. node = of_parse_phandle(child, "memory-region", 0);
  1163. ret = of_address_to_resource(node, 0, &r);
  1164. if (ret) {
  1165. dev_err(qproc->dev, "unable to resolve mpss region\n");
  1166. return ret;
  1167. }
  1168. of_node_put(node);
  1169. qproc->mpss_phys = qproc->mpss_reloc = r.start;
  1170. qproc->mpss_size = resource_size(&r);
  1171. return 0;
  1172. }
/*
 * q6v5_probe() - platform probe: build and register the MSS remoteproc
 * @pdev: platform device
 *
 * Resolves the per-SoC match data, allocates the rproc, maps registers and
 * memory regions, acquires clocks/regulators/power-domains/resets, wires up
 * the glink/smd/ssr/sysmon subdevices and finally registers the rproc.
 * Firmware names may be overridden via the "firmware-name" DT property
 * (index 0 = MBA, index 1 = modem MDT).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* Secure-world memory assignment needs SCM; defer until available */
	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	mba_image = desc->hexagon_mba_image;
	/* -EINVAL just means the optional DT property is absent */
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;

	platform_set_drvdata(pdev, qproc);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both regions start out owned by HLOS (Linux) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		/*
		 * NOTE(review): the glink/smd/ssr subdevs added above are not
		 * removed on this path (nor on rproc_add failure) — verify
		 * whether a qcom_remove_*_subdev unwind is needed here.
		 */
		ret = PTR_ERR(qproc->sysmon);
		goto detach_proxy_pds;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto detach_proxy_pds;

	return 0;

detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}
/*
 * q6v5_remove() - platform remove: tear down the remoteproc and subdevices
 * @pdev: platform device
 *
 * Unregisters the rproc, removes all subdevices, detaches the power domains
 * and releases the rproc allocation.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);

	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);

	rproc_free(qproc->rproc);

	return 0;
}
/* Per-SoC resource description: SDM845 MSS */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"prng",
			NULL
	},
	.reset_clk_names = (char*[]){
			"iface",
			"snoc_axi",
			NULL
	},
	.active_clk_names = (char*[]){
			"bus",
			"mem",
			"gpll0_mss",
			"mnoc_axi",
			NULL
	},
	.active_pd_names = (char*[]){
			"load_state",
			NULL
	},
	.proxy_pd_names = (char*[]){
			"cx",
			"mx",
			"mss",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.version = MSS_SDM845,
};
/* Per-SoC resource description: MSM8996 MSS */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			"qdss",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss",
			"snoc_axi",
			"mnoc_axi",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.version = MSS_MSM8996,
};
/* Per-SoC resource description: MSM8916 MSS */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8916,
};
/* Per-SoC resource description: MSM8974 MSS (split ".b00" MBA image) */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8974,
};
/* DT match table; "qcom,q6v5-pil" is the legacy compatible (MSM8916 data) */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
/* Platform driver glue and module registration */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");