/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode);
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}
static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}
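
/*
 * Enable the lane clocks in a fixed order (RX lane 0, TX lane 0, RX lane 1,
 * TX lane 1); on failure, unwind the already-enabled clocks in reverse order.
 */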
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
		host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
		host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
		&host->rx_l1_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
		&host->tx_l1_sync_clk);
out:
	return err;
}
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}
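
/**
 * ufs_qcom_check_hibern8 - poll the M-PHY TX FSM until it reaches HIBERN8
 * @hba: host controller instance
 *
 * Polls MPHY_TX_FSM_STATE for up to HBRN8_POLL_TOUT_MS, sleeping between
 * reads. Returns 0 when the TX FSM is in HIBERN8, a negative error code if
 * the attribute could not be read, or the unexpected FSM state otherwise.
 */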
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time while polling,
	 * so check the state once more after the timeout.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}
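
/**
 * ufs_qcom_power_up_sequence - bring the UFS PHY up
 * @hba: host controller instance
 *
 * Asserts the PHY reset, calibrates the PHY for the configured HS rate,
 * de-asserts the reset, starts the serdes and finally waits for the
 * physical coding sublayer (PCS) to be ready.
 */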
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * After reset deassertion, the PHY needs all of its ref clocks,
	 * voltage and current to settle before the serdes is started.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

out:
	return ret;
}
/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. The UTP controller CGCs are disabled by default,
 * so this function enables them (after every UFS link startup) to save some
 * leakage power.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}
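
/**
 * ufs_qcom_hce_enable_notify - host controller enable notification
 * @hba: host controller instance
 * @status: PRE_CHANGE or POST_CHANGE of host controller enable
 *
 * On PRE_CHANGE, run the PHY power up sequence and then enable the lane
 * clocks, which are sourced from the PHY PLL. On POST_CHANGE, verify that
 * the PHY moved from DISABLED to HIBERN8 and enable hardware clock gating.
 */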
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
{
	struct ufs_qcom_host *host = hba->priv;
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}
/*
 * Returns the core_clk rate (non-zero) on success and 0 on failure.
 */
static unsigned long
ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
{
	struct ufs_qcom_host *host = hba->priv;
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * The UTP controller, however, uses the SYS1CLK_1US_REG register
	 * for its interrupt aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	/* The two fields of this register must be written together */
	ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			REG_UFS_TX_SYMBOL_CLK_NS_US);
	goto out;

out_error:
	core_clk_rate = 0;
out:
	return core_clk_rate;
}
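
/*
 * Before link startup, program conservative PWM G1 timers and set the link
 * startup timer to 100 ms worth of core_clk cycles. For a hypothetical
 * 100 MHz core_clk this comes to (100000000 / MSEC_PER_SEC) * 100 =
 * 10000000 cycles.
 */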
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
{
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_100ms;

	switch (status) {
	case PRE_CHANGE:
		core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
						SLOWAUTO_MODE, 0);
		if (!core_clk_rate) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			return -EINVAL;
		}
		core_clk_cycles_per_100ms =
			(core_clk_rate / MSEC_PER_SEC) * 100;
		ufshcd_writel(hba, core_clk_cycles_per_100ms,
					REG_UFS_PA_LINK_STARTUP_TIMER);
		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return 0;
}
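
/*
 * Suspend handler: if the link is already off, the lane clocks and the PHY
 * can be turned off completely and the PHY held in reset; if the link is
 * merely inactive, only the PHY is powered off.
 */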
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba))
		phy_power_off(phy);

out:
	return ret;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}
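
/*
 * Vendor-imposed capabilities and preferences used when negotiating the
 * power mode with the device in ufs_qcom_get_pwr_dev_param().
 */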
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};
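
/**
 * ufs_qcom_get_pwr_dev_param - agree on the power mode parameters
 * @qcom_param: vendor capabilities and preferences
 * @dev_max: maximum capabilities reported by the device
 * @agreed_pwr: negotiated parameters to be programmed
 *
 * Takes the minimum gear and lane count supported by both sides. HS is
 * chosen only when both the device and the vendor preferences allow it;
 * otherwise the PWM settings decide. Returns 0 on success and -ENOTSUPP
 * when the vendor requests HS but the device does not support it.
 */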
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * The device doesn't support HS but qcom_param->desired_working_mode
	 * is HS, so the device and qcom_param don't agree.
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * Since the device supports HS, it supports FAST_MODE.
		 * Since qcom_param->desired_working_mode is also HS, the
		 * final decision (FAST/FASTAUTO) is made according to
		 * qcom_param as it is the restricting factor.
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * Here qcom_param->desired_working_mode is PWM.
		 * It doesn't matter whether the device supports HS or PWM:
		 * in both cases qcom_param->desired_working_mode determines
		 * the mode.
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * TX should work in the minimum number of lanes between the device
	 * capability and the vendor preference; the same applies to RX.
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * If the device capabilities and the vendor pre-defined preferences
	 * are both HS or both PWM, then take the minimum gear as the chosen
	 * working gear.
	 * If one is PWM and the other is HS, the PWM side gets to decide the
	 * gear, as it is the one that also decided previously what power
	 * mode the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;

	return 0;
}
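
/*
 * Translate the cached power mode parameters into a bus vector name (e.g.
 * "MIN" or "HS_RB_G2_L1"), look up the matching vote index and apply it.
 */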
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				bool status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}
		break;
	case POST_CHANGE:
		if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * QCOM UFS host controllers might have some non-standard behaviours (quirks)
 * compared to what the UFSHCI specification mandates. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
	}

	if (host->hw_ver.major >= 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	if (host->hw_ver.major >= 0x2)
		host->caps = UFS_QCOM_CAP_QUNIPRO;
}
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote)
		host->bus_vote.curr_vote = vote;

	return err;
}
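
/*
 * Build the bus vector name for the given power mode: "MIN" when the power
 * mode is not initialized, "HS_R<A|B>_G<gear>_L<lanes>" (e.g. "HS_RA_G1_L1")
 * for high speed modes and "PWM_G<gear>_L<lanes>" for PWM modes.
 */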
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = hba->priv;
	int err = 0;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * ufs_qcom_setup_clocks() is called again from ufs_qcom_init()
	 * once initialization is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock */
		ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba)) {
			/* turn off UFS local PHY ref_clk */
			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
			/* disable device ref_clk */
			ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
		}
		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}
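
/*
 * "max_bus_bw" sysfs attribute: writing a non-zero value makes
 * ufs_qcom_get_bus_vote() pick the "MAX" bus vector for every speed mode
 * except "MIN". A hypothetical usage example (the exact path depends on
 * the platform device name):
 *	echo 1 > /sys/devices/.../max_bus_bw
 */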
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = hba->priv;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = hba->priv;
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}
static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#define ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

static int get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power up failure and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_qcom_host *host;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	hba->priv = (void *)host;

	host->generic_phy = devm_phy_get(dev, "ufsphy");
	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	hba->priv = NULL;
out:
	return err;
}
static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}

static
void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;

	if (!dev_req_params)
		return;

	ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
			    dev_req_params->pwr_rx,
			    dev_req_params->hs_rate);
}
/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
};
EXPORT_SYMBOL(ufs_hba_qcom_vops);