ufs-qcom.c

/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
			       char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, (void __force *)hba->mmio_base + offset,
			len * 4, false);
}
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}
static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
			host->tx_l1_sync_clk);
		if (err)
			goto disable_rx_l1;
	}

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk);
	}
out:
	return err;
}
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time during the
	 * polling loop, so check the state once more.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * After reset deassertion, the PHY needs all ref clocks, voltage
	 * and current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev,
			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}
/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to UTP sub-modules that are not involved
 * in a specific operation. The UTP controller CGCs are disabled by default,
 * and this function enables them (after every UFS link startup) to save
 * some leakage power.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}
/*
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses SYS1CLK_1US_REG for its Interrupt
	 * Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* these two register fields must be written together */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}
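
/*
 * Worked example for the timer programming above (illustrative numbers,
 * not tied to any particular SoC): with core_clk at 150 MHz,
 * core_clk_cycles_per_us = 150000000 / USEC_PER_SEC = 150, which is the
 * value programmed into REG_UFS_SYS1CLK_1US, and core_clk_period_in_ns =
 * NSEC_PER_SEC / 150000000 = 6 (integer division), which is shifted by
 * OFFSET_CLK_NS_REG and masked into the CLK_NS field. For HS-G2 Rate A,
 * hs_fr_table_rA then supplies tx_clk_cycles_per_us = 0x3e, and both
 * fields are written together to REG_UFS_TX_SYMBOL_CLK_NS_US.
 */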
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and possibly hosts) have issues if LCC is
		 * enabled, so we set PA_Local_TX_LCC_Enable to 0 before link
		 * startup, which ensures that both host and device TX LCC are
		 * disabled once link startup is completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_dme_set(hba,
					UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
					0);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);
	}

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	hba->is_sys_suspended = false;

out:
	return err;
}
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS,
		 * the final decision (FAST/FASTAUTO) is made according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM, then set the minimum gear as the chosen
	 * working gear.
	 * if one is PWM and one is HS, then the one that is PWM gets to
	 * decide the gear, as it is the one that also decided previously
	 * what pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}
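
/*
 * Illustrative negotiation (hypothetical capabilities): suppose the device
 * advertises FAST_MODE at HS-G3 on both directions while the vendor limits
 * request FAST with hs_rx_gear = hs_tx_gear = UFS_HS_G2. Both sides are
 * then HS, so the agreed power mode is qcom_param->rx_pwr_hs and the
 * agreed gear is min(G3, G2) = UFS_HS_G2 for both rx and tx; lane counts
 * are likewise the minimum of the two sides.
 */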
#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}
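
/*
 * Example outputs of ufs_qcom_get_speed_mode() (assuming the formatted
 * string fits in BUS_VECTOR_NAME_LEN): an uninitialized power mode yields
 * "MIN"; HS Rate B at gear 3 on two lanes yields "HS_RB_G3_L2"; PWM gear 1
 * on one lane yields "PWM_G1_L1". These names are then matched against the
 * "qcom,bus-vector-names" DT property by ufs_qcom_get_bus_vote().
 */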
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock, it might be
		 * immediately after entering into hibern8, in which case
		 * we need to make sure that the device ref_clk is active
		 * for at least 1us after the hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}
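
/*
 * Hedged usage sketch, mirroring how this driver itself calls the helper:
 * enable the device ref_clk before a switch to HS and disable it once the
 * link has left HS or gone inactive (see ufs_qcom_pwr_change_notify() and
 * ufs_qcom_setup_clocks()):
 *
 *	ufs_qcom_dev_ref_clk_ctrl(host, true);
 *	...
 *	ufs_qcom_dev_ref_clk_ctrl(host, false);
 *
 * The udelay(1) calls above enforce the requirement that the device
 * ref_clk stays active for at least 1us around hibern8 enter/exit.
 */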
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			    (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * A QCOM UFS host controller might have some non-standard behaviours
 * (quirks) compared to what the UFSHCI specification requires. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major >= 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}
/**
 * ufs_qcom_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
					__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock for HS mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba))
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);

		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}
#define	ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif
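
/*
 * Example (device name is hypothetical): booting with
 * "androidboot.bootdevice=624000.ufshc" on the kernel command line makes
 * ufs_qcom_init() below return -ENODEV for any UFS controller whose
 * dev_name() is not "624000.ufshc", so only the named boot device binds.
 */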
/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/*
	 * Voting/unvoting the device ref_clk source is time consuming, hence
	 * skip unvoting it during aggressive clock gating. This clock will
	 * still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");
	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}
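
/*
 * Illustrative values: the clock scaling helpers below pass clk_cycles =
 * 150 when scaling up and 75 when scaling down, i.e. the attribute holds
 * the number of unipro core clock cycles per microsecond (matching 150 MHz
 * and 75 MHz core clocks, assuming a 1:1 mapping), while CORE_CLK_DIV_EN
 * is cleared in both cases.
 */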
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufs_qcom_update_bus_bw_vote(host);
	}

out:
	return err;
}
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
		void *priv, void (*print_fn)(struct ufs_hba *hba,
		int offset, int num_regs, char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UFS_BIT(17);
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
	else
		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UAWM;
	host->testbus.select_minor = 1;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	/*
	 * Not performing check for each individual select_major
	 * mappings of select_minor, since there is no harm in
	 * configuring a non-existent select_minor
	 */
	if (host->testbus.select_minor > 0x1F) {
		dev_err(host->hba->dev,
			"%s: 0x%05X is not a legal testbus option\n",
			__func__, host->testbus.select_minor);
		return false;
	}

	return true;
}
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 1;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;

	pm_runtime_get_sync(host->hba->dev);
	ufshcd_hold(host->hba, false);
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	ufshcd_release(host->hba);
	pm_runtime_put_sync(host->hba->dev);

	return 0;
}
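
/*
 * Illustrative usage (e.g. from a vendor debug hook, which this file does
 * not itself provide): route the DFC test bus, sub-select 3, onto the
 * test bus output and then dump it:
 *
 *	host->testbus.select_major = TSTBUS_DFC;
 *	host->testbus.select_minor = 3;
 *	ufs_qcom_testbus_config(host);
 *
 * followed by ufs_qcom_testbus_read() to print UFS_TEST_BUS.
 */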
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
			"HCI Vendor Specific Registers ");

	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
	ufs_qcom_testbus_read(hba);
}
/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
};
/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}
static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
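
/*
 * Minimal device-tree sketch (node name, unit address and the set of
 * required properties are illustrative; the authoritative list comes from
 * the platform's DT bindings):
 *
 *	ufshc@624000 {
 *		compatible = "qcom,ufshc";
 *		...
 *		clock-names = "core_clk", "rx_lane0_sync_clk",
 *			      "tx_lane0_sync_clk", ...;
 *	};
 *
 * "core_clk" is looked up in ufs_qcom_cfg_timers() and the lane sync
 * clocks in ufs_qcom_init_lane_clks().
 */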
static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");