dsi_host.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003
  1. /*
  2. * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <video/mipi_display.h>

#include "dsi.h"
#include "dsi.xml.h"
  27. #define MSM_DSI_VER_MAJOR_V2 0x02
  28. #define MSM_DSI_VER_MAJOR_6G 0x03
  29. #define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
  30. #define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
  31. #define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
  32. #define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
  33. #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
  34. #define DSI_6G_REG_SHIFT 4
/* Per-SoC description of one DSI controller generation; matched against
 * the hardware version read at probe time by dsi_get_config().
 */
struct dsi_config {
	u32 major;			/* MSM_DSI_VER_MAJOR_* */
	u32 minor;			/* MSM_DSI_6G_VER_MINOR_* (0 on pre-6G parts) */
	u32 io_offset;			/* register offset: DSI_6G_REG_SHIFT on 6G, else 0 */
	struct dsi_reg_config reg_cfg;	/* regulators this controller needs */
};
/* Table of all supported controller versions. dsi_get_config() scans it
 * linearly for a (major, minor) match. Voltage/load figures are in
 * uV / uA as consumed by dsi_regulator_init() and the enable/disable
 * paths; -1 means "no constraint" for that field.
 */
static const struct dsi_config dsi_cfgs[] = {
	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
	{ /* 8974 v1 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8974 v2 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8974 v3 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8084 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8916 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				/* note: 8916 uses a lower 2.85V vdd than the 8974 family */
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 2850000, 2850000, 100000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
};
  114. static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  115. {
  116. u32 ver;
  117. u32 ver_6g;
  118. if (!major || !minor)
  119. return -EINVAL;
  120. /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
  121. * makes all other registers 4-byte shifted down.
  122. */
  123. ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
  124. if (ver_6g == 0) {
  125. ver = msm_readl(base + REG_DSI_VERSION);
  126. ver = FIELD(ver, DSI_VERSION_MAJOR);
  127. if (ver <= MSM_DSI_VER_MAJOR_V2) {
  128. /* old versions */
  129. *major = ver;
  130. *minor = 0;
  131. return 0;
  132. } else {
  133. return -EINVAL;
  134. }
  135. } else {
  136. ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  137. ver = FIELD(ver, DSI_VERSION_MAJOR);
  138. if (ver == MSM_DSI_VER_MAJOR_6G) {
  139. /* 6G version */
  140. *major = ver;
  141. *minor = ver_6g;
  142. return 0;
  143. } else {
  144. return -EINVAL;
  145. }
  146. }
  147. }
  148. #define DSI_ERR_STATE_ACK 0x0000
  149. #define DSI_ERR_STATE_TIMEOUT 0x0001
  150. #define DSI_ERR_STATE_DLN0_PHY 0x0002
  151. #define DSI_ERR_STATE_FIFO 0x0004
  152. #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
  153. #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
  154. #define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
  155. #define DSI_CLK_CTRL_ENABLE_CLKS \
  156. (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  157. DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  158. DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  159. DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
/* Driver state for one DSI host controller instance. */
struct msm_dsi_host {
	struct mipi_dsi_host base;	/* embedded generic DSI host */
	struct platform_device *pdev;
	struct drm_device *dev;
	int id;				/* host index */

	void __iomem *ctrl_base;	/* controller register block */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	/* Bus clocks, enabled/disabled together in dsi_bus_clk_*() */
	struct clk *mdp_core_clk;
	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *mmss_misc_ahb_clk;

	/* Link clocks, rate set from the current mode in dsi_link_clk_enable() */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;
	u32 byte_clk_rate;		/* computed by dsi_calc_clk_rate() */

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct dsi_config *cfg;	/* matched entry from dsi_cfgs[] */

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	struct mutex clk_mutex;		/* serializes dsi_clk_ctrl() */
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;		/* DSI_ERR_STATE_* bits for err_work */
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	struct drm_gem_object *tx_gem_obj;	/* DMA command TX buffer */
	u8 *rx_buf;				/* command RX buffer */

	struct drm_display_mode *mode;

	/* Panel info */
	struct device_node *panel_node;
	unsigned int channel;		/* virtual channel */
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;	/* MIPI_DSI_MODE_* */

	u32 dma_cmd_ctrl_restore;	/* saved REG_DSI_CMD_DMA_CTRL value */

	bool registered;
	bool power_on;
	int irq;
};
  203. static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
  204. {
  205. switch (fmt) {
  206. case MIPI_DSI_FMT_RGB565: return 16;
  207. case MIPI_DSI_FMT_RGB666_PACKED: return 18;
  208. case MIPI_DSI_FMT_RGB666:
  209. case MIPI_DSI_FMT_RGB888:
  210. default: return 24;
  211. }
  212. }
  213. static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
  214. {
  215. return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
  216. }
  217. static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
  218. {
  219. msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
  220. }
  221. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
  222. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
  223. static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
  224. {
  225. const struct dsi_config *cfg;
  226. struct regulator *gdsc_reg;
  227. int i, ret;
  228. u32 major = 0, minor = 0;
  229. gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
  230. if (IS_ERR(gdsc_reg)) {
  231. pr_err("%s: cannot get gdsc\n", __func__);
  232. goto fail;
  233. }
  234. ret = regulator_enable(gdsc_reg);
  235. if (ret) {
  236. pr_err("%s: unable to enable gdsc\n", __func__);
  237. regulator_put(gdsc_reg);
  238. goto fail;
  239. }
  240. ret = clk_prepare_enable(msm_host->ahb_clk);
  241. if (ret) {
  242. pr_err("%s: unable to enable ahb_clk\n", __func__);
  243. regulator_disable(gdsc_reg);
  244. regulator_put(gdsc_reg);
  245. goto fail;
  246. }
  247. ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
  248. clk_disable_unprepare(msm_host->ahb_clk);
  249. regulator_disable(gdsc_reg);
  250. regulator_put(gdsc_reg);
  251. if (ret) {
  252. pr_err("%s: Invalid version\n", __func__);
  253. goto fail;
  254. }
  255. for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
  256. cfg = dsi_cfgs + i;
  257. if ((cfg->major == major) && (cfg->minor == minor))
  258. return cfg;
  259. }
  260. pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
  261. fail:
  262. return NULL;
  263. }
/* Upcast from the generic mipi_dsi_host embedded in msm_dsi_host. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
  268. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
  269. {
  270. struct regulator_bulk_data *s = msm_host->supplies;
  271. const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
  272. int num = msm_host->cfg->reg_cfg.num;
  273. int i;
  274. DBG("");
  275. for (i = num - 1; i >= 0; i--)
  276. if (regs[i].disable_load >= 0)
  277. regulator_set_load(s[i].consumer,
  278. regs[i].disable_load);
  279. regulator_bulk_disable(num, s);
  280. }
  281. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
  282. {
  283. struct regulator_bulk_data *s = msm_host->supplies;
  284. const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
  285. int num = msm_host->cfg->reg_cfg.num;
  286. int ret, i;
  287. DBG("");
  288. for (i = 0; i < num; i++) {
  289. if (regs[i].enable_load >= 0) {
  290. ret = regulator_set_load(s[i].consumer,
  291. regs[i].enable_load);
  292. if (ret < 0) {
  293. pr_err("regulator %d set op mode failed, %d\n",
  294. i, ret);
  295. goto fail;
  296. }
  297. }
  298. }
  299. ret = regulator_bulk_enable(num, s);
  300. if (ret < 0) {
  301. pr_err("regulator enable failed, %d\n", ret);
  302. goto fail;
  303. }
  304. return 0;
  305. fail:
  306. for (i--; i >= 0; i--)
  307. regulator_set_load(s[i].consumer, regs[i].disable_load);
  308. return ret;
  309. }
  310. static int dsi_regulator_init(struct msm_dsi_host *msm_host)
  311. {
  312. struct regulator_bulk_data *s = msm_host->supplies;
  313. const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
  314. int num = msm_host->cfg->reg_cfg.num;
  315. int i, ret;
  316. for (i = 0; i < num; i++)
  317. s[i].supply = regs[i].name;
  318. ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
  319. if (ret < 0) {
  320. pr_err("%s: failed to init regulator, ret=%d\n",
  321. __func__, ret);
  322. return ret;
  323. }
  324. for (i = 0; i < num; i++) {
  325. if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
  326. ret = regulator_set_voltage(s[i].consumer,
  327. regs[i].min_voltage, regs[i].max_voltage);
  328. if (ret < 0) {
  329. pr_err("regulator %d set voltage failed, %d\n",
  330. i, ret);
  331. return ret;
  332. }
  333. }
  334. }
  335. return 0;
  336. }
/*
 * Look up all bus and link clocks by name. Clock lifetimes are
 * device-managed (devm), so nothing needs releasing on failure; the
 * first missing clock aborts initialization with its errno.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	int ret = 0;

	/* Bus clocks */
	msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
	if (IS_ERR(msm_host->mdp_core_clk)) {
		ret = PTR_ERR(msm_host->mdp_core_clk);
		pr_err("%s: Unable to get mdp core clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(msm_host->ahb_clk)) {
		ret = PTR_ERR(msm_host->ahb_clk);
		pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
	if (IS_ERR(msm_host->axi_clk)) {
		ret = PTR_ERR(msm_host->axi_clk);
		pr_err("%s: Unable to get axi bus clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
	if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
		ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
		pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	/* Link clocks: on failure the pointer is cleared so it never holds
	 * an ERR_PTR, although ret still aborts the init either way.
	 */
	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src");
	if (IS_ERR(msm_host->byte_clk_src)) {
		ret = PTR_ERR(msm_host->byte_clk_src);
		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
		msm_host->byte_clk_src = NULL;
		goto exit;
	}

	msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src");
	if (IS_ERR(msm_host->pixel_clk_src)) {
		ret = PTR_ERR(msm_host->pixel_clk_src);
		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
		msm_host->pixel_clk_src = NULL;
		goto exit;
	}

exit:
	return ret;
}
  410. static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
  411. {
  412. int ret;
  413. DBG("id=%d", msm_host->id);
  414. ret = clk_prepare_enable(msm_host->mdp_core_clk);
  415. if (ret) {
  416. pr_err("%s: failed to enable mdp_core_clock, %d\n",
  417. __func__, ret);
  418. goto core_clk_err;
  419. }
  420. ret = clk_prepare_enable(msm_host->ahb_clk);
  421. if (ret) {
  422. pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
  423. goto ahb_clk_err;
  424. }
  425. ret = clk_prepare_enable(msm_host->axi_clk);
  426. if (ret) {
  427. pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
  428. goto axi_clk_err;
  429. }
  430. ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
  431. if (ret) {
  432. pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
  433. __func__, ret);
  434. goto misc_ahb_clk_err;
  435. }
  436. return 0;
  437. misc_ahb_clk_err:
  438. clk_disable_unprepare(msm_host->axi_clk);
  439. axi_clk_err:
  440. clk_disable_unprepare(msm_host->ahb_clk);
  441. ahb_clk_err:
  442. clk_disable_unprepare(msm_host->mdp_core_clk);
  443. core_clk_err:
  444. return ret;
  445. }
  446. static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
  447. {
  448. DBG("");
  449. clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
  450. clk_disable_unprepare(msm_host->axi_clk);
  451. clk_disable_unprepare(msm_host->ahb_clk);
  452. clk_disable_unprepare(msm_host->mdp_core_clk);
  453. }
/*
 * Set the link clock rates from the current mode and enable the link
 * clocks (esc, then byte, then pixel), unwinding on failure.
 *
 * byte_clk_rate must already have been computed by dsi_calc_clk_rate().
 */
static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	/* Rates must be programmed before the clocks are turned on. */
	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz; the clock framework wants Hz. */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
/* Turn off the link clocks enabled by dsi_link_clk_enable(). */
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->pixel_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}
  498. static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
  499. {
  500. int ret = 0;
  501. mutex_lock(&msm_host->clk_mutex);
  502. if (enable) {
  503. ret = dsi_bus_clk_enable(msm_host);
  504. if (ret) {
  505. pr_err("%s: Can not enable bus clk, %d\n",
  506. __func__, ret);
  507. goto unlock_ret;
  508. }
  509. ret = dsi_link_clk_enable(msm_host);
  510. if (ret) {
  511. pr_err("%s: Can not enable link clk, %d\n",
  512. __func__, ret);
  513. dsi_bus_clk_disable(msm_host);
  514. goto unlock_ret;
  515. }
  516. } else {
  517. dsi_link_clk_disable(msm_host);
  518. dsi_bus_clk_disable(msm_host);
  519. }
  520. unlock_ret:
  521. mutex_unlock(&msm_host->clk_mutex);
  522. return ret;
  523. }
  524. static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
  525. {
  526. struct drm_display_mode *mode = msm_host->mode;
  527. u8 lanes = msm_host->lanes;
  528. u32 bpp = dsi_get_bpp(msm_host->format);
  529. u32 pclk_rate;
  530. if (!mode) {
  531. pr_err("%s: mode not set\n", __func__);
  532. return -EINVAL;
  533. }
  534. pclk_rate = mode->clock * 1000;
  535. if (lanes > 0) {
  536. msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
  537. } else {
  538. pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
  539. msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
  540. }
  541. DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
  542. return 0;
  543. }
/* Pulse the PHY software reset bit. The delay values appear to be
 * hardware settling requirements — do not shorten without confirming
 * against the HW programming guide.
 */
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
  554. static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
  555. {
  556. u32 intr;
  557. unsigned long flags;
  558. spin_lock_irqsave(&msm_host->intr_lock, flags);
  559. intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
  560. if (enable)
  561. intr |= mask;
  562. else
  563. intr &= ~mask;
  564. DBG("intr=%x enable=%d", intr, enable);
  565. dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
  566. spin_unlock_irqrestore(&msm_host->intr_lock, flags);
  567. }
  568. static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
  569. {
  570. if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  571. return BURST_MODE;
  572. else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  573. return NON_BURST_SYNCH_PULSE;
  574. return NON_BURST_SYNCH_EVENT;
  575. }
  576. static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
  577. const enum mipi_dsi_pixel_format mipi_fmt)
  578. {
  579. switch (mipi_fmt) {
  580. case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
  581. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
  582. case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
  583. case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
  584. default: return VID_DST_FORMAT_RGB888;
  585. }
  586. }
  587. static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
  588. const enum mipi_dsi_pixel_format mipi_fmt)
  589. {
  590. switch (mipi_fmt) {
  591. case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
  592. case MIPI_DSI_FMT_RGB666_PACKED:
  593. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
  594. case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
  595. default: return CMD_DST_FORMAT_RGB888;
  596. }
  597. }
  598. static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
  599. u32 clk_pre, u32 clk_post)
  600. {
  601. u32 flags = msm_host->mode_flags;
  602. enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
  603. u32 data = 0;
  604. if (!enable) {
  605. dsi_write(msm_host, REG_DSI_CTRL, 0);
  606. return;
  607. }
  608. if (flags & MIPI_DSI_MODE_VIDEO) {
  609. if (flags & MIPI_DSI_MODE_VIDEO_HSE)
  610. data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
  611. if (flags & MIPI_DSI_MODE_VIDEO_HFP)
  612. data |= DSI_VID_CFG0_HFP_POWER_STOP;
  613. if (flags & MIPI_DSI_MODE_VIDEO_HBP)
  614. data |= DSI_VID_CFG0_HBP_POWER_STOP;
  615. if (flags & MIPI_DSI_MODE_VIDEO_HSA)
  616. data |= DSI_VID_CFG0_HSA_POWER_STOP;
  617. /* Always set low power stop mode for BLLP
  618. * to let command engine send packets
  619. */
  620. data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
  621. DSI_VID_CFG0_BLLP_POWER_STOP;
  622. data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
  623. data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
  624. data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
  625. dsi_write(msm_host, REG_DSI_VID_CFG0, data);
  626. /* Do not swap RGB colors */
  627. data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  628. dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
  629. } else {
  630. /* Do not swap RGB colors */
  631. data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
  632. data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
  633. dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
  634. data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
  635. DSI_CMD_CFG1_WR_MEM_CONTINUE(
  636. MIPI_DCS_WRITE_MEMORY_CONTINUE);
  637. /* Always insert DCS command */
  638. data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
  639. dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
  640. }
  641. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
  642. DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
  643. DSI_CMD_DMA_CTRL_LOW_POWER);
  644. data = 0;
  645. /* Always assume dedicated TE pin */
  646. data |= DSI_TRIG_CTRL_TE;
  647. data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
  648. data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
  649. data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
  650. if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
  651. (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
  652. data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
  653. dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
  654. data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
  655. DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
  656. dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
  657. data = 0;
  658. if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
  659. data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
  660. dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
  661. /* allow only ack-err-status to generate interrupt */
  662. dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
  663. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  664. dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
  665. data = DSI_CTRL_CLK_EN;
  666. DBG("lane number=%d", msm_host->lanes);
  667. if (msm_host->lanes == 2) {
  668. data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
  669. /* swap lanes for 2-lane panel for better performance */
  670. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  671. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
  672. } else {
  673. /* Take 4 lanes as default */
  674. data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
  675. DSI_CTRL_LANE3;
  676. /* Do not swap lanes for 4-lane panel */
  677. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  678. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
  679. }
  680. if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
  681. dsi_write(msm_host, REG_DSI_LANE_CTRL,
  682. DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
  683. data |= DSI_CTRL_ENABLE;
  684. dsi_write(msm_host, REG_DSI_CTRL, data);
  685. }
/*
 * Program the display timing registers from the current mode.
 *
 * In video mode the active window and sync positions are written
 * relative to sync start (hs_start/vs_start are taken as 0); in command
 * mode only the MDP stream word count and totals are needed.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;

	/* sync pulse widths */
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;

	/* active window, expressed relative to sync start */
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;

	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;

	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
/*
 * Soft-reset the DSI controller.  The clocks must be enabled for the
 * reset to take effect, hence the explicit clock-enable write and the
 * barriers enforcing enable -> assert -> deassert ordering.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
/*
 * Enable/disable the controller in either video or command operating mode.
 *
 * On disable, both mode-enable bits and the master enable are cleared and
 * the related completion interrupts are masked.  On enable, only command
 * mode unmasks CMD_MDP_DONE here; the video-done interrupt is toggled on
 * demand by dsi_wait4video_done().
 */
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
				bool video_mode, bool enable)
{
	u32 dsi_ctrl;

	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);

	if (!enable) {
		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
				DSI_CTRL_CMD_MODE_EN);
		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
				DSI_IRQ_MASK_VIDEO_DONE, 0);
	} else {
		if (video_mode) {
			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
		} else {		/* command mode */
			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
		}
		dsi_ctrl |= DSI_CTRL_ENABLE;
	}

	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
}
  760. static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
  761. {
  762. u32 data;
  763. data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
  764. if (mode == 0)
  765. data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
  766. else
  767. data |= DSI_CMD_DMA_CTRL_LOW_POWER;
  768. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
  769. }
/*
 * Block until the hardware signals video-done (or a 70 ms timeout).
 * The VIDEO_DONE interrupt is only unmasked for the duration of the
 * wait; the timeout result is deliberately not checked so a stalled
 * video engine cannot wedge the caller.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
  778. static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
  779. {
  780. if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
  781. return;
  782. if (msm_host->power_on) {
  783. dsi_wait4video_done(msm_host);
  784. /* delay 4 ms to skip BLLP */
  785. usleep_range(2000, 4000);
  786. }
  787. }
  788. /* dsi_cmd */
  789. static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
  790. {
  791. struct drm_device *dev = msm_host->dev;
  792. int ret;
  793. u32 iova;
  794. mutex_lock(&dev->struct_mutex);
  795. msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
  796. if (IS_ERR(msm_host->tx_gem_obj)) {
  797. ret = PTR_ERR(msm_host->tx_gem_obj);
  798. pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
  799. msm_host->tx_gem_obj = NULL;
  800. mutex_unlock(&dev->struct_mutex);
  801. return ret;
  802. }
  803. ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
  804. if (ret) {
  805. pr_err("%s: failed to get iova, %d\n", __func__, ret);
  806. return ret;
  807. }
  808. mutex_unlock(&dev->struct_mutex);
  809. if (iova & 0x07) {
  810. pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
  811. return -EINVAL;
  812. }
  813. return 0;
  814. }
/*
 * Release the TX staging buffer: drop its iova mapping, then free the
 * GEM object under struct_mutex.  Safe to call when nothing was
 * allocated (tx_gem_obj is NULL).
 */
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
	struct drm_device *dev = msm_host->dev;

	if (msm_host->tx_gem_obj) {
		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
		mutex_lock(&dev->struct_mutex);
		msm_gem_free_object(msm_host->tx_gem_obj);
		msm_host->tx_gem_obj = NULL;
		mutex_unlock(&dev->struct_mutex);
	}
}
/*
 * prepare cmd buffer to be txed
 *
 * Builds the MSM-specific DMA command layout in the TX GEM buffer:
 *   bytes 0-2: DSI packet header reordered as header[1], header[2],
 *              header[0]
 *   byte 3:    flags - BIT(7) last packet, BIT(6) long packet,
 *              BIT(5) a read response is expected
 *   byte 4+:   long-packet payload, padded with 0xff up to a 4-byte
 *              boundary
 *
 * Returns the padded length on success, a negative errno on failure.
 */
static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
			const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}

	/* round up to the 4-byte DMA granularity */
	len = (packet.size + 3) & (~0x3);

	if (len > tx_gem->size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	data = msm_gem_vaddr(tx_gem);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
		return ret;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	return len;
}
  869. /*
  870. * dsi_short_read1_resp: 1 parameter
  871. */
  872. static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  873. {
  874. u8 *data = msg->rx_buf;
  875. if (data && (msg->rx_len >= 1)) {
  876. *data = buf[1]; /* strip out dcs type */
  877. return 1;
  878. } else {
  879. pr_err("%s: read data does not match with rx_buf len %zu\n",
  880. __func__, msg->rx_len);
  881. return -EINVAL;
  882. }
  883. }
  884. /*
  885. * dsi_short_read2_resp: 2 parameter
  886. */
  887. static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  888. {
  889. u8 *data = msg->rx_buf;
  890. if (data && (msg->rx_len >= 2)) {
  891. data[0] = buf[1]; /* strip out dcs type */
  892. data[1] = buf[2];
  893. return 2;
  894. } else {
  895. pr_err("%s: read data does not match with rx_buf len %zu\n",
  896. __func__, msg->rx_len);
  897. return -EINVAL;
  898. }
  899. }
  900. static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  901. {
  902. /* strip out 4 byte dcs header */
  903. if (msg->rx_buf && msg->rx_len)
  904. memcpy(msg->rx_buf, buf + 4, msg->rx_len);
  905. return msg->rx_len;
  906. }
/*
 * Kick a staged command packet out over DMA and wait for completion.
 *
 * In video mode the transfer is deferred (via dsi_wait4video_eng_busy())
 * so it lands in the BLLP.  The trigger itself is coordinated by the DSI
 * manager; when the manager reports that this host did not trigger, there
 * is nothing to wait for and len is returned as success.
 *
 * Returns len on success, -ETIMEDOUT if the DMA-done irq never fired
 * within 200 ms, or a negative errno from the iova lookup.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	int ret;
	u32 iova;
	bool triggered;

	ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
	if (ret) {
		pr_err("%s: failed to get iova: %d\n", __func__, ret);
		return ret;
	}

	reinit_completion(&msm_host->dma_comp);

	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, iova, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;

	return ret;
}
/*
 * Drain one pass of read-back data from the 4 x 32-bit RDBK_DATA
 * registers into @buf, skipping bytes that repeat from a previous pass.
 *
 * @rx_byte:  bytes expected from the hardware on this pass
 * @pkt_size: max-return-packet-size currently set on the panel
 * Returns the number of new bytes appended to @buf.
 *
 * NOTE(review): 'lp' is assigned but never used.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* registers are read highest-index first into the staging array */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* append only the fresh bytes to the caller's buffer */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
  976. static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
  977. const struct mipi_dsi_msg *msg)
  978. {
  979. int len, ret;
  980. int bllp_len = msm_host->mode->hdisplay *
  981. dsi_get_bpp(msm_host->format) / 8;
  982. len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
  983. if (!len) {
  984. pr_err("%s: failed to add cmd type = 0x%x\n",
  985. __func__, msg->type);
  986. return -EINVAL;
  987. }
  988. /* for video mode, do not send cmds more than
  989. * one pixel line, since it only transmit it
  990. * during BLLP.
  991. */
  992. /* TODO: if the command is sent in LP mode, the bit rate is only
  993. * half of esc clk rate. In this case, if the video is already
  994. * actively streaming, we need to check more carefully if the
  995. * command can be fit into one BLLP.
  996. */
  997. if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
  998. pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
  999. __func__, len);
  1000. return -EINVAL;
  1001. }
  1002. ret = dsi_cmd_dma_tx(msm_host, len);
  1003. if (ret < len) {
  1004. pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
  1005. __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
  1006. return -ECOMM;
  1007. }
  1008. return len;
  1009. }
/*
 * Recover the controller (used after an MDP fifo underflow): disable it,
 * cycle the reset with clocks running, then restore the previous
 * DSI_CTRL value.  The barriers enforce the required
 * disable -> clock-on -> reset -> re-enable ordering.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
/*
 * Deferred error handler, run on msm_host->workqueue.  dsi_error()
 * masks the error interrupt before queueing this work, so
 * err_work_state can be read and cleared here without racing the irq.
 * An MDP fifo underflow is the only condition that triggers active
 * recovery (sw reset + restore).
 */
static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err_ratelimited("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset_restore(msm_host);

	/* It is safe to clear here because error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}
/* Ack panel acknowledge/error reports and latch them into err_work_state. */
static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
		/* Writing of an extra 0 needed to clear error bits */
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
	}
}
/* Ack timeout conditions and latch them into err_work_state. */
static void dsi_timeout_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
	if (status) {
		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
	}
}
/* Ack data-lane-0 PHY errors and latch them into err_work_state. */
static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);

	if (status) {
		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
	}
}
/*
 * Ack fifo under/overflow.  A CMD_MDP fifo underflow additionally sets
 * the state bit that makes dsi_err_worker() perform a sw reset+restore.
 */
static void dsi_fifo_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);

	/* fifo underflow, overflow */
	if (status) {
		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
			msm_host->err_work_state |=
				DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
	}
}
/* Ack interleave-operation contention reported in DSI_STATUS0. */
static void dsi_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_STATUS0);

	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
		dsi_write(msm_host, REG_DSI_STATUS0, status);
		msm_host->err_work_state |=
			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
	}
}
/* Ack a PLL-unlock indication from DSI_CLK_STATUS. */
static void dsi_clk_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);

	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
	}
}
/*
 * Error interrupt dispatcher: mask further error irqs, sample and ack
 * every error status register into err_work_state, then defer recovery
 * to dsi_err_worker() (which re-enables the error irq when done).
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
/*
 * Top-level DSI interrupt handler.  Reads DSI_INTR_CTRL and writes the
 * value straight back (under intr_lock) to ack the asserted interrupts,
 * then fans out to error handling and completion signalling.  Bails out
 * early if an irq arrives before the register base is mapped.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
/*
 * Look up the panel-control GPIOs declared in the panel's DT node.
 * disp-enable is optional (disp_en_gpio is NULL when absent); disp-te
 * is requested unconditionally.
 * NOTE(review): disp-te uses devm_gpiod_get(), so a panel without a TE
 * line fails attach - confirm whether it should be _optional as well.
 */
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
					struct device *panel_device)
{
	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
							 "disp-enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(msm_host->disp_en_gpio)) {
		DBG("cannot get disp-enable-gpios %ld",
				PTR_ERR(msm_host->disp_en_gpio));
		return PTR_ERR(msm_host->disp_en_gpio);
	}

	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
	if (IS_ERR(msm_host->te_gpio)) {
		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
		return PTR_ERR(msm_host->te_gpio);
	}

	return 0;
}
/*
 * mipi_dsi_host_ops.attach - a panel device has bound to this host.
 * Cache the panel's link parameters and GPIOs, then report a hotplug
 * event if the drm device is already up.
 */
static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;

	msm_host->panel_node = dsi->dev.of_node;

	/* Some gpios defined in panel DT need to be controlled by host */
	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		drm_helper_hpd_irq_event(msm_host->dev);

	return 0;
}
  1175. static int dsi_host_detach(struct mipi_dsi_host *host,
  1176. struct mipi_dsi_device *dsi)
  1177. {
  1178. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1179. msm_host->panel_node = NULL;
  1180. DBG("id=%d", msm_host->id);
  1181. if (msm_host->dev)
  1182. drm_helper_hpd_irq_event(msm_host->dev);
  1183. return 0;
  1184. }
  1185. static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
  1186. const struct mipi_dsi_msg *msg)
  1187. {
  1188. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1189. int ret;
  1190. if (!msg || !msm_host->power_on)
  1191. return -EINVAL;
  1192. mutex_lock(&msm_host->cmd_mutex);
  1193. ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
  1194. mutex_unlock(&msm_host->cmd_mutex);
  1195. return ret;
  1196. }
/* mipi_dsi_host callbacks used by panel drivers via the DSI core */
static struct mipi_dsi_host_ops dsi_host_ops = {
	.attach = dsi_host_attach,
	.detach = dsi_host_detach,
	.transfer = dsi_host_transfer,
};
  1202. int msm_dsi_host_init(struct msm_dsi *msm_dsi)
  1203. {
  1204. struct msm_dsi_host *msm_host = NULL;
  1205. struct platform_device *pdev = msm_dsi->pdev;
  1206. int ret;
  1207. msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
  1208. if (!msm_host) {
  1209. pr_err("%s: FAILED: cannot alloc dsi host\n",
  1210. __func__);
  1211. ret = -ENOMEM;
  1212. goto fail;
  1213. }
  1214. ret = of_property_read_u32(pdev->dev.of_node,
  1215. "qcom,dsi-host-index", &msm_host->id);
  1216. if (ret) {
  1217. dev_err(&pdev->dev,
  1218. "%s: host index not specified, ret=%d\n",
  1219. __func__, ret);
  1220. goto fail;
  1221. }
  1222. msm_host->pdev = pdev;
  1223. ret = dsi_clk_init(msm_host);
  1224. if (ret) {
  1225. pr_err("%s: unable to initialize dsi clks\n", __func__);
  1226. goto fail;
  1227. }
  1228. msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
  1229. if (IS_ERR(msm_host->ctrl_base)) {
  1230. pr_err("%s: unable to map Dsi ctrl base\n", __func__);
  1231. ret = PTR_ERR(msm_host->ctrl_base);
  1232. goto fail;
  1233. }
  1234. msm_host->cfg = dsi_get_config(msm_host);
  1235. if (!msm_host->cfg) {
  1236. ret = -EINVAL;
  1237. pr_err("%s: get config failed\n", __func__);
  1238. goto fail;
  1239. }
  1240. ret = dsi_regulator_init(msm_host);
  1241. if (ret) {
  1242. pr_err("%s: regulator init failed\n", __func__);
  1243. goto fail;
  1244. }
  1245. msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
  1246. if (!msm_host->rx_buf) {
  1247. pr_err("%s: alloc rx temp buf failed\n", __func__);
  1248. goto fail;
  1249. }
  1250. init_completion(&msm_host->dma_comp);
  1251. init_completion(&msm_host->video_comp);
  1252. mutex_init(&msm_host->dev_mutex);
  1253. mutex_init(&msm_host->cmd_mutex);
  1254. mutex_init(&msm_host->clk_mutex);
  1255. spin_lock_init(&msm_host->intr_lock);
  1256. /* setup workqueue */
  1257. msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
  1258. INIT_WORK(&msm_host->err_work, dsi_err_worker);
  1259. msm_dsi->host = &msm_host->base;
  1260. msm_dsi->id = msm_host->id;
  1261. DBG("Dsi Host %d initialized", msm_host->id);
  1262. return 0;
  1263. fail:
  1264. return ret;
  1265. }
/*
 * Tear down what msm_dsi_host_init()/modeset_init() created: the TX
 * staging buffer, the error workqueue and the mutexes.  devm-managed
 * allocations (rx_buf, msm_host itself) are released by the driver core.
 */
void msm_dsi_host_destroy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_tx_buf_free(msm_host);
	if (msm_host->workqueue) {
		flush_workqueue(msm_host->workqueue);
		destroy_workqueue(msm_host->workqueue);
		msm_host->workqueue = NULL;
	}

	mutex_destroy(&msm_host->clk_mutex);
	mutex_destroy(&msm_host->cmd_mutex);
	mutex_destroy(&msm_host->dev_mutex);
}
  1280. int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
  1281. struct drm_device *dev)
  1282. {
  1283. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1284. struct platform_device *pdev = msm_host->pdev;
  1285. int ret;
  1286. msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1287. if (msm_host->irq < 0) {
  1288. ret = msm_host->irq;
  1289. dev_err(dev->dev, "failed to get irq: %d\n", ret);
  1290. return ret;
  1291. }
  1292. ret = devm_request_irq(&pdev->dev, msm_host->irq,
  1293. dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1294. "dsi_isr", msm_host);
  1295. if (ret < 0) {
  1296. dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
  1297. msm_host->irq, ret);
  1298. return ret;
  1299. }
  1300. msm_host->dev = dev;
  1301. ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
  1302. if (ret) {
  1303. pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
  1304. return ret;
  1305. }
  1306. return 0;
  1307. }
  1308. int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
  1309. {
  1310. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1311. struct device_node *node;
  1312. int ret;
  1313. /* Register mipi dsi host */
  1314. if (!msm_host->registered) {
  1315. host->dev = &msm_host->pdev->dev;
  1316. host->ops = &dsi_host_ops;
  1317. ret = mipi_dsi_host_register(host);
  1318. if (ret)
  1319. return ret;
  1320. msm_host->registered = true;
  1321. /* If the panel driver has not been probed after host register,
  1322. * we should defer the host's probe.
  1323. * It makes sure panel is connected when fbcon detects
  1324. * connector status and gets the proper display mode to
  1325. * create framebuffer.
  1326. */
  1327. if (check_defer) {
  1328. node = of_get_child_by_name(msm_host->pdev->dev.of_node,
  1329. "panel");
  1330. if (node) {
  1331. if (!of_drm_find_panel(node))
  1332. return -EPROBE_DEFER;
  1333. }
  1334. }
  1335. }
  1336. return 0;
  1337. }
  1338. void msm_dsi_host_unregister(struct mipi_dsi_host *host)
  1339. {
  1340. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1341. if (msm_host->registered) {
  1342. mipi_dsi_host_unregister(host);
  1343. host->dev = NULL;
  1344. host->ops = NULL;
  1345. msm_host->registered = false;
  1346. }
  1347. }
/*
 * Prepare the controller for a command transfer driven by the DSI
 * manager: turn on link clocks, pick HS or LP transmission per the
 * message flags, force the command engine on (saving DSI_CTRL for
 * msm_dsi_host_xfer_restore()) and unmask the DMA-done interrupt.
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	dsi_clk_ctrl(msm_host, 1);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
/*
 * Undo msm_dsi_host_xfer_prepare(): mask the DMA-done interrupt,
 * restore the saved DSI_CTRL value, return to LP mode if the transfer
 * used HS, and drop the link clocks.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_clk_ctrl(msm_host, 0);
}
  1384. int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
  1385. const struct mipi_dsi_msg *msg)
  1386. {
  1387. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1388. return dsi_cmds2buf_tx(msm_host, msg);
  1389. }
/*
 * Read a response from the panel, possibly over several passes, because
 * the read-back fifo only holds 16 bytes (first pass: 4 header + 10
 * payload + 2 crc).  Each pass sets the maximum return packet size,
 * re-sends the read command, then drains the RDBK registers into
 * msm_host->rx_buf.  The accumulated buffer is finally decoded by
 * response type.
 *
 * Returns the number of payload bytes stored in msg->rx_buf, 0 on a
 * drain failure or unknown response type, or a negative errno.
 */
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
			const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	char cmd;
	int rlen = msg->rx_len;
	u8 *buf;

	if (rlen <= 2) {
		short_response = 1;
		pkt_size = rlen;
		rx_byte = 4;
	} else {
		short_response = 0;
		data_byte = 10;	/* first read */
		if (rlen < data_byte)
			pkt_size = rlen;
		else
			pkt_size = data_byte;
		rx_byte = data_byte + 6; /* 4 header + 2 crc */
	}

	buf = msm_host->rx_buf;
	end = 0;
	while (!end) {
		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
		struct mipi_dsi_msg max_pkt_size_msg = {
			.channel = msg->channel,
			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
			.tx_len = 2,
			.tx_buf = tx,
		};

		DBG("rlen=%d pkt_size=%d rx_byte=%d",
			rlen, pkt_size, rx_byte);

		/* tell the panel how much it may return on this pass */
		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
		if (ret < 2) {
			pr_err("%s: Set max pkt size failed, %d\n",
				__func__, ret);
			return -EINVAL;
		}

		if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
			(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
			/* Clear the RDBK_DATA registers */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
					DSI_RDBK_DATA_CTRL_CLR);
			wmb(); /* make sure the RDBK registers are cleared */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
			wmb(); /* release cleared status before transfer */
		}

		ret = dsi_cmds2buf_tx(msm_host, msg);
		if (ret < msg->tx_len) {
			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
			return ret;
		}

		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);

		if (dlen <= 0)
			return 0;

		if (short_response)
			break;

		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		if (!end) {
			dlen -= 2; /* 2 crc */
			dlen -= diff;
			buf += dlen;	/* next start position */
			data_byte = 14;	/* NOT first read */
			if (rlen < data_byte)
				pkt_size += rlen;
			else
				pkt_size += data_byte;
			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
		}
	}

	/*
	 * For single Long read, if the requested rlen < 10,
	 * we need to shift the start position of rx
	 * data buffer to skip the bytes which are not
	 * updated.
	 */
	if (pkt_size < 10 && !short_response)
		buf = msm_host->rx_buf + (10 - rlen);
	else
		buf = msm_host->rx_buf;

	/* first byte of the (preserved) first pass is the response type */
	cmd = buf[0];
	switch (cmd) {
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
		ret = 0;
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		ret = dsi_short_read1_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		ret = dsi_short_read2_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		ret = dsi_long_read_resp(buf, msg);
		break;
	default:
		pr_warn("%s:Invalid response cmd\n", __func__);
		ret = 0;
	}

	return ret;
}
/* Program DMA base/length for a staged command and fire the trigger. */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}
/*
 * Reparent the host's byte and pixel source clocks to the given PLL.
 * A PLL without a clock provider is not treated as fatal: the current
 * parents are kept and 0 is returned.
 */
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
			struct msm_dsi_pll *src_pll)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	struct clk *byte_clk_provider, *pixel_clk_provider;
	int ret;

	ret = msm_dsi_pll_get_clk_provider(src_pll,
				&byte_clk_provider, &pixel_clk_provider);
	if (ret) {
		pr_info("%s: can't get provider from pll, don't set parent\n",
			__func__);
		return 0;
	}

	ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
	if (ret) {
		pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
	if (ret) {
		pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
			__func__, ret);
		goto exit;
	}

exit:
	return ret;
}
  1546. int msm_dsi_host_enable(struct mipi_dsi_host *host)
  1547. {
  1548. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1549. dsi_op_mode_config(msm_host,
  1550. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
  1551. /* TODO: clock should be turned off for command mode,
  1552. * and only turned on before MDP START.
  1553. * This part of code should be enabled once mdp driver support it.
  1554. */
  1555. /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
  1556. dsi_clk_ctrl(msm_host, 0); */
  1557. return 0;
  1558. }
  1559. int msm_dsi_host_disable(struct mipi_dsi_host *host)
  1560. {
  1561. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1562. dsi_op_mode_config(msm_host,
  1563. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
  1564. /* Since we have disabled INTF, the video engine won't stop so that
  1565. * the cmd engine will be blocked.
  1566. * Reset to disable video engine so that we can send off cmd.
  1567. */
  1568. dsi_sw_reset(msm_host);
  1569. return 0;
  1570. }
/*
 * Full power-up sequence: compute clock rates, enable regulators, reset
 * and enable the PHY (bus clocks are held only around the PHY call),
 * start the link clocks, program timing and controller config, then
 * raise the panel enable GPIO.  Idempotent under dev_mutex.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	u32 clk_pre = 0, clk_post = 0;
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		goto unlock_ret;
	}

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	ret = dsi_bus_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_phy_sw_reset(msm_host);
	/* the PHY hands back clk_pre/clk_post for dsi_ctrl_config() */
	ret = msm_dsi_manager_phy_enable(msm_host->id,
					msm_host->byte_clk_rate * 8,
					clk_get_rate(msm_host->esc_clk),
					&clk_pre, &clk_post);
	dsi_bus_clk_disable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	ret = dsi_clk_ctrl(msm_host, 1);
	if (ret) {
		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_timing_setup(msm_host);
	dsi_sw_reset(msm_host);
	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
  1626. int msm_dsi_host_power_off(struct mipi_dsi_host *host)
  1627. {
  1628. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1629. mutex_lock(&msm_host->dev_mutex);
  1630. if (!msm_host->power_on) {
  1631. DBG("dsi host already off");
  1632. goto unlock_ret;
  1633. }
  1634. dsi_ctrl_config(msm_host, false, 0, 0);
  1635. if (msm_host->disp_en_gpio)
  1636. gpiod_set_value(msm_host->disp_en_gpio, 0);
  1637. msm_dsi_manager_phy_disable(msm_host->id);
  1638. dsi_clk_ctrl(msm_host, 0);
  1639. dsi_host_regulator_disable(msm_host);
  1640. DBG("-");
  1641. msm_host->power_on = false;
  1642. unlock_ret:
  1643. mutex_unlock(&msm_host->dev_mutex);
  1644. return 0;
  1645. }
  1646. int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
  1647. struct drm_display_mode *mode)
  1648. {
  1649. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1650. if (msm_host->mode) {
  1651. drm_mode_destroy(msm_host->dev, msm_host->mode);
  1652. msm_host->mode = NULL;
  1653. }
  1654. msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
  1655. if (IS_ERR(msm_host->mode)) {
  1656. pr_err("%s: cannot duplicate mode\n", __func__);
  1657. return PTR_ERR(msm_host->mode);
  1658. }
  1659. return 0;
  1660. }
  1661. struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
  1662. unsigned long *panel_flags)
  1663. {
  1664. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1665. struct drm_panel *panel;
  1666. panel = of_drm_find_panel(msm_host->panel_node);
  1667. if (panel_flags)
  1668. *panel_flags = msm_host->mode_flags;
  1669. return panel;
  1670. }