/*
 * Copyright (c) 2015 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <video/mipi_display.h>
#include <video/videomode.h>

#include "mtk_drm_ddp_comp.h"

#define DSI_START 0x00
#define DSI_INTEN 0x08
#define DSI_INTSTA 0x0c
#define LPRX_RD_RDY_INT_FLAG BIT(0)
#define CMD_DONE_INT_FLAG BIT(1)
#define TE_RDY_INT_FLAG BIT(2)
#define VM_DONE_INT_FLAG BIT(3)
#define EXT_TE_RDY_INT_FLAG BIT(4)
#define DSI_BUSY BIT(31)
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
#define CMD_MODE 0
#define SYNC_PULSE_MODE 1
#define SYNC_EVENT_MODE 2
#define BURST_MODE 3
#define FRM_MODE BIT(16)
#define MIX_MODE BIT(17)
#define DSI_TXRX_CTRL 0x18
#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define TE_FREERUN BIT(8)
#define EXT_TE_EN BIT(9)
#define EXT_TE_EDGE BIT(10)
#define MAX_RTN_SIZE (0xf << 12)
#define HSTX_CKLP_EN BIT(16)
#define DSI_PSCTRL 0x1c
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
#define PACKED_PS_18BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)
#define DSI_VSA_NL 0x20
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f
#define DSI_HSTX_CKL_WC 0x64
#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
#define DSI_RX_DATA2 0x7c
#define DSI_RX_DATA3 0x80
#define DSI_RACK 0x84
#define RACK BIT(0)
#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
#define LC_WAKEUP_EN BIT(2)
#define DSI_PHY_LD0CON 0x108
#define LD0_HS_TX_EN BIT(0)
#define LD0_ULPM_EN BIT(1)
#define LD0_WAKEUP_EN BIT(2)
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON1 0x114
#define TA_GO (0xff << 0)
#define TA_SURE (0xff << 8)
#define TA_GET (0xff << 16)
#define DA_HS_EXIT (0xff << 24)
#define DSI_PHY_TIMECON2 0x118
#define CONT_DET (0xff << 0)
#define CLK_ZERO (0xff << 16)
#define CLK_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON3 0x11c
#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
#define DSI_VM_CMD_CON 0x130
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
#define DSI_CMDQ0 0x180
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
#define BTA BIT(2)
#define DATA_ID (0xff << 8)
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
#define T_LPX 5
#define T_HS_PREP 6
#define T_HS_TRAIL 8
#define T_HS_EXIT 7
#define T_HS_ZERO 10
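
/*
 * NS_TO_CYCLE() rounds a duration n up to a whole number of cycles of
 * length c (a plain integer ceiling division).
 */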
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))

#define MTK_DSI_HOST_IS_READ(type) \
        ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
         (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
         (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
         (type == MIPI_DSI_DCS_READ))

struct phy;

struct mtk_dsi {
        struct mtk_ddp_comp ddp_comp;
        struct device *dev;
        struct mipi_dsi_host host;
        struct drm_encoder encoder;
        struct drm_connector conn;
        struct drm_panel *panel;
        struct drm_bridge *bridge;
        struct phy *phy;
        void __iomem *regs;
        struct clk *engine_clk;
        struct clk *digital_clk;
        struct clk *hs_clk;
        u32 data_rate;
        unsigned long mode_flags;
        enum mipi_dsi_pixel_format format;
        unsigned int lanes;
        struct videomode vm;
        int refcount;
        bool enabled;
        u32 irq_data;
        wait_queue_head_t irq_wait_queue;
};

static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
{
        return container_of(e, struct mtk_dsi, encoder);
}

static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
{
        return container_of(c, struct mtk_dsi, conn);
}

static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
        return container_of(h, struct mtk_dsi, host);
}
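
/*
 * Read-modify-write helper: update only the register bits selected by @mask
 * with the corresponding bits of @data.
 */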
static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
        u32 temp = readl(dsi->regs + offset);

        writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
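
/*
 * Program the D-PHY timing registers DSI_PHY_TIMECON0..3 (LPX, HS
 * prepare/zero/trail/exit, turnaround and clock-lane timing fields) from the
 * fixed T_* cycle counts and the current data rate.
 */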
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
        u32 timcon0, timcon1, timcon2, timcon3;
        u32 ui, cycle_time;

        ui = 1000 / dsi->data_rate + 0x01;
        cycle_time = 8000 / dsi->data_rate + 0x01;

        timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
        timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
                  T_HS_EXIT << 24;
        timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
                  (NS_TO_CYCLE(0x150, cycle_time) << 16);
        timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
                  NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;

        writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
        writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
        writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
        writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}

static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}

static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}

static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}

static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}

static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}

static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
        u32 tmp_reg1;

        tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);

        return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}

static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
        if (enter && !mtk_dsi_clk_hs_state(dsi))
                mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
        else if (!enter && mtk_dsi_clk_hs_state(dsi))
                mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}

static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
        u32 vid_mode = CMD_MODE;

        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
                if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
                        vid_mode = BURST_MODE;
                else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
                        vid_mode = SYNC_PULSE_MODE;
                else
                        vid_mode = SYNC_EVENT_MODE;
        }

        writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
        mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}

static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
        struct videomode *vm = &dsi->vm;
        u32 dsi_buf_bpp, ps_wc;
        u32 ps_bpp_mode;

        if (dsi->format == MIPI_DSI_FMT_RGB565)
                dsi_buf_bpp = 2;
        else
                dsi_buf_bpp = 3;

        ps_wc = vm->hactive * dsi_buf_bpp;
        ps_bpp_mode = ps_wc;

        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB888:
                ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
                break;
        case MIPI_DSI_FMT_RGB666:
                ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
                break;
        case MIPI_DSI_FMT_RGB666_PACKED:
                ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
                break;
        case MIPI_DSI_FMT_RGB565:
                ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
                break;
        }

        writel(vm->vactive, dsi->regs + DSI_VACT_NL);
        writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
        writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
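
/*
 * Program DSI_TXRX_CTRL: set one enable bit per active data lane in the
 * LANE_NUM field, then shift the MIPI_DSI_CLOCK_NON_CONTINUOUS and
 * MIPI_DSI_MODE_EOT_PACKET mode flags into the HSTX_CKLP_EN and DIS_EOT
 * bit positions.
 */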
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
        u32 tmp_reg;

        switch (dsi->lanes) {
        case 1:
                tmp_reg = 1 << 2;
                break;
        case 2:
                tmp_reg = 3 << 2;
                break;
        case 3:
                tmp_reg = 7 << 2;
                break;
        case 4:
                tmp_reg = 0xf << 2;
                break;
        default:
                tmp_reg = 0xf << 2;
                break;
        }

        tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
        tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;

        writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}
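
/*
 * Like mtk_dsi_ps_control_vact(), but only programs DSI_PSCTRL (pixel stream
 * format plus word count); DSI_VACT_NL and DSI_HSTX_CKL_WC are left untouched.
 */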
static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
        u32 dsi_tmp_buf_bpp;
        u32 tmp_reg;

        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB888:
                tmp_reg = PACKED_PS_24BIT_RGB888;
                dsi_tmp_buf_bpp = 3;
                break;
        case MIPI_DSI_FMT_RGB666:
                tmp_reg = LOOSELY_PS_18BIT_RGB666;
                dsi_tmp_buf_bpp = 3;
                break;
        case MIPI_DSI_FMT_RGB666_PACKED:
                tmp_reg = PACKED_PS_18BIT_RGB666;
                dsi_tmp_buf_bpp = 3;
                break;
        case MIPI_DSI_FMT_RGB565:
                tmp_reg = PACKED_PS_16BIT_RGB565;
                dsi_tmp_buf_bpp = 2;
                break;
        default:
                tmp_reg = PACKED_PS_24BIT_RGB888;
                dsi_tmp_buf_bpp = 3;
                break;
        }

        tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
        writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}

static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
        u32 horizontal_sync_active_byte;
        u32 horizontal_backporch_byte;
        u32 horizontal_frontporch_byte;
        u32 dsi_tmp_buf_bpp;
        struct videomode *vm = &dsi->vm;

        if (dsi->format == MIPI_DSI_FMT_RGB565)
                dsi_tmp_buf_bpp = 2;
        else
                dsi_tmp_buf_bpp = 3;

        writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
        writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
        writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
        writel(vm->vactive, dsi->regs + DSI_VACT_NL);

        horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
                horizontal_backporch_byte =
                        (vm->hback_porch * dsi_tmp_buf_bpp - 10);
        else
                horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
                        dsi_tmp_buf_bpp - 10);

        horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);

        writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
        writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
        writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

        mtk_dsi_ps_control(dsi);
}

static void mtk_dsi_start(struct mtk_dsi *dsi)
{
        writel(0, dsi->regs + DSI_START);
        writel(1, dsi->regs + DSI_START);
}

static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
        writel(0, dsi->regs + DSI_START);
}

static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
        writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
        u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

        writel(inten, dsi->regs + DSI_INTEN);
}

static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
        dsi->irq_data |= irq_bit;
}

static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
        dsi->irq_data &= ~irq_bit;
}

static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
                                     unsigned int timeout)
{
        s32 ret = 0;
        unsigned long jiffies = msecs_to_jiffies(timeout);

        ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
                                               dsi->irq_data & irq_flag,
                                               jiffies);
        if (ret == 0) {
                DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);

                mtk_dsi_enable(dsi);
                mtk_dsi_reset_engine(dsi);
        }

        return ret;
}
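
/*
 * Interrupt handler: acknowledge the pending status bits (issuing RACK while
 * the engine reports busy), clear them in DSI_INTSTA, record them in
 * dsi->irq_data and wake up anyone sleeping in mtk_dsi_wait_for_irq_done().
 */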
static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
        struct mtk_dsi *dsi = dev_id;
        u32 status, tmp;
        u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

        status = readl(dsi->regs + DSI_INTSTA) & flag;

        if (status) {
                do {
                        mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
                        tmp = readl(dsi->regs + DSI_INTSTA);
                } while (tmp & DSI_BUSY);

                mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
                mtk_dsi_irq_data_set(dsi, status);
                wake_up_interruptible(&dsi->irq_wait_queue);
        }

        return IRQ_HANDLED;
}

static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
        mtk_dsi_irq_data_clear(dsi, irq_flag);
        mtk_dsi_set_cmd_mode(dsi);

        if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
                DRM_ERROR("failed to switch cmd mode\n");
                return -ETIME;
        } else {
                return 0;
        }
}

static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
        struct device *dev = dsi->dev;
        int ret;
        u64 pixel_clock, total_bits;
        u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;

        if (++dsi->refcount != 1)
                return 0;

        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB565:
                bit_per_pixel = 16;
                break;
        case MIPI_DSI_FMT_RGB666_PACKED:
                bit_per_pixel = 18;
                break;
        case MIPI_DSI_FMT_RGB666:
        case MIPI_DSI_FMT_RGB888:
        default:
                bit_per_pixel = 24;
                break;
        }

        /**
         * htotal_time = htotal * byte_per_pixel / num_lanes
         * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
         * mipi_ratio = (htotal_time + overhead_time) / htotal_time
         * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
         */
        pixel_clock = dsi->vm.pixelclock;
        htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
                 dsi->vm.hsync_len;
        htotal_bits = htotal * bit_per_pixel;

        overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
                          T_HS_EXIT;
        overhead_bits = overhead_cycles * dsi->lanes * 8;
        total_bits = htotal_bits + overhead_bits;
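
        /*
         * The division below folds the formula above into integer math:
         * data_rate = pixel_clock * total_bits / (htotal * lanes), which
         * equals pixel_clock * bit_per_pixel * mipi_ratio / num_lanes with
         * mipi_ratio = total_bits / htotal_bits.
         */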
        dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
                                          htotal * dsi->lanes);

        ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
        if (ret < 0) {
                dev_err(dev, "Failed to set data rate: %d\n", ret);
                goto err_refcount;
        }

        phy_power_on(dsi->phy);

        ret = clk_prepare_enable(dsi->engine_clk);
        if (ret < 0) {
                dev_err(dev, "Failed to enable engine clock: %d\n", ret);
                goto err_phy_power_off;
        }

        ret = clk_prepare_enable(dsi->digital_clk);
        if (ret < 0) {
                dev_err(dev, "Failed to enable digital clock: %d\n", ret);
                goto err_disable_engine_clk;
        }

        mtk_dsi_enable(dsi);
        mtk_dsi_reset_engine(dsi);
        mtk_dsi_phy_timconfig(dsi);

        mtk_dsi_rxtx_control(dsi);
        mtk_dsi_ps_control_vact(dsi);
        mtk_dsi_set_vm_cmd(dsi);
        mtk_dsi_config_vdo_timing(dsi);
        mtk_dsi_set_interrupt_enable(dsi);

        mtk_dsi_clk_ulp_mode_leave(dsi);
        mtk_dsi_lane0_ulp_mode_leave(dsi);
        mtk_dsi_clk_hs_mode(dsi, 0);

        if (dsi->panel) {
                if (drm_panel_prepare(dsi->panel)) {
                        DRM_ERROR("failed to prepare the panel\n");
                        goto err_disable_digital_clk;
                }
        }

        return 0;

err_disable_digital_clk:
        clk_disable_unprepare(dsi->digital_clk);
err_disable_engine_clk:
        clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
        phy_power_off(dsi->phy);
err_refcount:
        dsi->refcount--;
        return ret;
}

static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
        if (WARN_ON(dsi->refcount == 0))
                return;

        if (--dsi->refcount != 0)
                return;

        /*
         * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
         * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
         * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
         * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
         * after dsi is fully set.
         */
        mtk_dsi_stop(dsi);

        if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
                if (dsi->panel) {
                        if (drm_panel_unprepare(dsi->panel)) {
                                DRM_ERROR("failed to unprepare the panel\n");
                                return;
                        }
                }
        }

        mtk_dsi_reset_engine(dsi);
        mtk_dsi_lane0_ulp_mode_enter(dsi);
        mtk_dsi_clk_ulp_mode_enter(dsi);

        mtk_dsi_disable(dsi);

        clk_disable_unprepare(dsi->engine_clk);
        clk_disable_unprepare(dsi->digital_clk);

        phy_power_off(dsi->phy);
}

static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
        int ret;

        if (dsi->enabled)
                return;

        ret = mtk_dsi_poweron(dsi);
        if (ret < 0) {
                DRM_ERROR("failed to power on dsi\n");
                return;
        }

        mtk_dsi_set_mode(dsi);
        mtk_dsi_clk_hs_mode(dsi, 1);

        mtk_dsi_start(dsi);

        if (dsi->panel) {
                if (drm_panel_enable(dsi->panel)) {
                        DRM_ERROR("failed to enable the panel\n");
                        goto err_dsi_power_off;
                }
        }

        dsi->enabled = true;

        return;

err_dsi_power_off:
        mtk_dsi_stop(dsi);
        mtk_dsi_poweroff(dsi);
}

static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
        if (!dsi->enabled)
                return;

        if (dsi->panel) {
                if (drm_panel_disable(dsi->panel)) {
                        DRM_ERROR("failed to disable the panel\n");
                        return;
                }
        }

        mtk_dsi_poweroff(dsi);

        dsi->enabled = false;
}

static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
        .destroy = mtk_dsi_encoder_destroy,
};

static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
                                       const struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
{
        return true;
}

static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
                                     struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted)
{
        struct mtk_dsi *dsi = encoder_to_dsi(encoder);

        drm_display_mode_to_videomode(adjusted, &dsi->vm);
}

static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
{
        struct mtk_dsi *dsi = encoder_to_dsi(encoder);

        mtk_output_dsi_disable(dsi);
}

static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
{
        struct mtk_dsi *dsi = encoder_to_dsi(encoder);

        mtk_output_dsi_enable(dsi);
}

static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
        struct mtk_dsi *dsi = connector_to_dsi(connector);

        return drm_panel_get_modes(dsi->panel);
}

static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
        .mode_fixup = mtk_dsi_encoder_mode_fixup,
        .mode_set = mtk_dsi_encoder_mode_set,
        .disable = mtk_dsi_encoder_disable,
        .enable = mtk_dsi_encoder_enable,
};

static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = drm_connector_cleanup,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs
        mtk_dsi_connector_helper_funcs = {
        .get_modes = mtk_dsi_connector_get_modes,
};

static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
{
        int ret;

        ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
                                 DRM_MODE_CONNECTOR_DSI);
        if (ret) {
                DRM_ERROR("Failed to initialize connector\n");
                return ret;
        }

        drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);

        dsi->conn.dpms = DRM_MODE_DPMS_OFF;
        drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);

        if (dsi->panel) {
                ret = drm_panel_attach(dsi->panel, &dsi->conn);
                if (ret) {
                        DRM_ERROR("Failed to attach panel to drm\n");
                        goto err_connector_cleanup;
                }
        }

        return 0;

err_connector_cleanup:
        drm_connector_cleanup(&dsi->conn);
        return ret;
}

static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
        int ret;

        ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
                               DRM_MODE_ENCODER_DSI, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize encoder\n");
                return ret;
        }

        drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);

        /*
         * Currently display data paths are statically assigned to a crtc each.
         * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
         */
        dsi->encoder.possible_crtcs = 1;

        /* If there's a bridge, attach to it and let it create the connector */
        ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
        if (ret) {
                DRM_ERROR("Failed to attach bridge to drm\n");

                /* Otherwise create our own connector and attach to a panel */
                ret = mtk_dsi_create_connector(drm, dsi);
                if (ret)
                        goto err_encoder_cleanup;
        }

        return 0;

err_encoder_cleanup:
        drm_encoder_cleanup(&dsi->encoder);
        return ret;
}

static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
{
        drm_encoder_cleanup(&dsi->encoder);

        /* Skip connector cleanup if creation was delegated to the bridge */
        if (dsi->conn.dev)
                drm_connector_cleanup(&dsi->conn);

        if (dsi->panel)
                drm_panel_detach(dsi->panel);
}

static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
{
        struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

        mtk_dsi_poweron(dsi);
}

static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
{
        struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

        mtk_dsi_poweroff(dsi);
}

static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
        .start = mtk_dsi_ddp_start,
        .stop = mtk_dsi_ddp_stop,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
                               struct mipi_dsi_device *device)
{
        struct mtk_dsi *dsi = host_to_dsi(host);

        dsi->lanes = device->lanes;
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;

        if (dsi->conn.dev)
                drm_helper_hpd_irq_event(dsi->conn.dev);

        return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
                               struct mipi_dsi_device *device)
{
        struct mtk_dsi *dsi = host_to_dsi(host);

        if (dsi->conn.dev)
                drm_helper_hpd_irq_event(dsi->conn.dev);

        return 0;
}

static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
        int ret;
        u32 val;

        ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
                                 4, 2000000);
        if (ret) {
                DRM_WARN("timeout waiting for DSI to become idle\n");

                mtk_dsi_enable(dsi);
                mtk_dsi_reset_engine(dsi);
        }
}
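
/*
 * Translate the RX packet type in read_data[0] into the number of payload
 * bytes available: short read responses carry one or two bytes, long read
 * responses carry a length taken from the packet header.
 */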
static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
        switch (type) {
        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
                return 1;
        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
                return 2;
        case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
                return read_data[1] + read_data[2] * 16;
        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
                DRM_INFO("type is 0x02, try again\n");
                break;
        default:
                DRM_INFO("type(0x%x) not recognized\n", type);
                break;
        }

        return 0;
}
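
/*
 * Fill the DSI command queue: byte 0 of DSI_CMDQ0 holds the packet
 * configuration (short/long/BTA) and byte 1 the data ID. For short packets
 * the payload goes into bytes 2-3; for long packets bytes 2-3 hold the word
 * count and the payload starts at the next CMDQ register. Finally the queue
 * size is programmed into DSI_CMDQ_SIZE.
 */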
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
        const char *tx_buf = msg->tx_buf;
        u8 config, cmdq_size, cmdq_off, type = msg->type;
        u32 reg_val, cmdq_mask, i;

        if (MTK_DSI_HOST_IS_READ(type))
                config = BTA;
        else
                config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

        if (msg->tx_len > 2) {
                cmdq_size = 1 + (msg->tx_len + 3) / 4;
                cmdq_off = 4;
                cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
                reg_val = (msg->tx_len << 16) | (type << 8) | config;
        } else {
                cmdq_size = 1;
                cmdq_off = 2;
                cmdq_mask = CONFIG | DATA_ID;
                reg_val = (type << 8) | config;
        }

        for (i = 0; i < msg->tx_len; i++)
                writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);

        mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
        mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}

static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
                                     const struct mipi_dsi_msg *msg, u8 flag)
{
        mtk_dsi_wait_for_idle(dsi);
        mtk_dsi_irq_data_clear(dsi, flag);
        mtk_dsi_cmdq(dsi, msg);
        mtk_dsi_start(dsi);

        if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
                return -ETIME;
        else
                return 0;
}
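
/*
 * mipi_dsi_host transfer hook: only valid while the engine is in command
 * mode. Sends the message; for read requests it additionally waits for
 * LPRX_RD_RDY and copies up to 10 bytes back from DSI_RX_DATA0..3.
 */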
static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
                                     const struct mipi_dsi_msg *msg)
{
        struct mtk_dsi *dsi = host_to_dsi(host);
        u32 recv_cnt, i;
        u8 read_data[16];
        void *src_addr;
        u8 irq_flag = CMD_DONE_INT_FLAG;

        if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
                DRM_ERROR("dsi engine is not in command mode\n");
                return -EINVAL;
        }

        if (MTK_DSI_HOST_IS_READ(msg->type))
                irq_flag |= LPRX_RD_RDY_INT_FLAG;

        if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
                return -ETIME;

        if (!MTK_DSI_HOST_IS_READ(msg->type))
                return 0;

        if (!msg->rx_buf) {
                DRM_ERROR("dsi receive buffer is NULL\n");
                return -EINVAL;
        }

        for (i = 0; i < 16; i++)
                *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

        recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

        if (recv_cnt > 2)
                src_addr = &read_data[4];
        else
                src_addr = &read_data[1];

        if (recv_cnt > 10)
                recv_cnt = 10;

        if (recv_cnt > msg->rx_len)
                recv_cnt = msg->rx_len;

        if (recv_cnt)
                memcpy(msg->rx_buf, src_addr, recv_cnt);

        DRM_INFO("dsi got %d bytes of data from the panel, address 0x%x\n",
                 recv_cnt, *((u8 *)(msg->tx_buf)));

        return recv_cnt;
}

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
        .attach = mtk_dsi_host_attach,
        .detach = mtk_dsi_host_detach,
        .transfer = mtk_dsi_host_transfer,
};

static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
        int ret;
        struct drm_device *drm = data;
        struct mtk_dsi *dsi = dev_get_drvdata(dev);

        ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
        if (ret < 0) {
                dev_err(dev, "Failed to register component %pOF: %d\n",
                        dev->of_node, ret);
                return ret;
        }

        ret = mipi_dsi_host_register(&dsi->host);
        if (ret < 0) {
                dev_err(dev, "failed to register DSI host: %d\n", ret);
                goto err_ddp_comp_unregister;
        }

        ret = mtk_dsi_create_conn_enc(drm, dsi);
        if (ret) {
                DRM_ERROR("Encoder create failed with %d\n", ret);
                goto err_unregister;
        }

        return 0;

err_unregister:
        mipi_dsi_host_unregister(&dsi->host);
err_ddp_comp_unregister:
        mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
        return ret;
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
                           void *data)
{
        struct drm_device *drm = data;
        struct mtk_dsi *dsi = dev_get_drvdata(dev);

        mtk_dsi_destroy_conn_enc(dsi);
        mipi_dsi_host_unregister(&dsi->host);
        mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}

static const struct component_ops mtk_dsi_component_ops = {
        .bind = mtk_dsi_bind,
        .unbind = mtk_dsi_unbind,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
        struct mtk_dsi *dsi;
        struct device *dev = &pdev->dev;
        struct resource *regs;
        int irq_num;
        int comp_id;
        int ret;

        dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
        if (!dsi)
                return -ENOMEM;

        dsi->host.ops = &mtk_dsi_ops;
        dsi->host.dev = dev;

        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
                                          &dsi->panel, &dsi->bridge);
        if (ret)
                return ret;

        dsi->engine_clk = devm_clk_get(dev, "engine");
        if (IS_ERR(dsi->engine_clk)) {
                ret = PTR_ERR(dsi->engine_clk);
                dev_err(dev, "Failed to get engine clock: %d\n", ret);
                return ret;
        }

        dsi->digital_clk = devm_clk_get(dev, "digital");
        if (IS_ERR(dsi->digital_clk)) {
                ret = PTR_ERR(dsi->digital_clk);
                dev_err(dev, "Failed to get digital clock: %d\n", ret);
                return ret;
        }

        dsi->hs_clk = devm_clk_get(dev, "hs");
        if (IS_ERR(dsi->hs_clk)) {
                ret = PTR_ERR(dsi->hs_clk);
                dev_err(dev, "Failed to get hs clock: %d\n", ret);
                return ret;
        }

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->regs = devm_ioremap_resource(dev, regs);
        if (IS_ERR(dsi->regs)) {
                ret = PTR_ERR(dsi->regs);
                dev_err(dev, "Failed to ioremap memory: %d\n", ret);
                return ret;
        }

        dsi->phy = devm_phy_get(dev, "dphy");
        if (IS_ERR(dsi->phy)) {
                ret = PTR_ERR(dsi->phy);
                dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
                return ret;
        }

        comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
        if (comp_id < 0) {
                dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
                return comp_id;
        }

        ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
                                &mtk_dsi_funcs);
        if (ret) {
                dev_err(dev, "Failed to initialize component: %d\n", ret);
                return ret;
        }

        irq_num = platform_get_irq(pdev, 0);
        if (irq_num < 0) {
                dev_err(&pdev->dev, "failed to request dsi irq resource\n");
                return -EPROBE_DEFER;
        }

        irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
        ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
                               IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
        if (ret) {
                dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
                return -EPROBE_DEFER;
        }

        init_waitqueue_head(&dsi->irq_wait_queue);

        platform_set_drvdata(pdev, dsi);

        return component_add(&pdev->dev, &mtk_dsi_component_ops);
}

static int mtk_dsi_remove(struct platform_device *pdev)
{
        struct mtk_dsi *dsi = platform_get_drvdata(pdev);

        mtk_output_dsi_disable(dsi);
        component_del(&pdev->dev, &mtk_dsi_component_ops);

        return 0;
}

static const struct of_device_id mtk_dsi_of_match[] = {
        { .compatible = "mediatek,mt2701-dsi" },
        { .compatible = "mediatek,mt8173-dsi" },
        { },
};

struct platform_driver mtk_dsi_driver = {
        .probe = mtk_dsi_probe,
        .remove = mtk_dsi_remove,
        .driver = {
                .name = "mtk-dsi",
                .of_match_table = mtk_dsi_of_match,
        },
};