serial-tegra.c

// SPDX-License-Identifier: GPL-2.0
/*
 * serial_tegra.c
 *
 * High-speed serial driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <ldewangan@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#define TEGRA_UART_TYPE "TEGRA_UART"
#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
#define TEGRA_UART_IER_EORD 0x20
#define TEGRA_UART_MCR_RTS_EN 0x40
#define TEGRA_UART_MCR_CTS_EN 0x20
#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
			    UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR 0x08
#define TEGRA_UART_SIR_ENABLED 0x80

#define TEGRA_UART_TX_PIO 1
#define TEGRA_UART_TX_DMA 2
#define TEGRA_UART_MIN_DMA 16
#define TEGRA_UART_FIFO_SIZE 32

/*
 * The Tx FIFO trigger level encoding in the Tegra UART is the reverse
 * of a conventional UART.
 */
#define TEGRA_UART_TX_TRIG_16B 0x00
#define TEGRA_UART_TX_TRIG_8B 0x10
#define TEGRA_UART_TX_TRIG_4B 0x20
#define TEGRA_UART_TX_TRIG_1B 0x30

#define TEGRA_UART_MAXIMUM 8

/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD 115200
#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO 1
#define TEGRA_TX_DMA 2

#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40

/**
 * struct tegra_uart_chip_data - SoC specific data
 *
 * @tx_fifo_full_status: Status flag available for checking Tx FIFO full.
 * @allow_txfifo_reset_fifo_mode: allow Tx FIFO reset in FIFO mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: whether the clock source supports a clock divider.
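 * @fifo_mode_enable_status: whether the controller reports the FIFO mode
 *			enable status in the IIR register (polled after
 *			enabling the FIFOs).
 * @uart_max_port: maximum number of UART ports on this SoC; used to size
 *			the uart_driver.
 * @max_dma_burst_bytes: DMA burst size in bytes for Rx; also selects the
 *			Rx FIFO trigger level.
 * @error_tolerance_low_range: lowest acceptable baud rate deviation, in percent.
 * @error_tolerance_high_range: highest acceptable baud rate deviation, in percent.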
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
	bool fifo_mode_enable_status;
	int uart_max_port;
	int max_dma_burst_bytes;
	int error_tolerance_low_range;
	int error_tolerance_high_range;
};

struct tegra_baud_tolerance {
	u32 lower_range_baud;
	u32 upper_range_baud;
	s32 tolerance;
};

struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;
	struct clk *uart_clk;
	struct reset_control *rst;
	unsigned int current_baud;
	/* Register shadow */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;
	int tx_in_progress;
	unsigned int tx_bytes;
	bool enable_modem_interrupt;
	bool rx_timeout;
	int rx_in_progress;
	int symb_bit;
	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	unsigned int tx_bytes_requested;
	unsigned int rx_bytes_requested;
	struct tegra_baud_tolerance *baud_tolerance;
	int n_adjustable_baud_rates;
	int required_rate;
	int configured_rate;
	bool use_rx_pio;
	bool use_tx_pio;
};

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
					bool dma_to_memory);

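/*
 * Register accessors: the Tegra UART registers are word-aligned, so the
 * standard 8250 register offsets are shifted left by uport.regshift
 * (set to 2 in probe) before the 32-bit MMIO access.
 */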
static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
		unsigned long reg)
{
	return readl(tup->uport.membase + (reg << tup->uport.regshift));
}

static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
		unsigned long reg)
{
	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}

static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}

static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active.
	 * CD/DCD/CAR - Carrier detect is always active (Linux uses several
	 * names for carrier detect).
	 * DSR - Data Set Ready is reported as active since the hardware
	 * does not support it.
	 * CTS - Clear To Send. Always reported as active, as the hardware
	 * handles CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}

static void set_rts(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= TEGRA_UART_MCR_RTS_EN;
	else
		mcr &= ~TEGRA_UART_MCR_RTS_EN;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_dtr(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= UART_MCR_DTR;
	else
		mcr &= ~UART_MCR_DTR;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_loopbk(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr = tup->mcr_shadow;

	if (active)
		mcr |= UART_MCR_LOOP;
	else
		mcr &= ~UART_MCR_LOOP;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int enable;

	tup->rts_active = !!(mctrl & TIOCM_RTS);
	set_rts(tup, tup->rts_active);

	enable = !!(mctrl & TIOCM_DTR);
	set_dtr(tup, enable);

	enable = !!(mctrl & TIOCM_LOOP);
	set_loopbk(tup, enable);
}

static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long lcr;

	lcr = tup->lcr_shadow;
	if (break_ctl)
		lcr |= UART_LCR_SBC;
	else
		lcr &= ~UART_LCR_SBC;
	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
}

/**
 * tegra_uart_wait_cycle_time - wait for N UART clock periods
 *
 * @tup:	Tegra serial port data structure.
 * @cycles:	Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16x the baud rate, so one bit time
 * corresponds to 16 UART clock periods.
 */
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
				       unsigned int cycles)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}

/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
				     unsigned int syms)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}

static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
{
	unsigned long iir;
	unsigned int tmout = 100;

	do {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
			return 0;
		udelay(1);
	} while (--tmout);

	return -ETIMEDOUT;
}

static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;
	unsigned int lsr, tmout = 10000;

	if (tup->rts_active)
		set_rts(tup, false);

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		if (tup->cdata->fifo_mode_enable_status)
			tegra_uart_wait_fifo_mode_enabled(tup);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);

	do {
		lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
			break;
		udelay(1);
	} while (--tmout);

	if (tup->rts_active)
		set_rts(tup, true);
}

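/*
 * Baud-rate tolerance entries come from the "nvidia,adjust-baud-rates"
 * DT property; the tolerance is applied below in parts per 10000
 * (0.01%) of the requested clock rate.
 */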
static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
				     unsigned int baud, long rate)
{
	int i;

	for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
		if (baud >= tup->baud_tolerance[i].lower_range_baud &&
		    baud <= tup->baud_tolerance[i].upper_range_baud)
			return (rate + (rate *
				tup->baud_tolerance[i].tolerance) / 10000);
	}

	return rate;
}

static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
{
	long diff;

	diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
		/ tup->required_rate;
	if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
	    diff > (tup->cdata->error_tolerance_high_range * 100)) {
		dev_err(tup->uport.dev,
			"configured baud rate is out of range by %ld", diff);
		return -EIO;
	}

	return 0;
}

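/*
 * When the SoC supports a per-port clock divider (support_clk_src_div),
 * the UART clock itself is set to 16x the baud rate and the 16550
 * divisor latch is programmed to 1; otherwise the divisor is computed
 * from the fixed input clock. With DLAB set, the DLL/DLM latches are
 * accessed through the UART_TX and UART_IER offsets.
 */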
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	unsigned long flags;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		tup->required_rate = rate;

		if (tup->n_adjustable_baud_rates)
			rate = tegra_get_tolerance_rate(tup, baud, rate);

		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		tup->configured_rate = clk_get_rate(tup->uart_clk);
		divisor = 1;
		ret = tegra_check_rate_in_range(tup);
		if (ret < 0)
			return ret;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}

static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
				       unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			/*
			 * Break error
			 * If FIFO read error without any data, reset Rx FIFO
			 */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
			if (tup->uport.ignore_status_mask & UART_LSR_BI)
				return TTY_BREAK;
			flag = TTY_BREAK;
			tup->uport.icount.brk++;
			dev_dbg(tup->uport.dev, "Got Break\n");
		}
		uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
	}

	return flag;
}

static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}

static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}

static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}

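/*
 * PIO transmits are capped at TEGRA_UART_MIN_DMA bytes: the THRE
 * interrupt fires when the Tx FIFO reaches the low watermark, the
 * handler refills at most that many bytes and then decides again
 * between PIO and DMA.
 */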
static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
				    unsigned int bytes)
{
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}

static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}

static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
				   unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);
	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}

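/*
 * Tx path selection: short transfers (or Tx PIO mode) go out via PIO;
 * if the circular-buffer tail is not 4-byte aligned, the unaligned head
 * is sent by PIO first so that the DMA transfer starts aligned and its
 * length is a multiple of 16 bytes.
 */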
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	if (!tup->current_baud)
		return;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}

/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &u->state->xmit;

	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
		tegra_uart_start_next_tx(tup);
}

static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	if (!tup->tx_in_progress) {
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
			ret = TIOCSER_TEMT;
	}
	spin_unlock_irqrestore(&u->lock, flags);
	return ret;
}

static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}

static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
}

static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
				     struct tty_port *tty)
{
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		if (flag != TTY_NORMAL)
			continue;

		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		if (uart_handle_sysrq_char(&tup->uport, ch))
			continue;

		if (tup->uport.ignore_status_mask & UART_LSR_DR)
			continue;

		if (tty)
			tty_insert_flip_char(tty, ch, flag);
	} while (1);
}

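/*
 * The Rx DMA buffer is synced for the CPU before its contents are
 * pushed to the tty layer, then handed back to the device for the next
 * transfer.
 */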
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *tty,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}

	if (tup->uport.ignore_status_mask & UART_LSR_DR)
		return;

	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}

static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	struct tty_struct *tty = tty_port_tty_get(port);
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}

static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}

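/*
 * While the DMA buffer is drained, RTS is de-asserted so the remote end
 * pauses; it is re-asserted only after a fresh Rx DMA transfer has been
 * queued.
 */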
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}

static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);

	return 0;
}

static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;

	/* We may only get DDCD at HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);

	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}

static void do_handle_rx_pio(struct tegra_uart_port *tup)
{
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}

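/*
 * Interrupt handler: the IIR is decoded in a loop until it reports no
 * pending interrupt. For DMA Rx, the Rx interrupt sources are masked on
 * the first Rx event and the DMA buffer is only drained once the
 * controller goes quiet, after which the sources are re-enabled.
 */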
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (!tup->use_rx_pio && is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (!tup->use_rx_pio && !is_rx_int) {
				is_rx_int = true;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier |= UART_IER_RDI;
				tegra_uart_write(tup, ier, UART_IER);
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
			} else {
				do_handle_rx_pio(tup);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}

static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_port *port = &tup->uport.state->port;
	struct dma_tx_state state;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
		 TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;

	if (tup->rx_dma_chan && !tup->use_rx_pio) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
		tegra_uart_rx_buffer_push(tup, state.residue);
	} else {
		tegra_uart_handle_rx_pio(tup, port);
	}
}

static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
				    (msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);

	clk_disable_unprepare(tup->uart_clk);
}

static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	clk_prepare_enable(tup->uart_clk);

	/* Reset the UART controller to clear all previous status. */
	reset_control_assert(tup->rst);
	udelay(10);
	reset_control_deassert(tup->rst);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many bytes are
	 * received; the remaining bytes are picked up on the receive timeout
	 * interrupt. Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches the
	 * low watermark. Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size that
	 * is programmed in the DMA registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;

	if (tup->use_rx_pio) {
		tup->fcr_shadow |= UART_FCR_R_TRIG_11;
	} else {
		if (tup->cdata->max_dma_burst_bytes == 8)
			tup->fcr_shadow |= UART_FCR_R_TRIG_10;
		else
			tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	}

	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	if (tup->cdata->fifo_mode_enable_status) {
		ret = tegra_uart_wait_fifo_mode_enabled(tup);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"Failed to enable FIFO mode: %d\n", ret);
			return ret;
		}
	} else {
		/*
		 * For all tegra devices (up to t210), there is a hardware
		 * issue that requires software to wait for 3 UART clock
		 * periods after enabling the TX fifo, otherwise data could
		 * be lost.
		 */
		tegra_uart_wait_cycle_time(tup, 3);
	}

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued
	 */
	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return ret;
	}

	if (!tup->use_rx_pio) {
		tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
		tup->fcr_shadow |= UART_FCR_DMA_SELECT;
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

		ret = tegra_uart_start_rx_dma(tup);
		if (ret < 0) {
			dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
			return ret;
		}
	} else {
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * If using DMA mode, enable EORD instead of the receive interrupt,
	 * which will interrupt after the UART is done with the receive
	 * instead of when the FIFO "threshold" is reached.
	 *
	 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
	 * when the data is sitting in the FIFO and couldn't be transferred
	 * to the DMA as the DMA size alignment (4 bytes) is not met. EORD is
	 * triggered when there is a pause in the incoming data stream for 4
	 * character times.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first,
	 * then the EORD.
	 */
	if (!tup->use_rx_pio)
		tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
			TEGRA_UART_IER_EORD;
	else
		tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;

	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}

static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
					bool dma_to_memory)
{
	if (dma_to_memory) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dma_release_channel(tup->rx_dma_chan);
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				  tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dmaengine_terminate_all(tup->tx_dma_chan);
		dma_release_channel(tup->tx_dma_chan);
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
				 UART_XMIT_SIZE, DMA_TO_DEVICE);
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
}

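/*
 * DMA buffer strategy: the Rx side owns a 4 KiB coherent buffer
 * allocated here, while the Tx side streaming-maps the serial core's
 * circular xmit buffer in place and transmits directly out of it.
 */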
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
					   bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
						dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				&dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_buf = tup->uport.state->xmit.buf;
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		tegra_uart_dma_channel_free(tup, dma_to_memory);
		return ret;
	}

	return 0;
}

static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	if (!tup->use_tx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, false);
		if (ret < 0) {
			dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
				ret);
			return ret;
		}
	}

	if (!tup->use_rx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, true);
		if (ret < 0) {
			dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
				ret);
			goto fail_rx_dma;
		}
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	ret = request_irq(u->irq, tegra_uart_isr, 0,
			  dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_hw_init;
	}
	return 0;

fail_hw_init:
	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);
	return ret;
}

/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
}

static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);
	free_irq(u->irq, tup);
}

static void tegra_uart_enable_ms(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	if (tup->enable_modem_interrupt) {
		tup->ier_shadow |= UART_IER_MSI;
		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	}
}

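/*
 * set_termios recomputes symb_bit as start + data + parity + stop bits
 * so the symbol-time delays above track the current frame format, then
 * reprograms LCR, the baud rate and the hardware flow control bits
 * while the UART interrupts are temporarily masked.
 */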
static void tegra_uart_set_termios(struct uart_port *u,
				   struct ktermios *termios,
				   struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
	int ret;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);

	ret = tegra_set_baudrate(tup, baud);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return;
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupt */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);

	tup->uport.ignore_status_mask = 0;
	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		tup->uport.ignore_status_mask |= UART_LSR_DR;
	if (termios->c_iflag & IGNBRK)
		tup->uport.ignore_status_mask |= UART_LSR_BI;

	spin_unlock_irqrestore(&u->lock, flags);
}

static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}

static const struct uart_ops tegra_uart_ops = {
	.tx_empty	= tegra_uart_tx_empty,
	.set_mctrl	= tegra_uart_set_mctrl,
	.get_mctrl	= tegra_uart_get_mctrl,
	.stop_tx	= tegra_uart_stop_tx,
	.start_tx	= tegra_uart_start_tx,
	.stop_rx	= tegra_uart_stop_rx,
	.flush_buffer	= tegra_uart_flush_buffer,
	.enable_ms	= tegra_uart_enable_ms,
	.break_ctl	= tegra_uart_break_ctl,
	.startup	= tegra_uart_startup,
	.shutdown	= tegra_uart_shutdown,
	.set_termios	= tegra_uart_set_termios,
	.type		= tegra_uart_type,
	.request_port	= tegra_uart_request_port,
	.release_port	= tegra_uart_release_port,
};

static struct uart_driver tegra_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "tegra_hsuart",
	.dev_name	= "ttyTHS",
	.cons		= NULL,
	.nr		= TEGRA_UART_MAXIMUM,
};

static int tegra_uart_parse_dt(struct platform_device *pdev,
			       struct tegra_uart_port *tup)
{
	struct device_node *np = pdev->dev.of_node;
	int port;
	int ret;
	int index;
	u32 pval;
	int count;
	int n_entries;

	port = of_alias_get_id(np, "serial");
	if (port < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
		return port;
	}
	tup->uport.line = port;

	tup->enable_modem_interrupt = of_property_read_bool(np,
					"nvidia,enable-modem-interrupt");

	index = of_property_match_string(np, "dma-names", "rx");
	if (index < 0) {
		tup->use_rx_pio = true;
		dev_info(&pdev->dev, "RX in PIO mode\n");
	}
	index = of_property_match_string(np, "dma-names", "tx");
	if (index < 0) {
		tup->use_tx_pio = true;
		dev_info(&pdev->dev, "TX in PIO mode\n");
	}

	n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
	if (n_entries > 0) {
		tup->n_adjustable_baud_rates = n_entries / 3;
		tup->baud_tolerance =
			devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
				     sizeof(*tup->baud_tolerance), GFP_KERNEL);
		if (!tup->baud_tolerance)
			return -ENOMEM;
		for (count = 0, index = 0; count < n_entries;
		     count += 3, index++) {
			ret = of_property_read_u32_index(np,
						"nvidia,adjust-baud-rates",
						count, &pval);
			if (!ret)
				tup->baud_tolerance[index].lower_range_baud =
					pval;
			ret = of_property_read_u32_index(np,
						"nvidia,adjust-baud-rates",
						count + 1, &pval);
			if (!ret)
				tup->baud_tolerance[index].upper_range_baud =
					pval;
			ret = of_property_read_u32_index(np,
						"nvidia,adjust-baud-rates",
						count + 2, &pval);
			if (!ret)
				tup->baud_tolerance[index].tolerance =
					(s32)pval;
		}
	} else {
		tup->n_adjustable_baud_rates = 0;
	}

	return 0;
}

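/*
 * Per-SoC parameters. The "nvidia,adjust-baud-rates" property read
 * above is a list of <lower-baud upper-baud adjustment> triplets, with
 * the adjustment expressed in parts per 10000 of the clock rate.
 */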
static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status		= false,
	.allow_txfifo_reset_fifo_mode	= true,
	.support_clk_src_div		= false,
	.fifo_mode_enable_status	= false,
	.uart_max_port			= 5,
	.max_dma_burst_bytes		= 4,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= false,
	.uart_max_port			= 5,
	.max_dma_burst_bytes		= 4,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

static struct tegra_uart_chip_data tegra186_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= true,
	.uart_max_port			= 8,
	.max_dma_burst_bytes		= 8,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

static struct tegra_uart_chip_data tegra194_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= true,
	.uart_max_port			= 8,
	.max_dma_burst_bytes		= 8,
	.error_tolerance_low_range	= -2,
	.error_tolerance_high_range	= 2,
};

static const struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible	= "nvidia,tegra30-hsuart",
		.data		= &tegra30_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra20-hsuart",
		.data		= &tegra20_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra186-hsuart",
		.data		= &tegra186_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra194-hsuart",
		.data		= &tegra194_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);

static int tegra_uart_probe(struct platform_device *pdev)
{
	struct tegra_uart_port *tup;
	struct uart_port *u;
	struct resource *resource;
	int ret;
	const struct tegra_uart_chip_data *cdata;
	const struct of_device_id *match;

	match = of_match_device(tegra_uart_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
	if (!tup) {
		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
		return -ENOMEM;
	}

	ret = tegra_uart_parse_dt(pdev, tup);
	if (ret < 0)
		return ret;

	u = &tup->uport;
	u->dev = &pdev->dev;
	u->ops = &tegra_uart_ops;
	u->type = PORT_TEGRA;
	u->fifosize = 32;
	tup->cdata = cdata;

	platform_set_drvdata(pdev, tup);
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		return -ENODEV;
	}

	u->mapbase = resource->start;
	u->membase = devm_ioremap_resource(&pdev->dev, resource);
	if (IS_ERR(u->membase))
		return PTR_ERR(u->membase);

	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tup->uart_clk)) {
		dev_err(&pdev->dev, "Couldn't get the clock\n");
		return PTR_ERR(tup->uart_clk);
	}

	tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
	if (IS_ERR(tup->rst)) {
		dev_err(&pdev->dev, "Couldn't get the reset\n");
		return PTR_ERR(tup->rst);
	}

	u->iotype = UPIO_MEM32;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	u->irq = ret;
	u->regshift = 2;
	ret = uart_add_one_port(&tegra_uart_driver, u);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
		return ret;
	}
	return ret;
}

static int tegra_uart_remove(struct platform_device *pdev)
{
	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
	struct uart_port *u = &tup->uport;

	uart_remove_one_port(&tegra_uart_driver, u);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_suspend_port(&tegra_uart_driver, u);
}

static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_resume_port(&tegra_uart_driver, u);
}
#endif

static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};

static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name		= "serial-tegra",
		.of_match_table	= tegra_uart_of_match,
		.pm		= &tegra_uart_pm_ops,
	},
};

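/*
 * The number of ports registered with the serial core is sized from the
 * chip data of the first matching DT node (uart_max_port) before the
 * uart_driver is registered; TEGRA_UART_MAXIMUM is only the fallback
 * when no node matches.
 */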
static int __init tegra_uart_init(void)
{
	int ret;
	struct device_node *node;
	const struct of_device_id *match = NULL;
	const struct tegra_uart_chip_data *cdata = NULL;

	node = of_find_matching_node(NULL, tegra_uart_of_match);
	if (node)
		match = of_match_node(tegra_uart_of_match, node);
	if (match)
		cdata = match->data;
	if (cdata)
		tegra_uart_driver.nr = cdata->uart_max_port;

	ret = uart_register_driver(&tegra_uart_driver);
	if (ret < 0) {
		pr_err("Could not register %s driver\n",
		       tegra_uart_driver.driver_name);
		return ret;
	}

	ret = platform_driver_register(&tegra_uart_platform_driver);
	if (ret < 0) {
		pr_err("Uart platform driver register failed, e = %d\n", ret);
		uart_unregister_driver(&tegra_uart_driver);
		return ret;
	}
	return 0;
}

static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}

module_init(tegra_uart_init);
module_exit(tegra_uart_exit);
MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");