/* i2c-at91.c — Atmel AT91 Two-Wire Interface (i2c) bus driver */
  1. /*
  2. * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
  3. *
  4. * Copyright (C) 2011 Weinmann Medical GmbH
  5. * Author: Nikolaus Voss <n.voss@weinmann.de>
  6. *
  7. * Evolved from original work by:
  8. * Copyright (C) 2004 Rick Bronson
  9. * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
  10. *
  11. * Borrowed heavily from original work by:
  12. * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2 of the License, or
  17. * (at your option) any later version.
  18. */
  19. #include <linux/clk.h>
  20. #include <linux/completion.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/err.h>
  24. #include <linux/i2c.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/of.h>
  29. #include <linux/of_device.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/slab.h>
  32. #include <linux/platform_data/dma-atmel.h>
  33. #include <linux/pm_runtime.h>
  34. #include <linux/pinctrl/consumer.h>
/* driver tunables */
#define DEFAULT_TWI_CLK_HZ		100000		/* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */
#define AUTOSUSPEND_TIMEOUT		2000		/* runtime-PM autosuspend delay (ms) */

/* AT91 TWI register definitions */
#define	AT91_TWI_CR		0x0000	/* Control Register */
#define	AT91_TWI_START		BIT(0)	/* Send a Start Condition */
#define	AT91_TWI_STOP		BIT(1)	/* Send a Stop Condition */
#define	AT91_TWI_MSEN		BIT(2)	/* Master Transfer Enable */
#define	AT91_TWI_MSDIS		BIT(3)	/* Master Transfer Disable */
#define	AT91_TWI_SVEN		BIT(4)	/* Slave Transfer Enable */
#define	AT91_TWI_SVDIS		BIT(5)	/* Slave Transfer Disable */
#define	AT91_TWI_QUICK		BIT(6)	/* SMBus quick command */
#define	AT91_TWI_SWRST		BIT(7)	/* Software Reset */
#define	AT91_TWI_ACMEN		BIT(16) /* Alternative Command Mode Enable */
#define	AT91_TWI_ACMDIS		BIT(17) /* Alternative Command Mode Disable */
#define	AT91_TWI_THRCLR		BIT(24) /* Transmit Holding Register Clear */
#define	AT91_TWI_RHRCLR		BIT(25) /* Receive Holding Register Clear */
#define	AT91_TWI_LOCKCLR	BIT(26) /* Lock Clear */
#define	AT91_TWI_FIFOEN		BIT(28) /* FIFO Enable */
#define	AT91_TWI_FIFODIS	BIT(29) /* FIFO Disable */

#define	AT91_TWI_MMR		0x0004	/* Master Mode Register */
#define	AT91_TWI_IADRSZ_1	0x0100	/* Internal Device Address Size */
#define	AT91_TWI_MREAD		BIT(12)	/* Master Read Direction */

#define	AT91_TWI_IADR		0x000c	/* Internal Address Register */

#define	AT91_TWI_CWGR		0x0010	/* Clock Waveform Generator Reg */

#define	AT91_TWI_SR		0x0020	/* Status Register */
#define	AT91_TWI_TXCOMP		BIT(0)	/* Transmission Complete */
#define	AT91_TWI_RXRDY		BIT(1)	/* Receive Holding Register Ready */
#define	AT91_TWI_TXRDY		BIT(2)	/* Transmit Holding Register Ready */
#define	AT91_TWI_OVRE		BIT(6)	/* Overrun Error */
#define	AT91_TWI_UNRE		BIT(7)	/* Underrun Error */
#define	AT91_TWI_NACK		BIT(8)	/* Not Acknowledged */
#define	AT91_TWI_LOCK		BIT(23) /* TWI Lock due to Frame Errors */

/* the interrupt sources this driver drives through IER/IDR */
#define	AT91_TWI_INT_MASK \
	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)

#define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
#define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
#define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
#define	AT91_TWI_RHR		0x0030	/* Receive Holding Register */
#define	AT91_TWI_THR		0x0034	/* Transmit Holding Register */

#define	AT91_TWI_ACR		0x0040	/* Alternative Command Register */
#define	AT91_TWI_ACR_DATAL(len)	((len) & 0xff)
#define	AT91_TWI_ACR_DIR	BIT(8)

#define	AT91_TWI_FMR		0x0050	/* FIFO Mode Register */
#define	AT91_TWI_FMR_TXRDYM(mode)	(((mode) & 0x3) << 0)	/* TX FIFO trigger threshold */
#define	AT91_TWI_FMR_TXRDYM_MASK	(0x3 << 0)
#define	AT91_TWI_FMR_RXRDYM(mode)	(((mode) & 0x3) << 4)	/* RX FIFO trigger threshold */
#define	AT91_TWI_FMR_RXRDYM_MASK	(0x3 << 4)
#define	AT91_TWI_ONE_DATA	0x0
#define	AT91_TWI_TWO_DATA	0x1
#define	AT91_TWI_FOUR_DATA	0x2

#define	AT91_TWI_FLR		0x0054	/* FIFO Level Register */

#define	AT91_TWI_FSR		0x0060	/* FIFO Status Register */
#define	AT91_TWI_FIER		0x0064	/* FIFO Interrupt Enable Register */
#define	AT91_TWI_FIDR		0x0068	/* FIFO Interrupt Disable Register */
#define	AT91_TWI_FIMR		0x006c	/* FIFO Interrupt Mask Register */

#define	AT91_TWI_VER		0x00fc	/* Version Register */
/*
 * Per-SoC configuration, selected through the platform device id table or
 * the OF match table below.
 */
struct at91_twi_pdata {
	unsigned clk_max_div;		/* upper bound for the CWGR CKDIV field */
	unsigned clk_offset;		/* SoC-specific offset in the clock formula */
	bool has_unre_flag;		/* SR reliably reports underrun (UNRE) */
	bool has_alt_cmd;		/* alternative command mode available */
	struct at_dma_slave dma_slave;	/* NOTE(review): not referenced in the code visible here — confirm use in probe */
};
/*
 * State of one DMA-assisted transfer. The scatterlist has two entries so
 * a TX buffer can be split into a 4-byte-aligned part and a remainder
 * when the controller has FIFOs (see at91_twi_write_data_dma()).
 */
struct at91_twi_dma {
	struct dma_chan *chan_rx;		/* channel used for reads */
	struct dma_chan *chan_tx;		/* channel used for writes */
	struct scatterlist sg[2];		/* 1 or 2 segments describing dev->buf */
	struct dma_async_tx_descriptor *data_desc;	/* NOTE(review): unused in the visible code — confirm */
	enum dma_data_direction direction;	/* direction of the current transfer */
	bool buf_mapped;			/* dev->buf is currently DMA-mapped */
	bool xfer_in_progress;			/* a descriptor has been submitted */
};
/* Per-controller driver state. */
struct at91_twi_dev {
	struct device *dev;		/* device used for DMA mapping and logging */
	void __iomem *base;		/* mapped TWI register window */
	struct completion cmd_complete;	/* completed by the IRQ handler on TXCOMP/NACK */
	struct clk *clk;		/* peripheral clock; its rate feeds CWGR */
	u8 *buf;			/* cursor into the current message buffer */
	size_t buf_len;			/* bytes remaining in the current transfer */
	struct i2c_msg *msg;		/* message currently being processed */
	int irq;
	unsigned imr;			/* mask saved by at91_twi_irq_save() */
	unsigned transfer_status;	/* SR bits accumulated by the IRQ handler */
	struct i2c_adapter adapter;
	unsigned twi_cwgr_reg;		/* precomputed clock waveform value */
	struct at91_twi_pdata *pdata;	/* SoC-specific configuration */
	bool use_dma;			/* presumably set when DMA setup succeeded — set outside this chunk, TODO confirm */
	bool recv_len_abort;		/* aborting an invalid SMBus block read */
	u32 fifo_size;			/* FIFO depth; zero when the IP has no FIFO */
	struct at91_twi_dma dma;
};
  128. static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
  129. {
  130. return readl_relaxed(dev->base + reg);
  131. }
  132. static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
  133. {
  134. writel_relaxed(val, dev->base + reg);
  135. }
/* Mask every interrupt source this driver uses. */
static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
}
/*
 * Record which of the driver's interrupt sources are enabled (IMR), then
 * mask them. Paired with at91_twi_irq_restore(). IMR must be read before
 * the disable, hence the statement order.
 */
static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
	at91_disable_twi_interrupts(dev);
}
/* Re-enable the interrupt sources saved by at91_twi_irq_save(). */
static void at91_twi_irq_restore(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IER, dev->imr);
}
/*
 * Reset the controller and configure it as an i2c master using the clock
 * settings precomputed by at91_calc_twi_clock(). Also used to recover the
 * bus after a transfer timeout.
 */
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
	at91_disable_twi_interrupts(dev);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}
/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 *
 * The result is cached in dev->twi_cwgr_reg and programmed into CWGR by
 * at91_init_twi_bus().
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
	int ckdiv, cdiv, div;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;

	/* smallest overall divider that does not exceed the requested rate */
	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * twi_clk) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	/* clamp to the SoC limit; the bus then runs slower than requested */
	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	/* CHDIV == CLDIV == cdiv gives a symmetric waveform */
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
	dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
/*
 * Abort any in-flight DMA transfer and undo the buffer mapping. The
 * driver's interrupts are masked across the teardown so the IRQ handler
 * cannot observe half-released DMA state.
 */
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}
  201. static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
  202. {
  203. if (!dev->buf_len)
  204. return;
  205. /* 8bit write works with and without FIFO */
  206. writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
  207. /* send stop when last byte has been written */
  208. if (--dev->buf_len == 0)
  209. if (!dev->pdata->has_alt_cmd)
  210. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  211. dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
  212. ++dev->buf;
  213. }
/*
 * DMA completion callback for writes: unmap the buffer, then arm the
 * TXCOMP interrupt and (without alternative command mode) request the
 * STOP condition.
 */
static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, THR/TX FIFO is likely not to be empty
	 * yet. So we have to wait for TXCOMP or NACK bits to be set into the
	 * Status Register to be sure that the STOP bit has been sent and the
	 * transfer is completed. The NACK interrupt has already been enabled,
	 * we just have to enable TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->pdata->has_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
/*
 * Start a DMA write of the current buffer to the Transmit Holding
 * Register. On FIFO-equipped controllers the buffer is split into a
 * 4-byte-aligned segment plus a remainder so the DMA controller can use
 * wide accesses for the bulk of the data. Completion continues in
 * at91_twi_write_data_dma_callback(); descriptor-preparation failure is
 * unwound through at91_twi_dma_cleanup().
 *
 * NOTE(review): the dma_map_single() error path returns with the
 * driver's interrupts still masked by at91_twi_irq_save() — confirm
 * this is intended.
 */
static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	/* mask interrupts while the mapping state is being published */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		/* aligned bulk of the buffer */
		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		/* trailing 1..3 bytes, transferred separately */
		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
  294. static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
  295. {
  296. if (!dev->buf_len)
  297. return;
  298. /* 8bit read works with and without FIFO */
  299. *dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
  300. --dev->buf_len;
  301. /* return if aborting, we only needed to read RHR to clear RXRDY*/
  302. if (dev->recv_len_abort)
  303. return;
  304. /* handle I2C_SMBUS_BLOCK_DATA */
  305. if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
  306. /* ensure length byte is a valid value */
  307. if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
  308. dev->msg->flags &= ~I2C_M_RECV_LEN;
  309. dev->buf_len += *dev->buf;
  310. dev->msg->len = dev->buf_len + 1;
  311. dev_dbg(dev->dev, "received block length %d\n",
  312. dev->buf_len);
  313. } else {
  314. /* abort and send the stop by reading one more byte */
  315. dev->recv_len_abort = true;
  316. dev->buf_len = 1;
  317. }
  318. }
  319. /* send stop if second but last byte has been read */
  320. if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
  321. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  322. dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
  323. ++dev->buf;
  324. }
/*
 * DMA completion callback for reads: unmap the buffer and enable the
 * interrupts that finish the transfer. Without alternative command mode,
 * the last two bytes were excluded from the DMA transfer and are fetched
 * through RXRDY interrupts (see at91_twi_read_data_dma()).
 *
 * NOTE(review): the unmap size is dev->buf_len, while the mapping in
 * at91_twi_read_data_dma() used dev->buf_len - 2 when has_alt_cmd is
 * false — confirm the size mismatch is intentional.
 */
static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->pdata->has_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}
/*
 * Start a DMA read into the current buffer. Without alternative command
 * mode only buf_len - 2 bytes are read by DMA; the final two bytes are
 * read manually so the STOP condition can be requested early enough (see
 * the comment in at91_do_twi_transfer()). Completion continues in
 * at91_twi_read_data_dma_callback().
 *
 * NOTE(review): the dma_map_single() error path returns with the
 * driver's interrupts still masked by at91_twi_irq_save() — confirm
 * this is intended.
 */
static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
/*
 * TWI interrupt handler.
 *
 * At most one data byte is moved per invocation; RXRDY is checked before
 * TXRDY, so a pending receive always wins. The raw status bits are
 * accumulated in dev->transfer_status for later error classification in
 * at91_do_twi_transfer(). TXCOMP or NACK ends the command: interrupts
 * are masked and the waiting thread is woken.
 */
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	else if (irqstatus & AT91_TWI_RXRDY)
		at91_twi_read_next_byte(dev);
	else if (irqstatus & AT91_TWI_TXRDY)
		at91_twi_write_next_byte(dev);

	/* catch error flags */
	dev->transfer_status |= status;

	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	}

	return IRQ_HANDLED;
}
  404. static int at91_do_twi_transfer(struct at91_twi_dev *dev)
  405. {
  406. int ret;
  407. unsigned long time_left;
  408. bool has_unre_flag = dev->pdata->has_unre_flag;
  409. bool has_alt_cmd = dev->pdata->has_alt_cmd;
  410. /*
  411. * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
  412. * read flag but shows the state of the transmission at the time the
  413. * Status Register is read. According to the programmer datasheet,
  414. * TXCOMP is set when both holding register and internal shifter are
  415. * empty and STOP condition has been sent.
  416. * Consequently, we should enable NACK interrupt rather than TXCOMP to
  417. * detect transmission failure.
  418. * Indeed let's take the case of an i2c write command using DMA.
  419. * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
  420. * TXCOMP bits are set together into the Status Register.
  421. * LOCK is a clear on write bit, which is set to prevent the DMA
  422. * controller from sending new data on the i2c bus after a NACK
  423. * condition has happened. Once locked, this i2c peripheral stops
  424. * triggering the DMA controller for new data but it is more than
  425. * likely that a new DMA transaction is already in progress, writing
  426. * into the Transmit Holding Register. Since the peripheral is locked,
  427. * these new data won't be sent to the i2c bus but they will remain
  428. * into the Transmit Holding Register, so TXCOMP bit is cleared.
  429. * Then when the interrupt handler is called, the Status Register is
  430. * read: the TXCOMP bit is clear but NACK bit is still set. The driver
  431. * manage the error properly, without waiting for timeout.
  432. * This case can be reproduced easyly when writing into an at24 eeprom.
  433. *
  434. * Besides, the TXCOMP bit is already set before the i2c transaction
  435. * has been started. For read transactions, this bit is cleared when
  436. * writing the START bit into the Control Register. So the
  437. * corresponding interrupt can safely be enabled just after.
  438. * However for write transactions managed by the CPU, we first write
  439. * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
  440. * interrupt. If TXCOMP interrupt were enabled before writing into THR,
  441. * the interrupt handler would be called immediately and the i2c command
  442. * would be reported as completed.
  443. * Also when a write transaction is managed by the DMA controller,
  444. * enabling the TXCOMP interrupt in this function may lead to a race
  445. * condition since we don't know whether the TXCOMP interrupt is enabled
  446. * before or after the DMA has started to write into THR. So the TXCOMP
  447. * interrupt is enabled later by at91_twi_write_data_dma_callback().
  448. * Immediately after in that DMA callback, if the alternative command
  449. * mode is not used, we still need to send the STOP condition manually
  450. * writing the corresponding bit into the Control Register.
  451. */
  452. dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
  453. (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
  454. reinit_completion(&dev->cmd_complete);
  455. dev->transfer_status = 0;
  456. if (dev->fifo_size) {
  457. unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
  458. /* Reset FIFO mode register */
  459. fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
  460. AT91_TWI_FMR_RXRDYM_MASK);
  461. fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
  462. fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
  463. at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
  464. /* Flush FIFOs */
  465. at91_twi_write(dev, AT91_TWI_CR,
  466. AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
  467. }
  468. if (!dev->buf_len) {
  469. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
  470. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  471. } else if (dev->msg->flags & I2C_M_RD) {
  472. unsigned start_flags = AT91_TWI_START;
  473. if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
  474. dev_err(dev->dev, "RXRDY still set!");
  475. at91_twi_read(dev, AT91_TWI_RHR);
  476. }
  477. /* if only one byte is to be read, immediately stop transfer */
  478. if (!has_alt_cmd && dev->buf_len <= 1 &&
  479. !(dev->msg->flags & I2C_M_RECV_LEN))
  480. start_flags |= AT91_TWI_STOP;
  481. at91_twi_write(dev, AT91_TWI_CR, start_flags);
  482. /*
  483. * When using dma without alternative command mode, the last
  484. * byte has to be read manually in order to not send the stop
  485. * command too late and then to receive extra data.
  486. * In practice, there are some issues if you use the dma to
  487. * read n-1 bytes because of latency.
  488. * Reading n-2 bytes with dma and the two last ones manually
  489. * seems to be the best solution.
  490. */
  491. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  492. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
  493. at91_twi_read_data_dma(dev);
  494. } else {
  495. at91_twi_write(dev, AT91_TWI_IER,
  496. AT91_TWI_TXCOMP |
  497. AT91_TWI_NACK |
  498. AT91_TWI_RXRDY);
  499. }
  500. } else {
  501. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  502. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
  503. at91_twi_write_data_dma(dev);
  504. } else {
  505. at91_twi_write_next_byte(dev);
  506. at91_twi_write(dev, AT91_TWI_IER,
  507. AT91_TWI_TXCOMP |
  508. AT91_TWI_NACK |
  509. AT91_TWI_TXRDY);
  510. }
  511. }
  512. time_left = wait_for_completion_timeout(&dev->cmd_complete,
  513. dev->adapter.timeout);
  514. if (time_left == 0) {
  515. dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
  516. dev_err(dev->dev, "controller timed out\n");
  517. at91_init_twi_bus(dev);
  518. ret = -ETIMEDOUT;
  519. goto error;
  520. }
  521. if (dev->transfer_status & AT91_TWI_NACK) {
  522. dev_dbg(dev->dev, "received nack\n");
  523. ret = -EREMOTEIO;
  524. goto error;
  525. }
  526. if (dev->transfer_status & AT91_TWI_OVRE) {
  527. dev_err(dev->dev, "overrun while reading\n");
  528. ret = -EIO;
  529. goto error;
  530. }
  531. if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
  532. dev_err(dev->dev, "underrun while writing\n");
  533. ret = -EIO;
  534. goto error;
  535. }
  536. if ((has_alt_cmd || dev->fifo_size) &&
  537. (dev->transfer_status & AT91_TWI_LOCK)) {
  538. dev_err(dev->dev, "tx locked\n");
  539. ret = -EIO;
  540. goto error;
  541. }
  542. if (dev->recv_len_abort) {
  543. dev_err(dev->dev, "invalid smbus block length recvd\n");
  544. ret = -EPROTO;
  545. goto error;
  546. }
  547. dev_dbg(dev->dev, "transfer complete\n");
  548. return 0;
  549. error:
  550. /* first stop DMA transfer if still in progress */
  551. at91_twi_dma_cleanup(dev);
  552. /* then flush THR/FIFO and unlock TX if locked */
  553. if ((has_alt_cmd || dev->fifo_size) &&
  554. (dev->transfer_status & AT91_TWI_LOCK)) {
  555. dev_dbg(dev->dev, "unlock tx\n");
  556. at91_twi_write(dev, AT91_TWI_CR,
  557. AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
  558. }
  559. return ret;
  560. }
/*
 * i2c_algorithm .master_xfer implementation.
 *
 * The adapter quirks limit input to a single message or a write+read
 * pair with the same target address. For a pair, the first message's
 * payload (up to 3 bytes, per the quirks) is loaded big-endian-first
 * into the Internal Address Register and the second message becomes the
 * actual transfer. Returns the number of processed messages on success
 * or a negative errno.
 */
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read, use_alt_cmd = false;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			/* IADRSZ grows by one per address byte */
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0) {
			/* let the controller drive START/STOP itself */
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}
  611. /*
  612. * The hardware can handle at most two messages concatenated by a
  613. * repeated start via it's internal address feature.
  614. */
  615. static struct i2c_adapter_quirks at91_twi_quirks = {
  616. .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
  617. .max_comb_1st_msg_len = 3,
  618. };
  619. static u32 at91_twi_func(struct i2c_adapter *adapter)
  620. {
  621. return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
  622. | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
  623. }
  624. static struct i2c_algorithm at91_twi_algorithm = {
  625. .master_xfer = at91_twi_xfer,
  626. .functionality = at91_twi_func,
  627. };
/*
 * Per-SoC clock divider limits and feature flags, consumed by
 * at91_calc_twi_clock() and the transfer paths.
 */
static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};
/*
 * platform_device id table for non-DT probing; driver_data points at the
 * matching per-SoC configuration above.
 */
static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};
#if defined(CONFIG_OF)
/* at91sam9x5: at91sam9g20-like dividers, no UNRE flag, no alt command mode */
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};

/* sama5d2: alternative command mode and UNRE reporting */
static struct at91_twi_pdata sama5d2_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = true,
	.has_alt_cmd = true,
};

/* device-tree match table; .data points at the per-SoC configuration */
static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	} , {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	} , {
		.compatible = "atmel,at91sam9261-i2c",
		.data = &at91sam9261_config,
	} , {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	} , {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		.compatible = "atmel,sama5d2-i2c",
		.data = &sama5d2_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
  719. static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
  720. {
  721. int ret = 0;
  722. struct dma_slave_config slave_config;
  723. struct at91_twi_dma *dma = &dev->dma;
  724. enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  725. /*
  726. * The actual width of the access will be chosen in
  727. * dmaengine_prep_slave_sg():
  728. * for each buffer in the scatter-gather list, if its size is aligned
  729. * to addr_width then addr_width accesses will be performed to transfer
  730. * the buffer. On the other hand, if the buffer size is not aligned to
  731. * addr_width then the buffer is transferred using single byte accesses.
  732. * Please refer to the Atmel eXtended DMA controller driver.
  733. * When FIFOs are used, the TXRDYM threshold can always be set to
  734. * trigger the XDMAC when at least 4 data can be written into the TX
  735. * FIFO, even if single byte accesses are performed.
  736. * However the RXRDYM threshold must be set to fit the access width,
  737. * deduced from buffer length, so the XDMAC is triggered properly to
  738. * read data from the RX FIFO.
  739. */
  740. if (dev->fifo_size)
  741. addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  742. memset(&slave_config, 0, sizeof(slave_config));
  743. slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
  744. slave_config.src_addr_width = addr_width;
  745. slave_config.src_maxburst = 1;
  746. slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
  747. slave_config.dst_addr_width = addr_width;
  748. slave_config.dst_maxburst = 1;
  749. slave_config.device_fc = false;
  750. dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
  751. if (IS_ERR(dma->chan_tx)) {
  752. ret = PTR_ERR(dma->chan_tx);
  753. dma->chan_tx = NULL;
  754. goto error;
  755. }
  756. dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
  757. if (IS_ERR(dma->chan_rx)) {
  758. ret = PTR_ERR(dma->chan_rx);
  759. dma->chan_rx = NULL;
  760. goto error;
  761. }
  762. slave_config.direction = DMA_MEM_TO_DEV;
  763. if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
  764. dev_err(dev->dev, "failed to configure tx channel\n");
  765. ret = -EINVAL;
  766. goto error;
  767. }
  768. slave_config.direction = DMA_DEV_TO_MEM;
  769. if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
  770. dev_err(dev->dev, "failed to configure rx channel\n");
  771. ret = -EINVAL;
  772. goto error;
  773. }
  774. sg_init_table(dma->sg, 2);
  775. dma->buf_mapped = false;
  776. dma->xfer_in_progress = false;
  777. dev->use_dma = true;
  778. dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
  779. dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
  780. return ret;
  781. error:
  782. if (ret != -EPROBE_DEFER)
  783. dev_info(dev->dev, "can't use DMA, error %d\n", ret);
  784. if (dma->chan_rx)
  785. dma_release_channel(dma->chan_rx);
  786. if (dma->chan_tx)
  787. dma_release_channel(dma->chan_tx);
  788. return ret;
  789. }
  790. static struct at91_twi_pdata *at91_twi_get_driver_data(
  791. struct platform_device *pdev)
  792. {
  793. if (pdev->dev.of_node) {
  794. const struct of_device_id *match;
  795. match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
  796. if (!match)
  797. return NULL;
  798. return (struct at91_twi_pdata *)match->data;
  799. }
  800. return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
  801. }
  802. static int at91_twi_probe(struct platform_device *pdev)
  803. {
  804. struct at91_twi_dev *dev;
  805. struct resource *mem;
  806. int rc;
  807. u32 phy_addr;
  808. u32 bus_clk_rate;
  809. dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
  810. if (!dev)
  811. return -ENOMEM;
  812. init_completion(&dev->cmd_complete);
  813. dev->dev = &pdev->dev;
  814. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  815. if (!mem)
  816. return -ENODEV;
  817. phy_addr = mem->start;
  818. dev->pdata = at91_twi_get_driver_data(pdev);
  819. if (!dev->pdata)
  820. return -ENODEV;
  821. dev->base = devm_ioremap_resource(&pdev->dev, mem);
  822. if (IS_ERR(dev->base))
  823. return PTR_ERR(dev->base);
  824. dev->irq = platform_get_irq(pdev, 0);
  825. if (dev->irq < 0)
  826. return dev->irq;
  827. rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
  828. dev_name(dev->dev), dev);
  829. if (rc) {
  830. dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
  831. return rc;
  832. }
  833. platform_set_drvdata(pdev, dev);
  834. dev->clk = devm_clk_get(dev->dev, NULL);
  835. if (IS_ERR(dev->clk)) {
  836. dev_err(dev->dev, "no clock defined\n");
  837. return -ENODEV;
  838. }
  839. clk_prepare_enable(dev->clk);
  840. if (dev->dev->of_node) {
  841. rc = at91_twi_configure_dma(dev, phy_addr);
  842. if (rc == -EPROBE_DEFER)
  843. return rc;
  844. }
  845. if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
  846. &dev->fifo_size)) {
  847. dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
  848. }
  849. rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
  850. &bus_clk_rate);
  851. if (rc)
  852. bus_clk_rate = DEFAULT_TWI_CLK_HZ;
  853. at91_calc_twi_clock(dev, bus_clk_rate);
  854. at91_init_twi_bus(dev);
  855. snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
  856. i2c_set_adapdata(&dev->adapter, dev);
  857. dev->adapter.owner = THIS_MODULE;
  858. dev->adapter.class = I2C_CLASS_DEPRECATED;
  859. dev->adapter.algo = &at91_twi_algorithm;
  860. dev->adapter.quirks = &at91_twi_quirks;
  861. dev->adapter.dev.parent = dev->dev;
  862. dev->adapter.nr = pdev->id;
  863. dev->adapter.timeout = AT91_I2C_TIMEOUT;
  864. dev->adapter.dev.of_node = pdev->dev.of_node;
  865. pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
  866. pm_runtime_use_autosuspend(dev->dev);
  867. pm_runtime_set_active(dev->dev);
  868. pm_runtime_enable(dev->dev);
  869. rc = i2c_add_numbered_adapter(&dev->adapter);
  870. if (rc) {
  871. dev_err(dev->dev, "Adapter %s registration failed\n",
  872. dev->adapter.name);
  873. clk_disable_unprepare(dev->clk);
  874. pm_runtime_disable(dev->dev);
  875. pm_runtime_set_suspended(dev->dev);
  876. return rc;
  877. }
  878. dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
  879. at91_twi_read(dev, AT91_TWI_VER));
  880. return 0;
  881. }
/*
 * Tear down the adapter registered by at91_twi_probe().  The adapter is
 * deleted first so no new transfers can start before the clock is gated
 * and runtime PM is disabled.
 */
static int at91_twi_remove(struct platform_device *pdev)
{
	struct at91_twi_dev *dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&dev->adapter);
	clk_disable_unprepare(dev->clk);

	pm_runtime_disable(dev->dev);
	pm_runtime_set_suspended(dev->dev);

	return 0;
}
  891. #ifdef CONFIG_PM
  892. static int at91_twi_runtime_suspend(struct device *dev)
  893. {
  894. struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
  895. clk_disable_unprepare(twi_dev->clk);
  896. pinctrl_pm_select_sleep_state(dev);
  897. return 0;
  898. }
  899. static int at91_twi_runtime_resume(struct device *dev)
  900. {
  901. struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
  902. pinctrl_pm_select_default_state(dev);
  903. return clk_prepare_enable(twi_dev->clk);
  904. }
/*
 * System sleep, noirq phase: if the controller is not already runtime
 * suspended, suspend it now so the clock is gated across system suspend.
 */
static int at91_twi_suspend_noirq(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev))
		at91_twi_runtime_suspend(dev);

	return 0;
}
  911. static int at91_twi_resume_noirq(struct device *dev)
  912. {
  913. int ret;
  914. if (!pm_runtime_status_suspended(dev)) {
  915. ret = at91_twi_runtime_resume(dev);
  916. if (ret)
  917. return ret;
  918. }
  919. pm_runtime_mark_last_busy(dev);
  920. pm_request_autosuspend(dev);
  921. return 0;
  922. }
  923. static const struct dev_pm_ops at91_twi_pm = {
  924. .suspend_noirq = at91_twi_suspend_noirq,
  925. .resume_noirq = at91_twi_resume_noirq,
  926. .runtime_suspend = at91_twi_runtime_suspend,
  927. .runtime_resume = at91_twi_runtime_resume,
  928. };
  929. #define at91_twi_pm_ops (&at91_twi_pm)
  930. #else
  931. #define at91_twi_pm_ops NULL
  932. #endif
/* Platform driver glue: supports both legacy ids and DT matching. */
static struct platform_driver at91_twi_driver = {
	.probe = at91_twi_probe,
	.remove = at91_twi_remove,
	.id_table = at91_twi_devtypes,
	.driver = {
		.name = "at91_i2c",
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm = at91_twi_pm_ops,
	},
};
/* Module init: register the platform driver. */
static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}

/*
 * NOTE(review): registered at subsys_initcall level rather than via the
 * usual module_init/module_platform_driver — presumably so the bus is
 * available early for devices probed during other subsystems' init;
 * confirm before changing.
 */
subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);

MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_i2c");