spi-stm32.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323
  1. /*
  2. * STMicroelectronics STM32 SPI Controller driver (master mode only)
  3. *
  4. * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  5. * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
  6. *
  7. * License terms: GPL V2.0.
  8. *
  9. * spi_stm32 driver is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License version 2 as published by
  11. * the Free Software Foundation.
  12. *
  13. * spi_stm32 driver is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  15. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
  16. * details.
  17. *
  18. * You should have received a copy of the GNU General Public License along with
  19. * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
  20. */
  21. #include <linux/debugfs.h>
  22. #include <linux/clk.h>
  23. #include <linux/delay.h>
  24. #include <linux/dmaengine.h>
  25. #include <linux/gpio.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/iopoll.h>
  28. #include <linux/module.h>
  29. #include <linux/of_platform.h>
  30. #include <linux/pm_runtime.h>
  31. #include <linux/reset.h>
  32. #include <linux/spi/spi.h>
#define DRIVER_NAME "spi_stm32"

/* STM32 SPI registers */
#define STM32_SPI_CR1 0x00
#define STM32_SPI_CR2 0x04
#define STM32_SPI_CFG1 0x08
#define STM32_SPI_CFG2 0x0C
#define STM32_SPI_IER 0x10
#define STM32_SPI_SR 0x14
#define STM32_SPI_IFCR 0x18
#define STM32_SPI_TXDR 0x20
#define STM32_SPI_RXDR 0x30
#define STM32_SPI_I2SCFGR 0x50

/* STM32_SPI_CR1 bit fields */
#define SPI_CR1_SPE BIT(0)		/* serial peripheral enable */
#define SPI_CR1_MASRX BIT(8)		/* master automatic suspension in RX */
#define SPI_CR1_CSTART BIT(9)		/* master transfer start */
#define SPI_CR1_CSUSP BIT(10)		/* master transfer suspend request */
#define SPI_CR1_HDDIR BIT(11)		/* half-duplex direction */
#define SPI_CR1_SSI BIT(12)		/* internal slave-select level */

/* STM32_SPI_CR2 bit fields */
#define SPI_CR2_TSIZE_SHIFT 0
#define SPI_CR2_TSIZE GENMASK(15, 0)	/* number of frames in the transfer */

/* STM32_SPI_CFG1 bit fields */
#define SPI_CFG1_DSIZE_SHIFT 0
#define SPI_CFG1_DSIZE GENMASK(4, 0)	/* frame bit size minus one */
#define SPI_CFG1_FTHLV_SHIFT 5
#define SPI_CFG1_FTHLV GENMASK(8, 5)	/* FIFO threshold level minus one */
#define SPI_CFG1_RXDMAEN BIT(14)	/* RX DMA request enable */
#define SPI_CFG1_TXDMAEN BIT(15)	/* TX DMA request enable */
#define SPI_CFG1_MBR_SHIFT 28
#define SPI_CFG1_MBR GENMASK(30, 28)	/* master baud rate prescaler */
#define SPI_CFG1_MBR_MIN 0
#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)

/* STM32_SPI_CFG2 bit fields */
#define SPI_CFG2_MIDI_SHIFT 4
#define SPI_CFG2_MIDI GENMASK(7, 4)	/* master inter-data idleness */
#define SPI_CFG2_COMM_SHIFT 17
#define SPI_CFG2_COMM GENMASK(18, 17)	/* communication mode */
#define SPI_CFG2_SP_SHIFT 19
#define SPI_CFG2_SP GENMASK(21, 19)	/* serial protocol */
#define SPI_CFG2_MASTER BIT(22)
#define SPI_CFG2_LSBFRST BIT(23)
#define SPI_CFG2_CPHA BIT(24)
#define SPI_CFG2_CPOL BIT(25)
#define SPI_CFG2_SSM BIT(26)		/* software slave-select management */
#define SPI_CFG2_AFCNTR BIT(31)

/* STM32_SPI_IER bit fields */
#define SPI_IER_RXPIE BIT(0)		/* RX packet available */
#define SPI_IER_TXPIE BIT(1)		/* TX packet space available */
#define SPI_IER_DXPIE BIT(2)		/* duplex (RXP and TXP) */
#define SPI_IER_EOTIE BIT(3)		/* end of transfer */
#define SPI_IER_TXTFIE BIT(4)		/* transmission transfer filled */
#define SPI_IER_OVRIE BIT(6)		/* overrun */
#define SPI_IER_MODFIE BIT(9)		/* mode fault */
#define SPI_IER_ALL GENMASK(10, 0)

/* STM32_SPI_SR bit fields */
#define SPI_SR_RXP BIT(0)		/* RX packet available */
#define SPI_SR_TXP BIT(1)		/* TX packet space available */
#define SPI_SR_EOT BIT(3)		/* end of transfer */
#define SPI_SR_OVR BIT(6)		/* overrun */
#define SPI_SR_MODF BIT(9)		/* mode fault */
#define SPI_SR_SUSP BIT(11)		/* suspend acknowledge */
#define SPI_SR_RXPLVL_SHIFT 13
#define SPI_SR_RXPLVL GENMASK(14, 13)	/* RX FIFO packing level */
#define SPI_SR_RXWNE BIT(15)		/* RX FIFO word not empty */

/* STM32_SPI_IFCR bit fields */
#define SPI_IFCR_ALL GENMASK(11, 3)

/* STM32_SPI_I2SCFGR bit fields */
#define SPI_I2SCFGR_I2SMOD BIT(0)

/* SPI Master Baud Rate min/max divisor */
#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN)
#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX)

/* SPI Communication mode */
#define SPI_FULL_DUPLEX 0
#define SPI_SIMPLEX_TX 1
#define SPI_SIMPLEX_RX 2
#define SPI_HALF_DUPLEX 3

/* Nanoseconds per second, used to convert an idleness time to clock periods */
#define SPI_1HZ_NS 1000000000
/**
 * struct stm32_spi - private data of the SPI controller
 * @dev: driver model representation of the controller
 * @master: controller master interface
 * @base: virtual memory area
 * @clk: hw kernel clock feeding the SPI clock generator
 * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
 * @rst: SPI controller reset line
 * @lock: prevent I/O concurrent access
 * @irq: SPI controller interrupt line
 * @fifo_size: size of the embedded fifo in bytes
 * @cur_midi: master inter-data idleness in ns
 * @cur_speed: speed configured in Hz
 * @cur_bpw: number of bits in a single SPI data frame
 * @cur_fthlv: fifo threshold level (data frames in a single data packet)
 * @cur_comm: SPI communication mode
 * @cur_xferlen: current transfer length in bytes
 * @cur_usedma: boolean to know if dma is used in current transfer
 * @tx_buf: data to be written, or NULL
 * @rx_buf: data to be read, or NULL
 * @tx_len: number of data to be written in bytes (decremented as they go out)
 * @rx_len: number of data to be read in bytes (decremented as they come in)
 * @dma_tx: dma channel for TX transfer
 * @dma_rx: dma channel for RX transfer
 * @phys_addr: SPI registers physical base address
 */
struct stm32_spi {
	struct device *dev;
	struct spi_master *master;
	void __iomem *base;
	struct clk *clk;
	u32 clk_rate;
	struct reset_control *rst;
	spinlock_t lock; /* prevent I/O concurrent access */
	int irq;
	unsigned int fifo_size;
	unsigned int cur_midi;
	unsigned int cur_speed;
	unsigned int cur_bpw;
	unsigned int cur_fthlv;
	unsigned int cur_comm;
	unsigned int cur_xferlen;
	bool cur_usedma;
	const void *tx_buf;
	void *rx_buf;
	int tx_len;
	int rx_len;
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	dma_addr_t phys_addr;
};
  162. static inline void stm32_spi_set_bits(struct stm32_spi *spi,
  163. u32 offset, u32 bits)
  164. {
  165. writel_relaxed(readl_relaxed(spi->base + offset) | bits,
  166. spi->base + offset);
  167. }
  168. static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
  169. u32 offset, u32 bits)
  170. {
  171. writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
  172. spi->base + offset);
  173. }
/**
 * stm32_spi_get_fifo_size - Return fifo size
 * @spi: pointer to the spi controller data structure
 *
 * Probes the FIFO depth empirically: with the controller enabled, bytes are
 * stuffed into TXDR until the TXP (packet-space available) flag drops, and
 * the number of accepted writes is the depth in bytes. Disabling the
 * controller afterwards flushes the dummy frames just queued.
 * NOTE(review): assumes the controller was disabled on entry and no transfer
 * is in flight — called from probe before any transfer is started.
 */
static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
{
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&spi->lock, flags);

	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);

	/* One byte per iteration until the FIFO reports full */
	while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)
		writeb_relaxed(++count, spi->base + STM32_SPI_TXDR);

	stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);

	spin_unlock_irqrestore(&spi->lock, flags);

	dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);

	return count;
}
  191. /**
  192. * stm32_spi_get_bpw_mask - Return bits per word mask
  193. * @spi: pointer to the spi controller data structure
  194. */
  195. static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
  196. {
  197. unsigned long flags;
  198. u32 cfg1, max_bpw;
  199. spin_lock_irqsave(&spi->lock, flags);
  200. /*
  201. * The most significant bit at DSIZE bit field is reserved when the
  202. * maximum data size of periperal instances is limited to 16-bit
  203. */
  204. stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE);
  205. cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1);
  206. max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT;
  207. max_bpw += 1;
  208. spin_unlock_irqrestore(&spi->lock, flags);
  209. dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
  210. return SPI_BPW_RANGE_MASK(4, max_bpw);
  211. }
  212. /**
  213. * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value
  214. * @spi: pointer to the spi controller data structure
  215. * @speed_hz: requested speed
  216. *
  217. * Return SPI_CFG1.MBR value in case of success or -EINVAL
  218. */
  219. static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
  220. {
  221. u32 div, mbrdiv;
  222. div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
  223. /*
  224. * SPI framework set xfer->speed_hz to master->max_speed_hz if
  225. * xfer->speed_hz is greater than master->max_speed_hz, and it returns
  226. * an error when xfer->speed_hz is lower than master->min_speed_hz, so
  227. * no need to check it there.
  228. * However, we need to ensure the following calculations.
  229. */
  230. if (div < SPI_MBR_DIV_MIN ||
  231. div > SPI_MBR_DIV_MAX)
  232. return -EINVAL;
  233. /* Determine the first power of 2 greater than or equal to div */
  234. if (div & (div - 1))
  235. mbrdiv = fls(div);
  236. else
  237. mbrdiv = fls(div) - 1;
  238. spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
  239. return mbrdiv - 1;
  240. }
  241. /**
  242. * stm32_spi_prepare_fthlv - Determine FIFO threshold level
  243. * @spi: pointer to the spi controller data structure
  244. */
  245. static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
  246. {
  247. u32 fthlv, half_fifo;
  248. /* data packet should not exceed 1/2 of fifo space */
  249. half_fifo = (spi->fifo_size / 2);
  250. if (spi->cur_bpw <= 8)
  251. fthlv = half_fifo;
  252. else if (spi->cur_bpw <= 16)
  253. fthlv = half_fifo / 2;
  254. else
  255. fthlv = half_fifo / 4;
  256. /* align packet size with data registers access */
  257. if (spi->cur_bpw > 8)
  258. fthlv -= (fthlv % 2); /* multiple of 2 */
  259. else
  260. fthlv -= (fthlv % 4); /* multiple of 4 */
  261. return fthlv;
  262. }
/**
 * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
 * @spi: pointer to the spi controller data structure
 *
 * Read from tx_buf depends on remaining bytes to avoid to read beyond
 * tx_buf end.
 */
static void stm32_spi_write_txfifo(struct stm32_spi *spi)
{
	/*
	 * Fill the TX FIFO while it reports free space (TXP) and data
	 * remains. Widest access first — 32-bit, then 16-bit, then 8-bit —
	 * so the tail of the buffer is never over-read.
	 */
	while ((spi->tx_len > 0) &&
	       (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) {
		/* Offset of the next unsent byte within the transfer */
		u32 offs = spi->cur_xferlen - spi->tx_len;

		if (spi->tx_len >= sizeof(u32)) {
			const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);

			writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR);
			spi->tx_len -= sizeof(u32);
		} else if (spi->tx_len >= sizeof(u16)) {
			const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);

			writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR);
			spi->tx_len -= sizeof(u16);
		} else {
			const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);

			writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR);
			spi->tx_len -= sizeof(u8);
		}
	}

	dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
}
/**
 * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
 * @spi: pointer to the spi controller data structure
 * @flush: when true, also drain data left in the FIFO at the end of a
 *         transfer — a whole word (RXWNE) or a partial packet (RXPLVL) —
 *         not only complete packets flagged by RXP
 *
 * Write in rx_buf depends on remaining bytes to avoid to write beyond
 * rx_buf end.
 */
static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
{
	u32 sr = readl_relaxed(spi->base + STM32_SPI_SR);
	u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;

	while ((spi->rx_len > 0) &&
	       ((sr & SPI_SR_RXP) ||
		(flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
		/* Offset of the next free byte within the transfer buffer */
		u32 offs = spi->cur_xferlen - spi->rx_len;

		/* Use the widest access the remaining data (or FIFO) allows */
		if ((spi->rx_len >= sizeof(u32)) ||
		    (flush && (sr & SPI_SR_RXWNE))) {
			u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);

			*rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR);
			spi->rx_len -= sizeof(u32);
		} else if ((spi->rx_len >= sizeof(u16)) ||
			   (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
			u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);

			*rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR);
			spi->rx_len -= sizeof(u16);
		} else {
			u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);

			*rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR);
			spi->rx_len -= sizeof(u8);
		}

		/* Refresh status for the next iteration */
		sr = readl_relaxed(spi->base + STM32_SPI_SR);
		rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
	}

	dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
		flush ? "(flush)" : "", spi->rx_len);
}
  327. /**
  328. * stm32_spi_enable - Enable SPI controller
  329. * @spi: pointer to the spi controller data structure
  330. *
  331. * SPI data transfer is enabled but spi_ker_ck is idle.
  332. * SPI_CFG1 and SPI_CFG2 are now write protected.
  333. */
  334. static void stm32_spi_enable(struct stm32_spi *spi)
  335. {
  336. dev_dbg(spi->dev, "enable controller\n");
  337. stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
  338. }
/**
 * stm32_spi_disable - Disable SPI controller
 * @spi: pointer to the spi controller data structure
 *
 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
 * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
 * RX-Fifo.
 */
static void stm32_spi_disable(struct stm32_spi *spi)
{
	unsigned long flags;
	u32 cr1, sr;

	dev_dbg(spi->dev, "disable controller\n");

	spin_lock_irqsave(&spi->lock, flags);

	cr1 = readl_relaxed(spi->base + STM32_SPI_CR1);

	/* Nothing to do if the controller is already disabled */
	if (!(cr1 & SPI_CR1_SPE)) {
		spin_unlock_irqrestore(&spi->lock, flags);
		return;
	}

	/* Wait on EOT or suspend the flow */
	if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR,
					      sr, !(sr & SPI_SR_EOT),
					      10, 100000) < 0) {
		/*
		 * The transfer did not settle in time: if a master transfer
		 * was started (CSTART), request a suspend (CSUSP) and wait
		 * for the hardware to acknowledge it through SUSP.
		 */
		if (cr1 & SPI_CR1_CSTART) {
			writel_relaxed(cr1 | SPI_CR1_CSUSP,
				       spi->base + STM32_SPI_CR1);
			if (readl_relaxed_poll_timeout_atomic(
						spi->base + STM32_SPI_SR,
						sr, !(sr & SPI_SR_SUSP),
						10, 100000) < 0)
				dev_warn(spi->dev,
					 "Suspend request timeout\n");
		}
	}

	/* Drain the RX FIFO before the disable below flushes it */
	if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
		stm32_spi_read_rxfifo(spi, true);

	/* Stop any in-flight DMA before turning the controller off */
	if (spi->cur_usedma && spi->tx_buf)
		dmaengine_terminate_all(spi->dma_tx);
	if (spi->cur_usedma && spi->rx_buf)
		dmaengine_terminate_all(spi->dma_rx);

	stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);

	stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN |
						SPI_CFG1_RXDMAEN);

	/* Disable interrupts and clear status flags */
	writel_relaxed(0, spi->base + STM32_SPI_IER);
	writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR);

	spin_unlock_irqrestore(&spi->lock, flags);
}
  387. /**
  388. * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
  389. *
  390. * If the current transfer size is greater than fifo size, use DMA.
  391. */
  392. static bool stm32_spi_can_dma(struct spi_master *master,
  393. struct spi_device *spi_dev,
  394. struct spi_transfer *transfer)
  395. {
  396. struct stm32_spi *spi = spi_master_get_devdata(master);
  397. dev_dbg(spi->dev, "%s: %s\n", __func__,
  398. (transfer->len > spi->fifo_size) ? "true" : "false");
  399. return (transfer->len > spi->fifo_size);
  400. }
/**
 * stm32_spi_irq - Interrupt handler for SPI controller events
 * @irq: interrupt line
 * @dev_id: SPI controller master interface
 */
static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct stm32_spi *spi = spi_master_get_devdata(master);
	u32 sr, ier, mask;
	unsigned long flags;
	bool end = false;

	spin_lock_irqsave(&spi->lock, flags);

	sr = readl_relaxed(spi->base + STM32_SPI_SR);
	ier = readl_relaxed(spi->base + STM32_SPI_IER);

	/* Only status bits whose interrupt source is enabled are relevant */
	mask = ier;
	/* EOTIE is triggered on EOT, SUSP and TXC events. */
	mask |= SPI_SR_SUSP;
	/*
	 * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
	 * Full-Duplex, need to poll RXP event to know if there are remaining
	 * data, before disabling SPI.
	 */
	if (spi->rx_buf && !spi->cur_usedma)
		mask |= SPI_SR_RXP;

	if (!(sr & mask)) {
		dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
			sr, ier);
		spin_unlock_irqrestore(&spi->lock, flags);
		return IRQ_NONE;
	}

	if (sr & SPI_SR_SUSP) {
		dev_warn(spi->dev, "Communication suspended\n");
		/* Salvage whatever already landed in the RX FIFO */
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32_spi_read_rxfifo(spi, false);
		/*
		 * If communication is suspended while using DMA, it means
		 * that something went wrong, so stop the current transfer
		 */
		if (spi->cur_usedma)
			end = true;
	}

	if (sr & SPI_SR_MODF) {
		dev_warn(spi->dev, "Mode fault: transfer aborted\n");
		end = true;
	}

	if (sr & SPI_SR_OVR) {
		dev_warn(spi->dev, "Overrun: received value discarded\n");
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32_spi_read_rxfifo(spi, false);
		/*
		 * If overrun is detected while using DMA, it means that
		 * something went wrong, so stop the current transfer
		 */
		if (spi->cur_usedma)
			end = true;
	}

	if (sr & SPI_SR_EOT) {
		/* End of transfer: flush remaining (partial) RX data */
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32_spi_read_rxfifo(spi, true);
		end = true;
	}

	/* Service the FIFOs for interrupt-driven (non-DMA) transfers */
	if (sr & SPI_SR_TXP)
		if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
			stm32_spi_write_txfifo(spi);

	if (sr & SPI_SR_RXP)
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32_spi_read_rxfifo(spi, false);

	/* Acknowledge the events that were just handled */
	writel_relaxed(mask, spi->base + STM32_SPI_IFCR);

	spin_unlock_irqrestore(&spi->lock, flags);

	if (end) {
		/* Outside the lock: stm32_spi_disable() takes it itself */
		spi_finalize_current_transfer(master);
		stm32_spi_disable(spi);
	}

	return IRQ_HANDLED;
}
  477. /**
  478. * stm32_spi_setup - setup device chip select
  479. */
  480. static int stm32_spi_setup(struct spi_device *spi_dev)
  481. {
  482. int ret = 0;
  483. if (!gpio_is_valid(spi_dev->cs_gpio)) {
  484. dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
  485. spi_dev->cs_gpio);
  486. return -EINVAL;
  487. }
  488. dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
  489. spi_dev->cs_gpio,
  490. (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
  491. ret = gpio_direction_output(spi_dev->cs_gpio,
  492. !(spi_dev->mode & SPI_CS_HIGH));
  493. return ret;
  494. }
/**
 * stm32_spi_prepare_msg - set up the controller to transfer a single message
 * @master: controller master interface
 * @msg: spi message to prepare for
 *
 * Applies the per-device mode bits (CPOL, CPHA, LSB-first) to SPI_CFG2 and
 * caches the optional "st,spi-midi-ns" inter-data idleness from the device
 * tree node for later use during transfer setup.
 */
static int stm32_spi_prepare_msg(struct spi_master *master,
				 struct spi_message *msg)
{
	struct stm32_spi *spi = spi_master_get_devdata(master);
	struct spi_device *spi_dev = msg->spi;
	struct device_node *np = spi_dev->dev.of_node;
	unsigned long flags;
	u32 cfg2_clrb = 0, cfg2_setb = 0;

	/* SPI slave device may need time between data frames */
	spi->cur_midi = 0;
	if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
		dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);

	/* Accumulate set/clear masks so CFG2 is updated in a single write */
	if (spi_dev->mode & SPI_CPOL)
		cfg2_setb |= SPI_CFG2_CPOL;
	else
		cfg2_clrb |= SPI_CFG2_CPOL;

	if (spi_dev->mode & SPI_CPHA)
		cfg2_setb |= SPI_CFG2_CPHA;
	else
		cfg2_clrb |= SPI_CFG2_CPHA;

	if (spi_dev->mode & SPI_LSB_FIRST)
		cfg2_setb |= SPI_CFG2_LSBFRST;
	else
		cfg2_clrb |= SPI_CFG2_LSBFRST;

	dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
		spi_dev->mode & SPI_CPOL,
		spi_dev->mode & SPI_CPHA,
		spi_dev->mode & SPI_LSB_FIRST,
		spi_dev->mode & SPI_CS_HIGH);

	spin_lock_irqsave(&spi->lock, flags);
	if (cfg2_clrb || cfg2_setb)
		writel_relaxed(
			(readl_relaxed(spi->base + STM32_SPI_CFG2) &
			 ~cfg2_clrb) | cfg2_setb,
			spi->base + STM32_SPI_CFG2);
	spin_unlock_irqrestore(&spi->lock, flags);

	return 0;
}
  536. /**
  537. * stm32_spi_dma_cb - dma callback
  538. *
  539. * DMA callback is called when the transfer is complete or when an error
  540. * occurs. If the transfer is complete, EOT flag is raised.
  541. */
  542. static void stm32_spi_dma_cb(void *data)
  543. {
  544. struct stm32_spi *spi = data;
  545. unsigned long flags;
  546. u32 sr;
  547. spin_lock_irqsave(&spi->lock, flags);
  548. sr = readl_relaxed(spi->base + STM32_SPI_SR);
  549. spin_unlock_irqrestore(&spi->lock, flags);
  550. if (!(sr & SPI_SR_EOT))
  551. dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
  552. /* Now wait for EOT, or SUSP or OVR in case of error */
  553. }
  554. /**
  555. * stm32_spi_dma_config - configure dma slave channel depending on current
  556. * transfer bits_per_word.
  557. */
  558. static void stm32_spi_dma_config(struct stm32_spi *spi,
  559. struct dma_slave_config *dma_conf,
  560. enum dma_transfer_direction dir)
  561. {
  562. enum dma_slave_buswidth buswidth;
  563. u32 maxburst;
  564. if (spi->cur_bpw <= 8)
  565. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  566. else if (spi->cur_bpw <= 16)
  567. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  568. else
  569. buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
  570. /* Valid for DMA Half or Full Fifo threshold */
  571. if (spi->cur_fthlv == 2)
  572. maxburst = 1;
  573. else
  574. maxburst = spi->cur_fthlv;
  575. memset(dma_conf, 0, sizeof(struct dma_slave_config));
  576. dma_conf->direction = dir;
  577. if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
  578. dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR;
  579. dma_conf->src_addr_width = buswidth;
  580. dma_conf->src_maxburst = maxburst;
  581. dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
  582. buswidth, maxburst);
  583. } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
  584. dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR;
  585. dma_conf->dst_addr_width = buswidth;
  586. dma_conf->dst_maxburst = maxburst;
  587. dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
  588. buswidth, maxburst);
  589. }
  590. }
/**
 * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
 *				interrupts
 * @spi: pointer to the spi controller data structure
 *
 * It must returns 0 if the transfer is finished or 1 if the transfer is still
 * in progress.
 */
static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
{
	unsigned long flags;
	u32 ier = 0;

	/* Enable the interrupts relative to the current communication mode */
	if (spi->tx_buf && spi->rx_buf)	/* Full Duplex */
		ier |= SPI_IER_DXPIE;
	else if (spi->tx_buf)		/* Half-Duplex TX dir or Simplex TX */
		ier |= SPI_IER_TXPIE;
	else if (spi->rx_buf)		/* Half-Duplex RX dir or Simplex RX */
		ier |= SPI_IER_RXPIE;

	/* Enable the interrupts relative to the end of transfer */
	ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;

	spin_lock_irqsave(&spi->lock, flags);

	stm32_spi_enable(spi);

	/* Be sure to have data in fifo before starting data transfer */
	if (spi->tx_buf)
		stm32_spi_write_txfifo(spi);

	/* CSTART kicks off the master transfer */
	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);

	writel_relaxed(ier, spi->base + STM32_SPI_IER);

	spin_unlock_irqrestore(&spi->lock, flags);

	return 1;
}
/**
 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
 * @spi: pointer to the spi controller data structure
 * @xfer: transfer whose pre-mapped sg lists are handed to the dmaengine
 *
 * It must returns 0 if the transfer is finished or 1 if the transfer is still
 * in progress.
 */
static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
				      struct spi_transfer *xfer)
{
	struct dma_slave_config tx_dma_conf, rx_dma_conf;
	struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
	unsigned long flags;
	u32 ier = 0;

	spin_lock_irqsave(&spi->lock, flags);

	rx_dma_desc = NULL;
	if (spi->rx_buf) {
		stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
		dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);

		/* Enable Rx DMA request */
		stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);

		rx_dma_desc = dmaengine_prep_slave_sg(
					spi->dma_rx, xfer->rx_sg.sgl,
					xfer->rx_sg.nents,
					rx_dma_conf.direction,
					DMA_PREP_INTERRUPT);
	}

	tx_dma_desc = NULL;
	if (spi->tx_buf) {
		stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
		dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);

		tx_dma_desc = dmaengine_prep_slave_sg(
					spi->dma_tx, xfer->tx_sg.sgl,
					xfer->tx_sg.nents,
					tx_dma_conf.direction,
					DMA_PREP_INTERRUPT);
	}

	/* Any required descriptor missing: fall back to irq transfer */
	if ((spi->tx_buf && !tx_dma_desc) ||
	    (spi->rx_buf && !rx_dma_desc))
		goto dma_desc_error;

	if (rx_dma_desc) {
		/* RX completion callback reports DMA errors via SR */
		rx_dma_desc->callback = stm32_spi_dma_cb;
		rx_dma_desc->callback_param = spi;

		if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
			dev_err(spi->dev, "Rx DMA submit failed\n");
			goto dma_desc_error;
		}
		/* Enable Rx DMA channel */
		dma_async_issue_pending(spi->dma_rx);
	}

	if (tx_dma_desc) {
		/* In simplex TX there is no RX callback, so hook TX instead */
		if (spi->cur_comm == SPI_SIMPLEX_TX) {
			tx_dma_desc->callback = stm32_spi_dma_cb;
			tx_dma_desc->callback_param = spi;
		}

		if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
			dev_err(spi->dev, "Tx DMA submit failed\n");
			goto dma_submit_error;
		}
		/* Enable Tx DMA channel */
		dma_async_issue_pending(spi->dma_tx);

		/* Enable Tx DMA request */
		stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN);
	}

	/* Enable the interrupts relative to the end of transfer */
	ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
	writel_relaxed(ier, spi->base + STM32_SPI_IER);

	stm32_spi_enable(spi);

	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);

	spin_unlock_irqrestore(&spi->lock, flags);

	return 1;

dma_submit_error:
	/* TX submit failed after RX was issued: tear the RX channel down */
	if (spi->rx_buf)
		dmaengine_terminate_all(spi->dma_rx);

dma_desc_error:
	stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);

	spin_unlock_irqrestore(&spi->lock, flags);

	dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");

	return stm32_spi_transfer_one_irq(spi);
}
/**
 * stm32_spi_transfer_one_setup - common setup to transfer a single
 *				  spi_transfer either using DMA or
 *				  interrupts.
 * @spi: driver private data
 * @spi_dev: slave device for this transfer (mode bits are read from it)
 * @transfer: the spi_transfer being set up
 *
 * Programs, under the controller lock: the data frame size and FIFO
 * threshold (CFG1.DSIZE/FTHLV), the baud-rate divisor (CFG1.MBR), the
 * communication mode and inter-data idleness (CFG2.COMM/MIDI) and the
 * transfer word count (CR2.TSIZE).
 *
 * Returns 0 on success, a negative errno if the requested speed cannot
 * be reached or if the transfer exceeds the TSIZE counter capacity.
 */
static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
					struct spi_device *spi_dev,
					struct spi_transfer *transfer)
{
	unsigned long flags;
	u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0;
	u32 mode, nb_words;
	int ret = 0;

	spin_lock_irqsave(&spi->lock, flags);

	if (spi->cur_bpw != transfer->bits_per_word) {
		u32 bpw, fthlv;

		/* DSIZE holds (bits_per_word - 1) */
		spi->cur_bpw = transfer->bits_per_word;
		bpw = spi->cur_bpw - 1;

		cfg1_clrb |= SPI_CFG1_DSIZE;
		cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE;

		/* FIFO threshold follows the word size; FTHLV holds (level - 1) */
		spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
		fthlv = spi->cur_fthlv - 1;

		cfg1_clrb |= SPI_CFG1_FTHLV;
		cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV;
	}

	if (spi->cur_speed != transfer->speed_hz) {
		int mbr;

		/* Update spi->cur_speed with real clock speed */
		mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz);
		if (mbr < 0) {
			ret = mbr;
			goto out;
		}

		/* Report the actually achieved rate back to the caller */
		transfer->speed_hz = spi->cur_speed;

		cfg1_clrb |= SPI_CFG1_MBR;
		cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR;
	}

	if (cfg1_clrb || cfg1_setb)
		writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) &
				~cfg1_clrb) | cfg1_setb,
			       spi->base + STM32_SPI_CFG1);

	mode = SPI_FULL_DUPLEX;
	if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
		/*
		 * SPI_3WIRE with both xfer->tx_buf != NULL and
		 * xfer->rx_buf != NULL is forbidden and not validated by the
		 * SPI subsystem, so depending on which buffer is valid we can
		 * determine the direction of the transfer.
		 */
		mode = SPI_HALF_DUPLEX;
		if (!transfer->tx_buf)
			stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
		else if (!transfer->rx_buf)
			stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
	} else {
		if (!transfer->tx_buf)
			mode = SPI_SIMPLEX_RX;
		else if (!transfer->rx_buf)
			mode = SPI_SIMPLEX_TX;
	}
	if (spi->cur_comm != mode) {
		spi->cur_comm = mode;

		cfg2_clrb |= SPI_CFG2_COMM;
		cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM;
	}

	/*
	 * MIDI (inter-data idleness) is expressed in SCK cycles: convert the
	 * requested nanoseconds to cycles and clamp to the field's maximum.
	 */
	cfg2_clrb |= SPI_CFG2_MIDI;
	if ((transfer->len > 1) && (spi->cur_midi > 0)) {
		u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
		u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
			       (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT);

		dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
			sck_period_ns, midi, midi * sck_period_ns);
		cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI;
	}

	if (cfg2_clrb || cfg2_setb)
		writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) &
				~cfg2_clrb) | cfg2_setb,
			       spi->base + STM32_SPI_CFG2);

	/* Convert the byte length into a count of data frames for TSIZE */
	if (spi->cur_bpw <= 8)
		nb_words = transfer->len;
	else if (spi->cur_bpw <= 16)
		nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
	else
		nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
	nb_words <<= SPI_CR2_TSIZE_SHIFT;

	if (nb_words <= SPI_CR2_TSIZE) {
		writel_relaxed(nb_words, spi->base + STM32_SPI_CR2);
	} else {
		/* Transfer too long for the hardware word counter */
		ret = -EMSGSIZE;
		goto out;
	}

	spi->cur_xferlen = transfer->len;

	dev_dbg(spi->dev, "transfer communication mode set to %d\n",
		spi->cur_comm);
	dev_dbg(spi->dev,
		"data frame of %d-bit, data packet of %d data frames\n",
		spi->cur_bpw, spi->cur_fthlv);
	dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
	dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
		spi->cur_xferlen, nb_words);
	dev_dbg(spi->dev, "dma %s\n",
		(spi->cur_usedma) ? "enabled" : "disabled");

out:
	spin_unlock_irqrestore(&spi->lock, flags);

	return ret;
}
  806. /**
  807. * stm32_spi_transfer_one - transfer a single spi_transfer
  808. *
  809. * It must return 0 if the transfer is finished or 1 if the transfer is still
  810. * in progress.
  811. */
  812. static int stm32_spi_transfer_one(struct spi_master *master,
  813. struct spi_device *spi_dev,
  814. struct spi_transfer *transfer)
  815. {
  816. struct stm32_spi *spi = spi_master_get_devdata(master);
  817. int ret;
  818. spi->tx_buf = transfer->tx_buf;
  819. spi->rx_buf = transfer->rx_buf;
  820. spi->tx_len = spi->tx_buf ? transfer->len : 0;
  821. spi->rx_len = spi->rx_buf ? transfer->len : 0;
  822. spi->cur_usedma = (master->can_dma &&
  823. stm32_spi_can_dma(master, spi_dev, transfer));
  824. ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
  825. if (ret) {
  826. dev_err(spi->dev, "SPI transfer setup failed\n");
  827. return ret;
  828. }
  829. if (spi->cur_usedma)
  830. return stm32_spi_transfer_one_dma(spi, transfer);
  831. else
  832. return stm32_spi_transfer_one_irq(spi);
  833. }
  834. /**
  835. * stm32_spi_unprepare_msg - relax the hardware
  836. *
  837. * Normally, if TSIZE has been configured, we should relax the hardware at the
  838. * reception of the EOT interrupt. But in case of error, EOT will not be
  839. * raised. So the subsystem unprepare_message call allows us to properly
  840. * complete the transfer from an hardware point of view.
  841. */
  842. static int stm32_spi_unprepare_msg(struct spi_master *master,
  843. struct spi_message *msg)
  844. {
  845. struct stm32_spi *spi = spi_master_get_devdata(master);
  846. stm32_spi_disable(spi);
  847. return 0;
  848. }
  849. /**
  850. * stm32_spi_config - Configure SPI controller as SPI master
  851. */
  852. static int stm32_spi_config(struct stm32_spi *spi)
  853. {
  854. unsigned long flags;
  855. spin_lock_irqsave(&spi->lock, flags);
  856. /* Ensure I2SMOD bit is kept cleared */
  857. stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
  858. /*
  859. * - SS input value high
  860. * - transmitter half duplex direction
  861. * - automatic communication suspend when RX-Fifo is full
  862. */
  863. stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI |
  864. SPI_CR1_HDDIR |
  865. SPI_CR1_MASRX);
  866. /*
  867. * - Set the master mode (default Motorola mode)
  868. * - Consider 1 master/n slaves configuration and
  869. * SS input value is determined by the SSI bit
  870. * - keep control of all associated GPIOs
  871. */
  872. stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER |
  873. SPI_CFG2_SSM |
  874. SPI_CFG2_AFCNTR);
  875. spin_unlock_irqrestore(&spi->lock, flags);
  876. return 0;
  877. }
/* Devicetree match table: this driver drives the STM32H7 SPI block */
static const struct of_device_id stm32_spi_of_match[] = {
	{ .compatible = "st,stm32h7-spi", },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
  883. static int stm32_spi_probe(struct platform_device *pdev)
  884. {
  885. struct spi_master *master;
  886. struct stm32_spi *spi;
  887. struct resource *res;
  888. int i, ret;
  889. master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
  890. if (!master) {
  891. dev_err(&pdev->dev, "spi master allocation failed\n");
  892. return -ENOMEM;
  893. }
  894. platform_set_drvdata(pdev, master);
  895. spi = spi_master_get_devdata(master);
  896. spi->dev = &pdev->dev;
  897. spi->master = master;
  898. spin_lock_init(&spi->lock);
  899. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  900. spi->base = devm_ioremap_resource(&pdev->dev, res);
  901. if (IS_ERR(spi->base)) {
  902. ret = PTR_ERR(spi->base);
  903. goto err_master_put;
  904. }
  905. spi->phys_addr = (dma_addr_t)res->start;
  906. spi->irq = platform_get_irq(pdev, 0);
  907. if (spi->irq <= 0) {
  908. dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
  909. ret = -ENOENT;
  910. goto err_master_put;
  911. }
  912. ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
  913. stm32_spi_irq, IRQF_ONESHOT,
  914. pdev->name, master);
  915. if (ret) {
  916. dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
  917. ret);
  918. goto err_master_put;
  919. }
  920. spi->clk = devm_clk_get(&pdev->dev, 0);
  921. if (IS_ERR(spi->clk)) {
  922. ret = PTR_ERR(spi->clk);
  923. dev_err(&pdev->dev, "clk get failed: %d\n", ret);
  924. goto err_master_put;
  925. }
  926. ret = clk_prepare_enable(spi->clk);
  927. if (ret) {
  928. dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
  929. goto err_master_put;
  930. }
  931. spi->clk_rate = clk_get_rate(spi->clk);
  932. if (!spi->clk_rate) {
  933. dev_err(&pdev->dev, "clk rate = 0\n");
  934. ret = -EINVAL;
  935. goto err_clk_disable;
  936. }
  937. spi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
  938. if (!IS_ERR(spi->rst)) {
  939. reset_control_assert(spi->rst);
  940. udelay(2);
  941. reset_control_deassert(spi->rst);
  942. }
  943. spi->fifo_size = stm32_spi_get_fifo_size(spi);
  944. ret = stm32_spi_config(spi);
  945. if (ret) {
  946. dev_err(&pdev->dev, "controller configuration failed: %d\n",
  947. ret);
  948. goto err_clk_disable;
  949. }
  950. master->dev.of_node = pdev->dev.of_node;
  951. master->auto_runtime_pm = true;
  952. master->bus_num = pdev->id;
  953. master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST |
  954. SPI_3WIRE | SPI_LOOP;
  955. master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
  956. master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN;
  957. master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX;
  958. master->setup = stm32_spi_setup;
  959. master->prepare_message = stm32_spi_prepare_msg;
  960. master->transfer_one = stm32_spi_transfer_one;
  961. master->unprepare_message = stm32_spi_unprepare_msg;
  962. spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
  963. if (!spi->dma_tx)
  964. dev_warn(&pdev->dev, "failed to request tx dma channel\n");
  965. else
  966. master->dma_tx = spi->dma_tx;
  967. spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
  968. if (!spi->dma_rx)
  969. dev_warn(&pdev->dev, "failed to request rx dma channel\n");
  970. else
  971. master->dma_rx = spi->dma_rx;
  972. if (spi->dma_tx || spi->dma_rx)
  973. master->can_dma = stm32_spi_can_dma;
  974. pm_runtime_set_active(&pdev->dev);
  975. pm_runtime_enable(&pdev->dev);
  976. ret = devm_spi_register_master(&pdev->dev, master);
  977. if (ret) {
  978. dev_err(&pdev->dev, "spi master registration failed: %d\n",
  979. ret);
  980. goto err_dma_release;
  981. }
  982. if (!master->cs_gpios) {
  983. dev_err(&pdev->dev, "no CS gpios available\n");
  984. ret = -EINVAL;
  985. goto err_dma_release;
  986. }
  987. for (i = 0; i < master->num_chipselect; i++) {
  988. if (!gpio_is_valid(master->cs_gpios[i])) {
  989. dev_err(&pdev->dev, "%i is not a valid gpio\n",
  990. master->cs_gpios[i]);
  991. ret = -EINVAL;
  992. goto err_dma_release;
  993. }
  994. ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
  995. DRIVER_NAME);
  996. if (ret) {
  997. dev_err(&pdev->dev, "can't get CS gpio %i\n",
  998. master->cs_gpios[i]);
  999. goto err_dma_release;
  1000. }
  1001. }
  1002. dev_info(&pdev->dev, "driver initialized\n");
  1003. return 0;
  1004. err_dma_release:
  1005. if (spi->dma_tx)
  1006. dma_release_channel(spi->dma_tx);
  1007. if (spi->dma_rx)
  1008. dma_release_channel(spi->dma_rx);
  1009. pm_runtime_disable(&pdev->dev);
  1010. err_clk_disable:
  1011. clk_disable_unprepare(spi->clk);
  1012. err_master_put:
  1013. spi_master_put(master);
  1014. return ret;
  1015. }
  1016. static int stm32_spi_remove(struct platform_device *pdev)
  1017. {
  1018. struct spi_master *master = platform_get_drvdata(pdev);
  1019. struct stm32_spi *spi = spi_master_get_devdata(master);
  1020. stm32_spi_disable(spi);
  1021. if (master->dma_tx)
  1022. dma_release_channel(master->dma_tx);
  1023. if (master->dma_rx)
  1024. dma_release_channel(master->dma_rx);
  1025. clk_disable_unprepare(spi->clk);
  1026. pm_runtime_disable(&pdev->dev);
  1027. return 0;
  1028. }
#ifdef CONFIG_PM
/* Runtime suspend: gate the SPI kernel clock while the controller is idle */
static int stm32_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct stm32_spi *spi = spi_master_get_devdata(master);

	clk_disable_unprepare(spi->clk);

	return 0;
}

/* Runtime resume: re-enable the SPI kernel clock */
static int stm32_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct stm32_spi *spi = spi_master_get_devdata(master);

	return clk_prepare_enable(spi->clk);
}
#endif
#ifdef CONFIG_PM_SLEEP
/* System suspend: stop the SPI queue first, then force runtime suspend */
static int stm32_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

/*
 * System resume: runtime-resume the device (re-enables the clock via
 * stm32_spi_runtime_resume), then restart the SPI queue. If the queue
 * restart fails, drop the clock reference again so the enable count
 * stays balanced.
 */
static int stm32_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct stm32_spi *spi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_master_resume(master);
	if (ret)
		clk_disable_unprepare(spi->clk);

	return ret;
}
#endif
/* PM callbacks: runtime PM gates the clock; system sleep reuses it via force_{suspend,resume} */
static const struct dev_pm_ops stm32_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
	SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
			   stm32_spi_runtime_resume, NULL)
};
/* Platform driver glue and module metadata */
static struct platform_driver stm32_spi_driver = {
	.probe = stm32_spi_probe,
	.remove = stm32_spi_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &stm32_spi_pm_ops,
		.of_match_table = stm32_spi_of_match,
	},
};

module_platform_driver(stm32_spi_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
MODULE_LICENSE("GPL v2");