spi_bfin_sport.c

/*
 * SPI bus via the Blackfin SPORT peripheral
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Copyright 2009-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>

#include <asm/portmux.h>
#include <asm/bfin5xx_spi.h>
#include <asm/blackfin.h>
#include <asm/bfin_sport.h>
#include <asm/cacheflush.h>

#define DRV_NAME	"bfin-sport-spi"
#define DRV_DESC	"SPI bus via the Blackfin SPORT"

MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bfin-sport-spi");
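
/*
 * Per-message state machine: a message starts in START_STATE, stays in
 * RUNNING_STATE while its transfers are pumped, and finishes in DONE_STATE
 * or ERROR_STATE.
 */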
enum bfin_sport_spi_state {
	START_STATE,
	RUNNING_STATE,
	DONE_STATE,
	ERROR_STATE,
};

struct bfin_sport_spi_master_data;

struct bfin_sport_transfer_ops {
	void (*write) (struct bfin_sport_spi_master_data *);
	void (*read) (struct bfin_sport_spi_master_data *);
	void (*duplex) (struct bfin_sport_spi_master_data *);
};

struct bfin_sport_spi_master_data {
	/* Driver model hookup */
	struct device *dev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* Regs base of SPI controller */
	struct sport_register __iomem *regs;

	int err_irq;

	/* Pin request list */
	u16 *pin_req;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	bool run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	enum bfin_sport_spi_state state;
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct bfin_sport_spi_slave_data *cur_chip;
	union {
		void *tx;
		u8 *tx8;
		u16 *tx16;
	};
	void *tx_end;
	union {
		void *rx;
		u8 *rx8;
		u16 *rx16;
	};
	void *rx_end;
	int cs_change;
	struct bfin_sport_transfer_ops *ops;
};

struct bfin_sport_spi_slave_data {
	u16 ctl_reg;
	u16 baud;
	u16 cs_chg_udelay;	/* Some devices require > 255usec delay */
	u32 cs_gpio;
	u16 idle_tx_val;
	struct bfin_sport_transfer_ops *ops;
};

static void
bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
{
	bfin_write_or(&drv_data->regs->tcr1, TSPEN);
	bfin_write_or(&drv_data->regs->rcr1, TSPEN);
	SSYNC();
}

static void
bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
{
	bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
	bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
	SSYNC();
}

/* Calculate the SPI_BAUD register value based on input HZ */
static u16
bfin_sport_hz_to_spi_baud(u32 speed_hz)
{
	u_long clk, sclk = get_sclk();
	int div = (sclk / (2 * speed_hz)) - 1;

	if (div < 0)
		div = 0;

	clk = sclk / (2 * (div + 1));

	if (clk > speed_hz)
		div++;

	return div;
}
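
/*
 * Example for bfin_sport_hz_to_spi_baud() (illustrative figures): with
 * sclk = 100 MHz and a requested 5 MHz, div = 100000000 / (2 * 5000000) - 1
 * = 9; the resulting clock 100000000 / (2 * (9 + 1)) = 5 MHz does not exceed
 * the request, so 9 is returned.  If the rounded-down division had left the
 * clock above the request, div would be bumped by one to slow it back down.
 */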

/* Chip select operation functions for cs_change flag */
static void
bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
{
	gpio_direction_output(chip->cs_gpio, 0);
}

static void
bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
{
	gpio_direction_output(chip->cs_gpio, 1);

	/* Move delay here for consistency */
	if (chip->cs_chg_udelay)
		udelay(chip->cs_chg_udelay);
}

static void
bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long timeout = jiffies + HZ;

	while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
		if (!time_before(jiffies, timeout))
			break;
	}
}
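
/*
 * PIO transfer helpers: each variant pushes one word into tx16, waits for
 * RXNE via the poll above, then reads rx16, so transmit and receive stay in
 * lockstep.  The u8 and u16 flavours differ only in how the buffers are
 * stepped; idle_tx_val is shifted out when there is nothing to send.
 */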

static void
bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
{
	u16 dummy;

	while (drv_data->tx < drv_data->tx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		dummy = bfin_read(&drv_data->regs->rx16);
	}
}

static void
bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
{
	u16 tx_val = drv_data->cur_chip->idle_tx_val;

	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, tx_val);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
	}
}

static void
bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
{
	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
	}
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
	.write  = bfin_sport_spi_u8_writer,
	.read   = bfin_sport_spi_u8_reader,
	.duplex = bfin_sport_spi_u8_duplex,
};

static void
bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
{
	u16 dummy;

	while (drv_data->tx < drv_data->tx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		dummy = bfin_read(&drv_data->regs->rx16);
	}
}

static void
bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
{
	u16 tx_val = drv_data->cur_chip->idle_tx_val;

	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, tx_val);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
	}
}

static void
bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
{
	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
	}
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
	.write  = bfin_sport_spi_u16_writer,
	.read   = bfin_sport_spi_u16_reader,
	.duplex = bfin_sport_spi_u16_duplex,
};
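
/*
 * The transmit side supplies the internally generated serial clock and frame
 * sync; the receive side is programmed with the same control value minus
 * ITCLK/ITFS, and the frame-sync divider matches the word length set in
 * tcr2/rcr2, so each word is framed individually.
 */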
/* stop controller and re-config current chip */
static void
bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
{
	struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
	unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);

	bfin_sport_spi_disable(drv_data);
	dev_dbg(drv_data->dev, "restoring spi ctl state\n");

	bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
	bfin_write(&drv_data->regs->tcr2, bits);
	bfin_write(&drv_data->regs->tclkdiv, chip->baud);
	bfin_write(&drv_data->regs->tfsdiv, bits);
	SSYNC();

	bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
	bfin_write(&drv_data->regs->rcr2, bits);
	SSYNC();

	bfin_sport_spi_cs_active(chip);
}

/* test if there is more transfer to be done */
static enum bfin_sport_spi_state
bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
				   struct spi_transfer, transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}

/*
 * caller already set message->status;
 * dma and pio irqs are blocked, give finished message back
 */
static void
bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
{
	struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->state = START_STATE;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	if (!drv_data->cs_change)
		bfin_sport_spi_cs_deactive(chip);

	if (msg->complete)
		msg->complete(msg->context);
}

static irqreturn_t
sport_err_handler(int irq, void *dev_id)
{
	struct bfin_sport_spi_master_data *drv_data = dev_id;
	u16 status;

	dev_dbg(drv_data->dev, "%s enter\n", __func__);
	status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);

	if (status) {
		bfin_write(&drv_data->regs->stat, status);
		SSYNC();

		bfin_sport_spi_disable(drv_data);
		dev_err(drv_data->dev, "status error:%s%s%s%s\n",
			status & TOVF ? " TOVF" : "",
			status & TUVF ? " TUVF" : "",
			status & ROVF ? " ROVF" : "",
			status & RUVF ? " RUVF" : "");
	}

	return IRQ_HANDLED;
}

static void
bfin_sport_spi_pump_transfers(unsigned long data)
{
	struct bfin_sport_spi_master_data *drv_data = (void *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct bfin_sport_spi_slave_data *chip = NULL;
	unsigned int bits_per_word;
	u32 tranf_success = 1;
	u32 transfer_speed;
	u8 full_duplex = 0;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	if (transfer->speed_hz)
		transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
	else
		transfer_speed = chip->baud;
	bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
	SSYNC();

	/*
	 * if msg is error or done, report it back using complete() callback
	 */

	/* Handle for abort */
	if (drv_data->state == ERROR_STATE) {
		dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
		message->status = -EIO;
		bfin_sport_spi_giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (drv_data->state == DONE_STATE) {
		dev_dbg(drv_data->dev, "transfer: all done!\n");
		message->status = 0;
		bfin_sport_spi_giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer */
	if (drv_data->state == RUNNING_STATE) {
		dev_dbg(drv_data->dev, "transfer: still running ...\n");
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer, transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	if (transfer->len == 0) {
		/* Move to next transfer of this msg */
		drv_data->state = bfin_sport_spi_next_transfer(drv_data);
		/* Schedule next transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);
	}

	if (transfer->tx_buf != NULL) {
		drv_data->tx = (void *)transfer->tx_buf;
		drv_data->tx_end = drv_data->tx + transfer->len;
		dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
			transfer->tx_buf, drv_data->tx_end);
	} else
		drv_data->tx = NULL;

	if (transfer->rx_buf != NULL) {
		full_duplex = transfer->tx_buf != NULL;
		drv_data->rx = transfer->rx_buf;
		drv_data->rx_end = drv_data->rx + transfer->len;
		dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
			transfer->rx_buf, drv_data->rx_end);
	} else
		drv_data->rx = NULL;

	drv_data->cs_change = transfer->cs_change;

	/* Bits per word setup */
	bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
	if (bits_per_word == 8)
		drv_data->ops = &bfin_sport_transfer_ops_u8;
	else
		drv_data->ops = &bfin_sport_transfer_ops_u16;

	drv_data->state = RUNNING_STATE;

	if (drv_data->cs_change)
		bfin_sport_spi_cs_active(chip);

	dev_dbg(drv_data->dev,
		"now pumping a transfer: width is %d, len is %d\n",
		bits_per_word, transfer->len);

	/* PIO mode write then read */
	dev_dbg(drv_data->dev, "doing IO transfer\n");

	bfin_sport_spi_enable(drv_data);
	if (full_duplex) {
		/* full duplex mode */
		BUG_ON((drv_data->tx_end - drv_data->tx) !=
		       (drv_data->rx_end - drv_data->rx));
		drv_data->ops->duplex(drv_data);

		if (drv_data->tx != drv_data->tx_end)
			tranf_success = 0;
	} else if (drv_data->tx != NULL) {
		/* write only half duplex */

		drv_data->ops->write(drv_data);

		if (drv_data->tx != drv_data->tx_end)
			tranf_success = 0;
	} else if (drv_data->rx != NULL) {
		/* read only half duplex */

		drv_data->ops->read(drv_data);

		if (drv_data->rx != drv_data->rx_end)
			tranf_success = 0;
	}
	bfin_sport_spi_disable(drv_data);

	if (!tranf_success) {
		dev_dbg(drv_data->dev, "IO write error!\n");
		drv_data->state = ERROR_STATE;
	} else {
		/* Update total byte transferred */
		message->actual_length += transfer->len;
		/* Move to next transfer of this msg */
		drv_data->state = bfin_sport_spi_next_transfer(drv_data);
		if (drv_data->cs_change)
			bfin_sport_spi_cs_deactive(chip);
	}

	/* Schedule next transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}
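
/*
 * Message flow: bfin_sport_spi_transfer() only queues a message on
 * drv_data->queue; the workqueue handler below pops one message off and the
 * pump_transfers tasklet then walks that message's individual transfers.
 */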
/* pop a msg from queue and kick off real transfer */
static void
bfin_sport_spi_pump_messages(struct work_struct *work)
{
	struct bfin_sport_spi_master_data *drv_data;
	unsigned long flags;
	struct spi_message *next_msg;

	drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || !drv_data->run) {
		/* pumper kicked off but no work to do */
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	next_msg = list_entry(drv_data->queue.next,
			      struct spi_message, queue);

	drv_data->cur_msg = next_msg;

	/* Setup the SSP using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	list_del_init(&drv_data->cur_msg->queue);

	/* Initialize message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
					    struct spi_transfer, transfer_list);

	bfin_sport_spi_restore_state(drv_data);
	dev_dbg(drv_data->dev, "got a message to pump, "
		"state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
		drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
		drv_data->cur_chip->ctl_reg);

	dev_dbg(drv_data->dev,
		"the first transfer len is %d\n",
		drv_data->cur_transfer->len);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);
}

/*
 * got a msg to transfer, queue it in drv_data->queue.
 * And kick off message pumper
 */
static int
bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (!drv_data->run) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
	list_add_tail(&msg->queue, &drv_data->queue);

	if (drv_data->run && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;
}

/* Called every time common spi devices change state */
static int
bfin_sport_spi_setup(struct spi_device *spi)
{
	struct bfin_sport_spi_slave_data *chip, *first = NULL;
	int ret;

	/* Only alloc (or use chip_info) on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		struct bfin5xx_spi_chip *chip_info;

		chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		/* platform chip_info isn't required */
		chip_info = spi->controller_data;
		if (chip_info) {
			/*
			 * DITFS and TDTYPE are the only things we don't set,
			 * but they probably shouldn't be changed by people.
			 */
			if (chip_info->ctl_reg || chip_info->enable_dma) {
				ret = -EINVAL;
				dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
				goto error;
			}

			chip->cs_chg_udelay = chip_info->cs_chg_udelay;
			chip->idle_tx_val = chip_info->idle_tx_val;
			spi->bits_per_word = chip_info->bits_per_word;
		}
	}

	if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
		ret = -EINVAL;
		goto error;
	}

	/* translate common spi framework settings into our registers;
	 * the following configuration is the same for tx and rx.
	 */
	if (spi->mode & SPI_CPHA)
		chip->ctl_reg &= ~TCKFE;
	else
		chip->ctl_reg |= TCKFE;

	if (spi->mode & SPI_LSB_FIRST)
		chip->ctl_reg |= TLSBIT;
	else
		chip->ctl_reg &= ~TLSBIT;

	/* Sport in master mode */
	chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;

	chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);

	chip->cs_gpio = spi->chip_select;
	ret = gpio_request(chip->cs_gpio, spi->modalias);
	if (ret)
		goto error;

	dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
		spi->modalias, spi->bits_per_word);
	dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
		chip->ctl_reg, spi->chip_select);

	spi_set_ctldata(spi, chip);

	bfin_sport_spi_cs_deactive(chip);

	return ret;

error:
	kfree(first);

	return ret;
}
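
/*
 * Illustrative board hookup for a slave on this bus (all names below are
 * hypothetical examples, not part of this driver): the chip_select number is
 * reused directly as the GPIO driving the chip-select line, so a board file
 * might register something like
 *
 *	static struct spi_board_info bfin_spi_board_info[] __initdata = {
 *		{
 *			.modalias	= "spidev",
 *			.max_speed_hz	= 5000000,
 *			.bus_num	= 1,
 *			.chip_select	= GPIO_PF10,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(bfin_spi_board_info,
 *				ARRAY_SIZE(bfin_spi_board_info));
 */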

/*
 * callback for spi framework.
 * clean driver specific data
 */
static void
bfin_sport_spi_cleanup(struct spi_device *spi)
{
	struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);

	if (!chip)
		return;

	gpio_free(chip->cs_gpio);

	kfree(chip);
}

static int
bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = false;
	drv_data->busy = 0;

	/* init transfer tasklet */
	tasklet_init(&drv_data->pump_transfers,
		     bfin_sport_spi_pump_transfers, (unsigned long)drv_data);

	/* init messages workqueue */
	INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
	drv_data->workqueue =
		create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int
bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = true;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->pump_messages);

	return 0;
}

static inline int
bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead
	 */
	drv_data->run = false;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static inline int
bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
{
	int status;

	status = bfin_sport_spi_stop_queue(drv_data);
	if (status)
		return status;

	destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __devinit
bfin_sport_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bfin5xx_spi_master *platform_info;
	struct spi_master *master;
	struct resource *res, *ires;
	struct bfin_sport_spi_master_data *drv_data;
	int status;

	platform_info = dev->platform_data;

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(*drv_data) + 16);
	if (!master) {
		dev_err(dev, "cannot alloc spi_master\n");
		return -ENOMEM;
	}

	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->dev = dev;
	drv_data->pin_req = platform_info->pin_req;

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = bfin_sport_spi_cleanup;
	master->setup = bfin_sport_spi_setup;
	master->transfer = bfin_sport_spi_transfer;

	/* Find and map our resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "cannot get IORESOURCE_MEM\n");
		status = -ENOENT;
		goto out_error_get_res;
	}

	drv_data->regs = ioremap(res->start, resource_size(res));
	if (drv_data->regs == NULL) {
		dev_err(dev, "cannot map registers\n");
		status = -ENXIO;
		goto out_error_ioremap;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires) {
		dev_err(dev, "cannot get IORESOURCE_IRQ\n");
		status = -ENODEV;
		goto out_error_get_ires;
	}
	drv_data->err_irq = ires->start;

	/* Initialize and start the message queue */
	status = bfin_sport_spi_init_queue(drv_data);
	if (status) {
		dev_err(dev, "problem initializing queue\n");
		goto out_error_queue_alloc;
	}

	status = bfin_sport_spi_start_queue(drv_data);
	if (status) {
		dev_err(dev, "problem starting queue\n");
		goto out_error_queue_alloc;
	}

	status = request_irq(drv_data->err_irq, sport_err_handler,
			     0, "sport_spi_err", drv_data);
	if (status) {
		dev_err(dev, "unable to request sport err irq\n");
		goto out_error_irq;
	}

	status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
	if (status) {
		dev_err(dev, "requesting peripherals failed\n");
		goto out_error_peripheral;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status) {
		dev_err(dev, "problem registering spi master\n");
		goto out_error_master;
	}

	dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
	return 0;

out_error_master:
	peripheral_free_list(drv_data->pin_req);
out_error_peripheral:
	free_irq(drv_data->err_irq, drv_data);
out_error_irq:
out_error_queue_alloc:
	bfin_sport_spi_destroy_queue(drv_data);
out_error_get_ires:
	iounmap(drv_data->regs);
out_error_ioremap:
out_error_get_res:
	spi_master_put(master);

	return status;
}

/* stop hardware and remove the driver */
static int __devexit
bfin_sport_spi_remove(struct platform_device *pdev)
{
	struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	if (!drv_data)
		return 0;

	/* Remove the queue */
	status = bfin_sport_spi_destroy_queue(drv_data);
	if (status)
		return status;

	/* Disable the SSP at the peripheral and SOC level */
	bfin_sport_spi_disable(drv_data);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	peripheral_free_list(drv_data->pin_req);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int
bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
	int status;

	status = bfin_sport_spi_stop_queue(drv_data);
	if (status)
		return status;

	/* stop hardware */
	bfin_sport_spi_disable(drv_data);

	return status;
}

static int
bfin_sport_spi_resume(struct platform_device *pdev)
{
	struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
	int status;

	/* Enable the SPI interface */
	bfin_sport_spi_enable(drv_data);

	/* Start the queue running */
	status = bfin_sport_spi_start_queue(drv_data);
	if (status)
		dev_err(drv_data->dev, "problem resuming queue\n");

	return status;
}
#else
# define bfin_sport_spi_suspend NULL
# define bfin_sport_spi_resume  NULL
#endif

static struct platform_driver bfin_sport_spi_driver = {
	.driver	= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
	.probe   = bfin_sport_spi_probe,
	.remove  = __devexit_p(bfin_sport_spi_remove),
	.suspend = bfin_sport_spi_suspend,
	.resume  = bfin_sport_spi_resume,
};

static int __init bfin_sport_spi_init(void)
{
	return platform_driver_register(&bfin_sport_spi_driver);
}
module_init(bfin_sport_spi_init);

static void __exit bfin_sport_spi_exit(void)
{
	platform_driver_unregister(&bfin_sport_spi_driver);
}
module_exit(bfin_sport_spi_exit);
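
/*
 * Illustrative platform wiring for this controller (all names and numbers
 * below are hypothetical and board specific, shown only as a sketch): a board
 * file would describe the SPORT register window, the SPORT error interrupt,
 * and the portmux pin list via bfin5xx_spi_master platform data, then register
 * a "bfin-sport-spi" platform device whose id becomes the SPI bus number.
 *
 *	static struct bfin5xx_spi_master bfin_sport_spi0_info = {
 *		.num_chipselect = MAX_BLACKFIN_GPIOS,
 *		.enable_dma     = 0,	// this driver does PIO only
 *		.pin_req        = { P_SPORT0_DTPRI, P_SPORT0_TSCLK,
 *				    P_SPORT0_DRPRI, P_SPORT0_RFS, 0 },
 *	};
 *
 *	static struct resource bfin_sport_spi0_resource[] = {
 *		[0] = {
 *			.start = SPORT0_TCR1,
 *			.end   = SPORT0_TCR1 + 0xFF,
 *			.flags = IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			.start = IRQ_SPORT0_ERROR,
 *			.end   = IRQ_SPORT0_ERROR,
 *			.flags = IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device bfin_sport_spi0_device = {
 *		.name          = "bfin-sport-spi",
 *		.id            = 1,
 *		.num_resources = ARRAY_SIZE(bfin_sport_spi0_resource),
 *		.resource      = bfin_sport_spi0_resource,
 *		.dev = {
 *			.platform_data = &bfin_sport_spi0_info,
 *		},
 *	};
 */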