spilib.c
/*
 * Greybus SPI library
 *
 * Copyright 2014-2016 Google Inc.
 * Copyright 2014-2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "greybus.h"
#include "spilib.h"
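
/*
 * Per-connection state for a Greybus-backed SPI master: the window of
 * the current spi_message in flight (first_xfer/last_xfer plus the
 * rx/tx offsets into a split transfer) and the controller configuration
 * fetched from the remote module.
 */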
struct gb_spilib {
	struct gb_connection *connection;
	struct device *parent;
	struct spi_transfer *first_xfer;
	struct spi_transfer *last_xfer;
	struct spilib_ops *ops;
	u32 rx_xfer_offset;
	u32 tx_xfer_offset;
	u32 last_xfer_size;
	unsigned int op_timeout;
	u16 mode;
	u16 flags;
	u32 bits_per_word_mask;
	u8 num_chipselect;
	u32 min_speed_hz;
	u32 max_speed_hz;
};
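
/*
 * Message progress is tracked as small cookie values stored in
 * msg->state (a void pointer): a message starts MSG_IDLE, becomes
 * MSG_RUNNING while transfers are packed into an operation, OP_READY
 * once an operation is full, OP_DONE when that operation's payload has
 * been built, and finally MSG_DONE (or MSG_ERROR) when the whole
 * spi_message has been handled.
 */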
#define GB_SPI_STATE_MSG_DONE		((void *)0)
#define GB_SPI_STATE_MSG_IDLE		((void *)1)
#define GB_SPI_STATE_MSG_RUNNING	((void *)2)
#define GB_SPI_STATE_OP_READY		((void *)3)
#define GB_SPI_STATE_OP_DONE		((void *)4)
#define GB_SPI_STATE_MSG_ERROR		((void *)-1)

#define XFER_TIMEOUT_TOLERANCE		200

static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
{
	return gb_connection_get_data(spi->connection);
}
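
/*
 * Check whether the tx data gathered so far plus one more transfer
 * descriptor still fits in the request payload; returns 1 if it fits,
 * 0 otherwise.
 */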
static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	return tx_size + headers_size > data_max ? 0 : 1;
}
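
/*
 * Amount of rx data from this transfer that fits in the response; for a
 * full-duplex (write_read) transfer the rx and tx chunk sizes are
 * clamped to each other so both directions stay symmetric.
 */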
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
				size_t data_max)
{
	size_t rx_xfer_size;

	data_max -= sizeof(struct gb_spi_transfer_response);

	if (rx_size + len > data_max)
		rx_xfer_size = data_max - rx_size;
	else
		rx_xfer_size = len;

	/* if this is a write_read, for symmetry read the same as write */
	if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
		rx_xfer_size = *tx_xfer_size;

	if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
		*tx_xfer_size = rx_xfer_size;

	return rx_xfer_size;
}
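
/*
 * Amount of tx data from this transfer that fits in the request once
 * the request header and all transfer descriptors are accounted for.
 */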
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
				size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	if (tx_size + headers_size + len > data_max)
		return data_max - (tx_size + headers_size);

	return len;
}

static void clean_xfer_state(struct gb_spilib *spi)
{
	spi->first_xfer = NULL;
	spi->last_xfer = NULL;
	spi->rx_xfer_offset = 0;
	spi->tx_xfer_offset = 0;
	spi->last_xfer_size = 0;
	spi->op_timeout = 0;
}
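
/* true once the last (possibly split) transfer has been fully moved */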
static bool is_last_xfer_done(struct gb_spilib *spi)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
	    (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
		return true;

	return false;
}
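
/*
 * After an operation completes, advance the transfer window: either
 * move on to the next spi_transfer in the message, or bump the rx/tx
 * offsets so the remainder of a split transfer goes out in the next
 * operation.
 */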
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if (msg->state != GB_SPI_STATE_OP_DONE)
		return 0;

	/*
	 * if we transferred all content of the last transfer, reset values
	 * and check if this was the last transfer in the message
	 */
	if (is_last_xfer_done(spi)) {
		spi->tx_xfer_offset = 0;
		spi->rx_xfer_offset = 0;
		spi->op_timeout = 0;
		if (last_xfer == list_last_entry(&msg->transfers,
						 struct spi_transfer,
						 transfer_list))
			msg->state = GB_SPI_STATE_MSG_DONE;
		else
			spi->first_xfer = list_next_entry(last_xfer,
							  transfer_list);
		return 0;
	}

	spi->first_xfer = last_xfer;
	if (last_xfer->tx_buf)
		spi->tx_xfer_offset += spi->last_xfer_size;

	if (last_xfer->rx_buf)
		spi->rx_xfer_offset += spi->last_xfer_size;

	return 0;
}
static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
					  struct spi_message *msg)
{
	if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
				    transfer_list))
		return NULL;

	return list_next_entry(xfer, transfer_list);
}

/* Routines to transfer data */
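
/*
 * Pack as much of the message as fits into one Greybus SPI transfer
 * operation: walk the transfers, splitting the last one if it would
 * exceed the payload limit, then build the request (transfer
 * descriptors followed by the concatenated tx data) and size the
 * response to hold the expected rx data.
 */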
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and tx/rx length in the message */
	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			len = xfer->len - spi->tx_xfer_offset;
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			rx_xfer_size = calc_rx_xfer_size(rx_size,
							 &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* place tx data after last gb_xfer */

	/* Fill in the transfers array */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not timeout in a slow transfer */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}
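
/*
 * Copy the rx payload of a completed operation back into the rx buffers
 * of the transfers it covered, honouring the offset of a split transfer.
 */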
static void gb_spi_decode_response(struct gb_spilib *spi,
				   struct spi_message *msg,
				   struct gb_spi_transfer_response *response)
{
	struct spi_transfer *xfer = spi->first_xfer;
	void *rx_data = response->data;
	u32 xfer_len;

	while (xfer) {
		/* Copy rx data */
		if (xfer->rx_buf) {
			if (xfer == spi->first_xfer)
				xfer_len = xfer->len - spi->rx_xfer_offset;
			else if (xfer == spi->last_xfer)
				xfer_len = spi->last_xfer_size;
			else
				xfer_len = xfer->len;

			memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
			       xfer_len);
			rx_data += xfer_len;
		}

		if (xfer == spi->last_xfer)
			break;

		xfer = list_next_entry(xfer, transfer_list);
	}
}
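
/*
 * spi_master->transfer_one_message() hook: keep creating and sending
 * Greybus operations until the whole message is transferred or fails.
 */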
static int gb_spi_transfer_one_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -EINVAL;
			continue;
		}

		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(master);

	return ret;
}

static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	return spi->ops->prepare_transfer_hardware(spi->parent);
}

static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	spi->ops->unprepare_transfer_hardware(spi->parent);

	return 0;
}

static int gb_spi_setup(struct spi_device *spi)
{
	/* Nothing to do for now */
	return 0;
}

static void gb_spi_cleanup(struct spi_device *spi)
{
	/* Nothing to do for now */
}

/* Routines to get controller information */

/*
 * Map Greybus SPI mode bits/flags/bpw into Linux ones.
 * All bits are the same for now, so these macros return their argument
 * unchanged.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags
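
/*
 * Fetch the remote controller's capabilities (mode bits, flags,
 * bits-per-word mask, chip-select count, speed range) over Greybus.
 */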
static int gb_spi_get_master_config(struct gb_spilib *spi)
{
	struct gb_spi_master_config_response response;
	u16 mode, flags;
	int ret;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;

	mode = le16_to_cpu(response.mode);
	spi->mode = gb_spi_mode_map(mode);

	flags = le16_to_cpu(response.flags);
	spi->flags = gb_spi_flags_map(flags);

	spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
	spi->num_chipselect = response.num_chipselect;

	spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
	spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);

	return 0;
}
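
/*
 * Query the device behind the given chip select and register a matching
 * spi_device: "spidev", "spi-nor", or the modalias the module reports.
 */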
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
	struct spi_master *master = get_master_from_spi(spi);
	struct gb_spi_device_config_request request;
	struct gb_spi_device_config_response response;
	struct spi_board_info spi_board = { {0} };
	struct spi_device *spidev;
	int ret;
	u8 dev_type;

	request.chip_select = cs;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	dev_type = response.device_type;

	if (dev_type == GB_SPI_SPI_DEV)
		strlcpy(spi_board.modalias, "spidev",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_NOR)
		strlcpy(spi_board.modalias, "spi-nor",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_MODALIAS)
		memcpy(spi_board.modalias, response.name,
		       sizeof(spi_board.modalias));
	else
		return -EINVAL;

	spi_board.mode = le16_to_cpu(response.mode);
	spi_board.bus_num = master->bus_num;
	spi_board.chip_select = cs;
	spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);

	spidev = spi_new_device(master, &spi_board);
	if (!spidev)
		return -EINVAL;

	return 0;
}
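
/*
 * Allocate and register a Linux SPI master backed by a Greybus
 * connection, then enumerate and register one spi_device per chip
 * select reported by the module.
 */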
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_master *master;
	int ret;
	u8 i;

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(*spi));
	if (!master) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_master_get_devdata(master);
	spi->connection = connection;
	gb_connection_set_data(connection, master);
	spi->parent = dev;
	spi->ops = ops;

	/* get master configuration */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
	master->num_chipselect = spi->num_chipselect;
	master->mode_bits = spi->mode;
	master->flags = spi->flags;
	master->bits_per_word_mask = spi->bits_per_word_mask;

	/* Attach methods */
	master->cleanup = gb_spi_cleanup;
	master->setup = gb_spi_setup;
	master->transfer_one_message = gb_spi_transfer_one_message;

	if (ops && ops->prepare_transfer_hardware) {
		master->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		master->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	master->auto_runtime_pm = true;

	ret = spi_register_master(master);
	if (ret < 0)
		goto exit_spi_put;

	/* now, fetch the devices configuration */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

exit_spi_put:
	spi_master_put(master);

	return ret;

exit_spi_unregister:
	spi_unregister_master(master);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);

void gb_spilib_master_exit(struct gb_connection *connection)
{
	struct spi_master *master = gb_connection_get_data(connection);

	spi_unregister_master(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);

MODULE_LICENSE("GPL v2");