/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3
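
/*
 * Illustrative sketch (not part of the original driver): a full 16
 * dword flash read at dword address 0x100 would be encoded into the
 * MAIL_IN register roughly as
 *
 *	u32 in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
 *	in |= (0x100 << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
 *	in |= MAIL_IN_OP_REQUEST;
 *
 * The dwords field is left at zero because, judging from the read path
 * below, zero appears to encode a full MAIL_DATA_DWORDS transfer. The
 * firmware clears MAIL_IN_OP_REQUEST once the command has been handled.
 */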

/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	/* 12 byte header (route + addr) followed by the data dwords */
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	/* 12 byte header (route + addr) followed by the data dwords */
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		/* The port type lives in dword 2 of the port config space */
		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}

/**
 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
 * @sw: Switch from where to find the DMA port
 *
 * Function checks if the switch NHI port supports DMA configuration
 * based mailbox capability and if it does, allocates and initializes
 * DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}
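
/*
 * Illustrative usage (a sketch, not from the original driver): a caller
 * would typically pair allocation with release like
 *
 *	struct tb_dma_port *dma = dma_port_alloc(sw);
 *	if (dma) {
 *		// ... issue mailbox operations ...
 *		dma_port_free(dma);
 *	}
 */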

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}

static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		/*
		 * Poll with a short per-read timeout; the firmware clears
		 * MAIL_IN_OP_REQUEST once the command has been processed.
		 */
		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}

static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
				      const void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwords = size / 4;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (address >= DMA_PORT_CSS_ADDRESS) {
		dwaddress = DMA_PORT_CSS_ADDRESS;
		in |= MAIL_IN_CSS;
	} else {
		dwaddress = address / 4;
	}

	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;

	do {
		unsigned int offset;
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);

		/*
		 * dma_port_flash_read_block() aligns the address down to
		 * the previous dword boundary, so read enough to cover the
		 * unaligned head of the buffer as well.
		 */
		ret = dma_port_flash_read_block(dma, address, dma->buf,
						ALIGN(nbytes, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, dma->buf + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
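
/*
 * Illustrative usage (a sketch, not from the original driver): reading
 * the first 256 bytes of the active NVM could look like
 *
 *	u8 data[256];
 *	int ret = dma_port_flash_read(dma, 0, data, sizeof(data));
 *
 * Unaligned addresses are handled internally by aligning the mailbox
 * reads down to the previous dword boundary.
 */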

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes block of data to the non-active flash region of the switch. If
 * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	if (address >= DMA_PORT_CSS_ADDRESS) {
		offset = 0;
		if (size > DMA_PORT_CSS_MAX_SIZE)
			return -E2BIG;
	} else {
		offset = address & 3;
		address = address & ~3;
	}

	do {
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4 - offset);
		int ret;

		/* Stage the block so that it starts dword aligned */
		memcpy(dma->buf + offset, buf, nbytes);

		ret = dma_port_flash_write_block(dma, address, dma->buf,
						 ALIGN(nbytes + offset, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes + offset;
		buf += nbytes;
		offset = 0;
	} while (size > 0);

	return 0;
}
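
/*
 * Illustrative sketch (not from the original driver): the CSS header of
 * a new NVM image would be written through the same function by passing
 * the magic address, e.g.
 *
 *	ret = dma_port_flash_write(dma, DMA_PORT_CSS_ADDRESS, css_hdr,
 *				   css_hdr_len);
 *
 * where css_hdr and css_hdr_len are hypothetical caller-side names. A
 * size above DMA_PORT_CSS_MAX_SIZE is rejected with -E2BIG.
 */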

/**
 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area was valid, the switch starts the upgrade process where
 * active and non-active areas get swapped in the end. The caller should
 * call dma_port_flash_update_auth_status() to get the status of this
 * command, because if the switch in question is the root switch the
 * Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers a power cycle of the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}
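
/*
 * Illustrative upgrade flow (a sketch, not from the original driver):
 * once a new image is in the non-active region, a caller might
 * authenticate and recover roughly as follows
 *
 *	ret = dma_port_flash_update_auth(dma);
 *	// ... wait for the switch to come back ...
 *	ret = dma_port_flash_update_auth_status(dma, &status);
 *	if (ret > 0 && status)
 *		dma_port_power_cycle(dma);	// recover from failed update
 *
 * The exact recovery policy is up to the caller; this file only
 * provides the mailbox primitives.
 */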