coresight-tmc-etf.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2016 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
  6. #include <linux/circ_buf.h>
  7. #include <linux/coresight.h>
  8. #include <linux/perf_event.h>
  9. #include <linux/slab.h>
  10. #include "coresight-priv.h"
  11. #include "coresight-tmc.h"
/*
 * tmc_etb_enable_hw - program and start the TMC in circular buffer (ETB) mode.
 *
 * Called with drvdata->spinlock held (see tmc_enable_etf_sink_sysfs/perf and
 * tmc_read_unprepare_etb).  CS_UNLOCK/CS_LOCK bracket access to the device's
 * programming registers.
 */
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/*
	 * Formatter and flush control: enable formatting and trigger
	 * insertion, flush on FlushIn and on trigger event, and pass the
	 * trigger on TrigIn through.
	 */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Number of 32-bit words to capture after a trigger event */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  26. static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
  27. {
  28. char *bufp;
  29. u32 read_data, lost;
  30. int i;
  31. /* Check if the buffer wrapped around. */
  32. lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
  33. bufp = drvdata->buf;
  34. drvdata->len = 0;
  35. while (1) {
  36. for (i = 0; i < drvdata->memwidth; i++) {
  37. read_data = readl_relaxed(drvdata->base + TMC_RRD);
  38. if (read_data == 0xFFFFFFFF)
  39. goto done;
  40. memcpy(bufp, &read_data, 4);
  41. bufp += 4;
  42. drvdata->len += 4;
  43. }
  44. }
  45. done:
  46. if (lost)
  47. coresight_insert_barrier_packet(drvdata->buf);
  48. return;
  49. }
/*
 * tmc_etb_disable_hw - flush, optionally dump, and stop the TMC.
 *
 * Called with drvdata->spinlock held.  In sysFS mode the internal RAM is
 * drained to drvdata->buf before the TMC is disabled; in perf mode the
 * data is collected separately by tmc_update_etf_buffer().
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etf_enable_hw - program and start the TMC in hardware FIFO (link) mode.
 *
 * Called with drvdata->spinlock held, from tmc_enable_etf_link().
 */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	/* Enable formatting and trigger insertion only - no flush triggers */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Clear the buffer water mark */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etf_disable_hw - flush the formatter and stop the TMC FIFO.
 *
 * Called with drvdata->spinlock held, from tmc_disable_etf_link().
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  82. /*
  83. * Return the available trace data in the buffer from @pos, with
  84. * a maximum limit of @len, updating the @bufpp on where to
  85. * find it.
  86. */
  87. ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
  88. loff_t pos, size_t len, char **bufpp)
  89. {
  90. ssize_t actual = len;
  91. /* Adjust the len to available size @pos */
  92. if (pos + actual > drvdata->len)
  93. actual = drvdata->len - pos;
  94. if (actual > 0)
  95. *bufpp = drvdata->buf + pos;
  96. return actual;
  97. }
/*
 * tmc_enable_etf_sink_sysfs - enable the ETB/ETF as a sink for a sysFS session.
 *
 * The trace buffer is allocated with the spinlock dropped, so after
 * re-acquiring it every condition (reader active, already enabled, buffer
 * raced in by someone else) must be re-checked.  Returns 0 on success,
 * -EBUSY if a reader holds the buffer, -ENOMEM on allocation failure.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;		/* did drvdata adopt our allocation? */
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
  154. static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
  155. {
  156. int ret = 0;
  157. unsigned long flags;
  158. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  159. spin_lock_irqsave(&drvdata->spinlock, flags);
  160. if (drvdata->reading) {
  161. ret = -EINVAL;
  162. goto out;
  163. }
  164. /*
  165. * In Perf mode there can be only one writer per sink. There
  166. * is also no need to continue if the ETB/ETR is already operated
  167. * from sysFS.
  168. */
  169. if (drvdata->mode != CS_MODE_DISABLED) {
  170. ret = -EINVAL;
  171. goto out;
  172. }
  173. drvdata->mode = CS_MODE_PERF;
  174. tmc_etb_enable_hw(drvdata);
  175. out:
  176. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  177. return ret;
  178. }
  179. static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
  180. {
  181. int ret;
  182. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  183. switch (mode) {
  184. case CS_MODE_SYSFS:
  185. ret = tmc_enable_etf_sink_sysfs(csdev);
  186. break;
  187. case CS_MODE_PERF:
  188. ret = tmc_enable_etf_sink_perf(csdev);
  189. break;
  190. /* We shouldn't be here */
  191. default:
  192. ret = -EINVAL;
  193. break;
  194. }
  195. if (ret)
  196. return ret;
  197. dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
  198. return 0;
  199. }
  200. static void tmc_disable_etf_sink(struct coresight_device *csdev)
  201. {
  202. unsigned long flags;
  203. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  204. spin_lock_irqsave(&drvdata->spinlock, flags);
  205. if (drvdata->reading) {
  206. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  207. return;
  208. }
  209. /* Disable the TMC only if it needs to */
  210. if (drvdata->mode != CS_MODE_DISABLED) {
  211. tmc_etb_disable_hw(drvdata);
  212. drvdata->mode = CS_MODE_DISABLED;
  213. }
  214. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  215. dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
  216. }
  217. static int tmc_enable_etf_link(struct coresight_device *csdev,
  218. int inport, int outport)
  219. {
  220. unsigned long flags;
  221. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  222. spin_lock_irqsave(&drvdata->spinlock, flags);
  223. if (drvdata->reading) {
  224. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  225. return -EBUSY;
  226. }
  227. tmc_etf_enable_hw(drvdata);
  228. drvdata->mode = CS_MODE_SYSFS;
  229. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  230. dev_info(drvdata->dev, "TMC-ETF enabled\n");
  231. return 0;
  232. }
  233. static void tmc_disable_etf_link(struct coresight_device *csdev,
  234. int inport, int outport)
  235. {
  236. unsigned long flags;
  237. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  238. spin_lock_irqsave(&drvdata->spinlock, flags);
  239. if (drvdata->reading) {
  240. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  241. return;
  242. }
  243. tmc_etf_disable_hw(drvdata);
  244. drvdata->mode = CS_MODE_DISABLED;
  245. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  246. dev_info(drvdata->dev, "TMC-ETF disabled\n");
  247. }
  248. static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
  249. void **pages, int nr_pages, bool overwrite)
  250. {
  251. int node;
  252. struct cs_buffers *buf;
  253. node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
  254. /* Allocate memory structure for interaction with Perf */
  255. buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
  256. if (!buf)
  257. return NULL;
  258. buf->snapshot = overwrite;
  259. buf->nr_pages = nr_pages;
  260. buf->data_pages = pages;
  261. return buf;
  262. }
  263. static void tmc_free_etf_buffer(void *config)
  264. {
  265. struct cs_buffers *buf = config;
  266. kfree(buf);
  267. }
  268. static int tmc_set_etf_buffer(struct coresight_device *csdev,
  269. struct perf_output_handle *handle,
  270. void *sink_config)
  271. {
  272. int ret = 0;
  273. unsigned long head;
  274. struct cs_buffers *buf = sink_config;
  275. /* wrap head around to the amount of space we have */
  276. head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
  277. /* find the page to write to */
  278. buf->cur = head / PAGE_SIZE;
  279. /* and offset within that page */
  280. buf->offset = head % PAGE_SIZE;
  281. local_set(&buf->data_size, 0);
  282. return ret;
  283. }
  284. static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
  285. struct perf_output_handle *handle,
  286. void *sink_config)
  287. {
  288. long size = 0;
  289. struct cs_buffers *buf = sink_config;
  290. if (buf) {
  291. /*
  292. * In snapshot mode ->data_size holds the new address of the
  293. * ring buffer's head. The size itself is the whole address
  294. * range since we want the latest information.
  295. */
  296. if (buf->snapshot)
  297. handle->head = local_xchg(&buf->data_size,
  298. buf->nr_pages << PAGE_SHIFT);
  299. /*
  300. * Tell the tracer PMU how much we got in this run and if
  301. * something went wrong along the way. Nobody else can use
  302. * this cs_buffers instance until we are done. As such
  303. * resetting parameters here and squaring off with the ring
  304. * buffer API in the tracer PMU is fine.
  305. */
  306. size = local_xchg(&buf->data_size, 0);
  307. }
  308. return size;
  309. }
/*
 * tmc_update_etf_buffer - copy the TMC internal RAM into the perf data pages.
 *
 * Stops the TMC, works out how much trace is held between RRP and RWP
 * (the whole buffer if it wrapped), rewinds RRP if the perf ring buffer
 * is smaller than the captured data, then drains TMC_RRD word by word
 * into buf->data_pages.  The accumulated byte count is published through
 * buf->data_size for tmc_reset_etf_buffer().
 */
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/* Oldest data was overwritten or dropped - flag it to the consumer */
	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		/*
		 * On a wrap the first words written out are replaced with
		 * barrier packets so decoders can resynchronise.
		 */
		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
/* Sink operations, shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.set_buffer = tmc_set_etf_buffer,
	.reset_buffer = tmc_reset_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

/* Link operations, only meaningful when the TMC is wired as an ETF */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

/* An ETB can only act as a sink */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

/* An ETF can act as both a sink and a link */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};
/*
 * tmc_read_prepare_etb - ready the ETB/ETF buffer for reading via /dev.
 *
 * Stops a sysFS trace session (dumping its data to drvdata->buf) and marks
 * the device as being read.  Fails with -EBUSY if a read is already in
 * progress and -EINVAL when the TMC is in HW FIFO mode, driven by perf,
 * or there is no captured data to read.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
/*
 * tmc_read_unprepare_etb - finish a /dev read of the ETB/ETF buffer.
 *
 * Counterpart of tmc_read_prepare_etb().  If a sysFS session was paused
 * for the read it is restarted on a zeroed buffer; otherwise the trace
 * buffer has been consumed and is freed.  Returns 0 on success, -EINVAL
 * when the TMC is in HW FIFO mode.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}