coresight-tmc-etr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"

struct etr_flat_buf {
	struct device	*dev;
	dma_addr_t	daddr;
	void		*vaddr;
	size_t		size;
};

/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer and tables.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type  |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to 4K page buffer.
 *	b10 - Normal entry, points to 4K page buffer.
 *	b11 - Link. The address points to the base of the next table.
 */
typedef u32 sgte_t;

#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))

#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3

#define ETR_SG_ADDR_SHIFT		4

#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 (type & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)		((entry) & ETR_SG_ET_MASK)
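
/*
 * Worked example (illustrative only, not part of the original code):
 * for a hypothetical 40-bit physical page address 0x40_1234_5000 and a
 * NORMAL entry, ETR_SG_ENTRY() drops the low 12 bits, shifts the page
 * frame number up by ETR_SG_ADDR_SHIFT and ORs in the entry type:
 *
 *	ETR_SG_ENTRY(0x4012345000, ETR_SG_ET_NORMAL)
 *		== ((0x4012345000 >> 12) << 4) | 0x2 == 0x40123452
 *
 * ETR_SG_ADDR() reverses the transformation:
 *
 *	ETR_SG_ADDR(0x40123452) == 0x4012345000
 */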

/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:	Generic SG Table holding the data/table pages.
 * @hwaddr:	hwaddress used by the TMC, which is the base
 *		address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};

/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping 1 entry, we could as
 * well replace the link entry of the previous page with the last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * address.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;
	return nr_sgpages + nr_sglinks;
}
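
/*
 * Worked example (illustrative only; assumes a 4KB system PAGE_SIZE, so
 * ETR_SG_PAGES_PER_SYSPAGE == 1 and ETR_SG_PTRS_PER_PAGE == 1024):
 *
 *  nr_pages = 1024 (a 4MB buffer):
 *	nr_sgpages = 1024, nr_sglinks = 1024 / 1023 = 1, and the
 *	remainder 1024 % 1023 = 1 is < 2, so the link is dropped and the
 *	final pointer becomes the LAST entry: 1024 entries in total,
 *	which fit exactly in one 4KB table page.
 *
 *  nr_pages = 2048 (an 8MB buffer):
 *	nr_sgpages = 2048, nr_sglinks = 2, remainder 2, so both links
 *	are kept: 2050 entries spread over three table pages.
 */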

/*
 * tmc_pages_get_offset: Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtual
 * contiguous buffer.
 */
static long
tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
{
	int i;
	dma_addr_t page_start;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		page_start = tmc_pages->daddrs[i];
		if (addr >= page_start && addr < (page_start + PAGE_SIZE))
			return i * PAGE_SIZE + (addr - page_start);
	}

	return -EINVAL;
}

/*
 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
 * If the pages were not allocated in tmc_pages_alloc(), we would
 * simply drop the refcount.
 */
static void tmc_pages_free(struct tmc_pages *tmc_pages,
			   struct device *dev, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
			dma_unmap_page(dev, tmc_pages->daddrs[i],
				       PAGE_SIZE, dir);
		if (tmc_pages->pages && tmc_pages->pages[i])
			__free_page(tmc_pages->pages[i]);
	}

	kfree(tmc_pages->pages);
	kfree(tmc_pages->daddrs);
	tmc_pages->pages = NULL;
	tmc_pages->daddrs = NULL;
	tmc_pages->nr_pages = 0;
}

/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses is
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
			   struct device *dev, int node,
			   enum dma_data_direction dir, void **pages)
{
	int i, nr_pages;
	dma_addr_t paddr;
	struct page *page;

	nr_pages = tmc_pages->nr_pages;
	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
				    GFP_KERNEL);
	if (!tmc_pages->daddrs)
		return -ENOMEM;
	tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
				   GFP_KERNEL);
	if (!tmc_pages->pages) {
		kfree(tmc_pages->daddrs);
		tmc_pages->daddrs = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		if (pages && pages[i]) {
			page = virt_to_page(pages[i]);
			/* Hold a refcount on the page */
			get_page(page);
		} else {
			page = alloc_pages_node(node,
						GFP_KERNEL | __GFP_ZERO, 0);
			/* Bail out if the allocation failed */
			if (!page)
				goto err;
		}
		paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, paddr))
			goto err;
		tmc_pages->daddrs[i] = paddr;
		tmc_pages->pages[i] = page;
	}
	return 0;
err:
	tmc_pages_free(tmc_pages, dev, dir);
	return -ENOMEM;
}

static inline long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
	return tmc_pages_get_offset(&sg_table->data_pages, addr);
}

static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->table_vaddr)
		vunmap(sg_table->table_vaddr);
	tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
}

static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->data_vaddr)
		vunmap(sg_table->data_vaddr);
	tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
}

void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
	tmc_free_table_pages(sg_table);
	tmc_free_data_pages(sg_table);
}

/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e., dev_to_node(dev)
 * rather than the CPU node).
 */
static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
{
	int rc;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	rc = tmc_pages_alloc(table_pages, sg_table->dev,
			     dev_to_node(sg_table->dev),
			     DMA_TO_DEVICE, NULL);
	if (rc)
		return rc;
	sg_table->table_vaddr = vmap(table_pages->pages,
				     table_pages->nr_pages,
				     VM_MAP,
				     PAGE_KERNEL);
	if (!sg_table->table_vaddr)
		rc = -ENOMEM;
	else
		sg_table->table_daddr = table_pages->daddrs[0];
	return rc;
}

static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
{
	int rc;

	/* Allocate data pages on the node requested by the caller */
	rc = tmc_pages_alloc(&sg_table->data_pages,
			     sg_table->dev, sg_table->node,
			     DMA_FROM_DEVICE, pages);
	if (!rc) {
		sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
					    sg_table->data_pages.nr_pages,
					    VM_MAP,
					    PAGE_KERNEL);
		if (!sg_table->data_vaddr)
			rc = -ENOMEM;
	}
	return rc;
}

/*
 * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table pages.
 *
 * @dev		- Device to which the pages should be DMA mapped.
 * @node	- Numa node for mem allocations
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for Data buffer.
 * @pages	- Optional list of virtual addresses of pages.
 */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
					int node,
					int nr_tpages,
					int nr_dpages,
					void **pages)
{
	long rc;
	struct tmc_sg_table *sg_table;

	sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return ERR_PTR(-ENOMEM);
	sg_table->data_pages.nr_pages = nr_dpages;
	sg_table->table_pages.nr_pages = nr_tpages;
	sg_table->node = node;
	sg_table->dev = dev;

	rc = tmc_alloc_data_pages(sg_table, pages);
	if (!rc)
		rc = tmc_alloc_table_pages(sg_table);

	if (rc) {
		tmc_free_sg_table(sg_table);
		kfree(sg_table);
		return ERR_PTR(rc);
	}

	return sg_table;
}

/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written by the
 * device, for @size bytes starting at @offset.
 */
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size)
{
	int i, index, start;
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct device *dev = table->dev;
	struct tmc_pages *data = &table->data_pages;

	start = offset >> PAGE_SHIFT;
	for (i = start; i < (start + npages); i++) {
		index = i % data->nr_pages;
		dma_sync_single_for_cpu(dev, data->daddrs[index],
					PAGE_SIZE, DMA_FROM_DEVICE);
	}
}

/* tmc_sg_table_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
	int i;
	struct device *dev = sg_table->dev;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	for (i = 0; i < table_pages->nr_pages; i++)
		dma_sync_single_for_device(dev, table_pages->daddrs[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

/*
 * tmc_sg_table_get_data: Get the buffer pointer for data @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset.
 *	or
 *	<= 0 if no data is available.
 */
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp)
{
	size_t size;
	int pg_idx = offset >> PAGE_SHIFT;
	int pg_offset = offset & (PAGE_SIZE - 1);
	struct tmc_pages *data_pages = &sg_table->data_pages;

	size = tmc_sg_table_buf_size(sg_table);
	if (offset >= size)
		return -EINVAL;

	/* Make sure we don't go beyond the end */
	len = (len < (size - offset)) ? len : size - offset;
	/* Respect the page boundaries */
	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
	if (len > 0)
		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
	return len;
}

#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
static unsigned long
tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
		      dma_addr_t addr, bool table)
{
	long offset;
	unsigned long base;
	struct tmc_pages *tmc_pages;

	if (table) {
		tmc_pages = &sg_table->table_pages;
		base = (unsigned long)sg_table->table_vaddr;
	} else {
		tmc_pages = &sg_table->data_pages;
		base = (unsigned long)sg_table->data_vaddr;
	}

	offset = tmc_pages_get_offset(tmc_pages, addr);
	if (offset < 0)
		return 0;
	return base + offset;
}

/* Dump the given sg_table */
static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
{
	sgte_t *ptr;
	int i = 0;
	dma_addr_t addr;
	struct tmc_sg_table *sg_table = etr_table->sg_table;

	ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
					      etr_table->hwaddr, true);
	while (ptr) {
		addr = ETR_SG_ADDR(*ptr);
		switch (ETR_SG_ET(*ptr)) {
		case ETR_SG_ET_NORMAL:
			dev_dbg(sg_table->dev,
				"%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
			ptr++;
			break;
		case ETR_SG_ET_LINK:
			dev_dbg(sg_table->dev,
				"%05d: *** %p\t:{L} 0x%llx ***\n",
				i, ptr, addr);
			ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
							      addr, true);
			break;
		case ETR_SG_ET_LAST:
			dev_dbg(sg_table->dev,
				"%05d: ### %p\t:[L] 0x%llx ###\n",
				i, ptr, addr);
			return;
		default:
			dev_dbg(sg_table->dev,
				"%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
				i, ptr, addr);
			return;
		}
		i++;
	}
	dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif

/*
 * Populate the SG Table page table entries from table/data
 * pages allocated. Each Data page has ETR_SG_PAGES_PER_SYSPAGE SG pages.
 * So does a Table page. So we keep track of indices of the tables
 * in each system page and move the pointers accordingly.
 */
#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
{
	dma_addr_t paddr;
	int i, type, nr_entries;
	int tpidx = 0;		/* index to the current system table_page */
	int sgtidx = 0;		/* index to the sg_table within the current syspage */
	int sgtentry = 0;	/* the entry within the sg_table */
	int dpidx = 0;		/* index to the current system data_page */
	int spidx = 0;		/* index to the SG page within the current data page */
	sgte_t *ptr;		/* pointer to the table entry to fill */
	struct tmc_sg_table *sg_table = etr_table->sg_table;
	dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
	dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;

	nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
	/*
	 * Use the contiguous virtual address of the table to update entries.
	 */
	ptr = sg_table->table_vaddr;
	/*
	 * Fill all the entries, except the last entry to avoid special
	 * checks within the loop.
	 */
	for (i = 0; i < nr_entries - 1; i++) {
		if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
			/*
			 * Last entry in a sg_table page is a link address to
			 * the next table page. If this sg_table is the last
			 * one in the system page, it links to the first
			 * sg_table in the next system page. Otherwise, it
			 * links to the next sg_table page within the system
			 * page.
			 */
			if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
				paddr = table_daddrs[tpidx + 1];
			} else {
				paddr = table_daddrs[tpidx] +
					(ETR_SG_PAGE_SIZE * (sgtidx + 1));
			}
			type = ETR_SG_ET_LINK;
		} else {
			/*
			 * Update the indices to the data_pages to point to the
			 * next sg_page in the data buffer.
			 */
			type = ETR_SG_ET_NORMAL;
			paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
			if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
				dpidx++;
		}
		*ptr++ = ETR_SG_ENTRY(paddr, type);
		/*
		 * Move to the next table pointer, moving the table page index
		 * if necessary
		 */
		if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
			if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
				tpidx++;
		}
	}

	/* Set up the last entry, which is always a data pointer */
	paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
	*ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
}
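
/*
 * Resulting layout, shown here for illustration only (assuming a 4KB
 * system PAGE_SIZE and a buffer spanning several table pages): each table
 * page holds 1023 NORMAL data pointers followed by one LINK entry to the
 * next table page, and the final table page ends with a LAST entry, which
 * points to the final 4KB data buffer rather than to another table page.
 */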

/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
 * populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual addresses
 */
static struct etr_sg_table *
tmc_init_etr_sg_table(struct device *dev, int node,
		      unsigned long size, void **pages)
{
	int nr_entries, nr_tpages;
	int nr_dpages = size >> PAGE_SHIFT;
	struct tmc_sg_table *sg_table;
	struct etr_sg_table *etr_table;

	etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
	if (!etr_table)
		return ERR_PTR(-ENOMEM);
	nr_entries = tmc_etr_sg_table_entries(nr_dpages);
	nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);

	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sg_table)) {
		kfree(etr_table);
		return ERR_CAST(sg_table);
	}

	etr_table->sg_table = sg_table;
	/* TMC should use table base address for DBA */
	etr_table->hwaddr = sg_table->table_daddr;
	tmc_etr_sg_table_populate(etr_table);
	/* Sync the table pages for the HW */
	tmc_sg_table_sync_table(sg_table);
	tmc_etr_sg_table_dump(etr_table);

	return etr_table;
}

/*
 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
 */
static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
				  struct etr_buf *etr_buf, int node,
				  void **pages)
{
	struct etr_flat_buf *flat_buf;

	/* We cannot reuse existing pages for flat buf */
	if (pages)
		return -EINVAL;

	flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
	if (!flat_buf)
		return -ENOMEM;

	flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
					     &flat_buf->daddr, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
	}

	flat_buf->size = etr_buf->size;
	flat_buf->dev = drvdata->dev;
	etr_buf->hwaddr = flat_buf->daddr;
	etr_buf->mode = ETR_MODE_FLAT;
	etr_buf->private = flat_buf;
	return 0;
}

static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	if (flat_buf && flat_buf->daddr)
		dma_free_coherent(flat_buf->dev, flat_buf->size,
				  flat_buf->vaddr, flat_buf->daddr);
	kfree(flat_buf);
}

static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	etr_buf->offset = rrp - etr_buf->hwaddr;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;
}

static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
					 u64 offset, size_t len, char **bufpp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	*bufpp = (char *)flat_buf->vaddr + offset;
	/*
	 * tmc_etr_buf_get_data already adjusts the length to handle
	 * buffer wrapping around.
	 */
	return len;
}

static const struct etr_buf_operations etr_flat_buf_ops = {
	.alloc = tmc_etr_alloc_flat_buf,
	.free = tmc_etr_free_flat_buf,
	.sync = tmc_etr_sync_flat_buf,
	.get_data = tmc_etr_get_data_flat_buf,
};

/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
 * appropriately.
 */
static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf, int node,
				void **pages)
{
	struct etr_sg_table *etr_table;

	etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
					  etr_buf->size, pages);
	if (IS_ERR(etr_table))
		return -ENOMEM;
	etr_buf->hwaddr = etr_table->hwaddr;
	etr_buf->mode = ETR_MODE_ETR_SG;
	etr_buf->private = etr_table;
	return 0;
}

static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	if (etr_table) {
		tmc_free_sg_table(etr_table->sg_table);
		kfree(etr_table);
	}
}

static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
				       size_t len, char **bufpp)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
}

static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	long r_offset, w_offset;
	struct etr_sg_table *etr_table = etr_buf->private;
	struct tmc_sg_table *table = etr_table->sg_table;

	/* Convert hw address to offset in the buffer */
	r_offset = tmc_sg_get_data_page_offset(table, rrp);
	if (r_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RRP %llx to offset\n", rrp);
		etr_buf->len = 0;
		return;
	}

	w_offset = tmc_sg_get_data_page_offset(table, rwp);
	if (w_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RWP %llx to offset\n", rwp);
		etr_buf->len = 0;
		return;
	}

	etr_buf->offset = r_offset;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
				w_offset - r_offset;
	tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
}

static const struct etr_buf_operations etr_sg_buf_ops = {
	.alloc = tmc_etr_alloc_sg_buf,
	.free = tmc_etr_free_sg_buf,
	.sync = tmc_etr_sync_sg_buf,
	.get_data = tmc_etr_get_data_sg_buf,
};

/*
 * TMC ETR could be connected to a CATU device, which can provide address
 * translation service. This is represented by the Output port of the TMC
 * (ETR) connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
	int i;
	struct coresight_device *tmp, *etr = drvdata->csdev;

	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
		return NULL;

	for (i = 0; i < etr->nr_outport; i++) {
		tmp = etr->conns[i].child_dev;
		if (tmp && coresight_is_catu_device(tmp))
			return tmp;
	}

	return NULL;
}

static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->enable)
		helper_ops(catu)->enable(catu, drvdata->etr_buf);
}

static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->disable)
		helper_ops(catu)->disable(catu, drvdata->etr_buf);
}

static const struct etr_buf_operations *etr_buf_ops[] = {
	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
	[ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
						? &etr_catu_buf_ops : NULL,
};

static inline int tmc_etr_mode_alloc_buf(int mode,
					 struct tmc_drvdata *drvdata,
					 struct etr_buf *etr_buf, int node,
					 void **pages)
{
	int rc = -EINVAL;

	switch (mode) {
	case ETR_MODE_FLAT:
	case ETR_MODE_ETR_SG:
	case ETR_MODE_CATU:
		if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
						      node, pages);
		if (!rc)
			etr_buf->ops = etr_buf_ops[mode];
		return rc;
	default:
		return -EINVAL;
	}
}

/*
 * tmc_alloc_etr_buf: Allocate a buffer used by the ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
					 ssize_t size, int flags,
					 int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg, has_iommu;
	bool has_sg, has_catu;
	struct etr_buf *etr_buf;

	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	has_iommu = iommu_get_domain_for_dev(drvdata->dev);
	has_catu = !!tmc_etr_get_catu_device(drvdata);

	has_sg = has_catu || has_etr_sg;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/*
	 * If we have to use an existing list of pages, we cannot reliably
	 * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
	 * we use contiguous DMA memory if at least one of the following
	 * conditions is true:
	 * a) The ETR cannot use Scatter-Gather.
	 * b) We have a backing IOMMU.
	 * c) The requested memory size is smaller (< 1M).
	 *
	 * Fall back to the available mechanisms.
	 */
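	/*
	 * Illustration (not part of the original source): with ETR SG
	 * support, no IOMMU and no caller-supplied pages, a 512KB request
	 * tries the flat (contiguous) buffer first and only falls back to
	 * ETR SG mode if that allocation fails, whereas a 2MB request
	 * skips the flat buffer and goes straight to ETR SG mode.
	 */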
	if (!pages &&
	    (!has_sg || has_iommu || size < SZ_1M))
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
					    etr_buf, node, pages);
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
		(unsigned long)size >> 10, etr_buf->mode);
	return etr_buf;
}

static void tmc_free_etr_buf(struct etr_buf *etr_buf)
{
	WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
	etr_buf->ops->free(etr_buf);
	kfree(etr_buf);
}

/*
 * tmc_etr_buf_get_data: Get the pointer to the trace data at @offset,
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available at @offset, with *bufpp
 * updated to point to the buffer.
 */
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
				    u64 offset, size_t len, char **bufpp)
{
	/* Adjust the length to limit this transaction to end of buffer */
	len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;

	return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}

static inline s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
	ssize_t len;
	char *bufp;

	len = tmc_etr_buf_get_data(etr_buf, offset,
				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
		return -EINVAL;
	coresight_insert_barrier_packet(bufp);
	return offset + CORESIGHT_BARRIER_PKT_SIZE;
}

/*
 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
 * Makes sure the trace data is synced to the memory for consumption.
 * @etr_buf->offset will hold the offset to the beginning of the trace data
 * within the buffer, with @etr_buf->len bytes to consume.
 */
static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;
	u64 rrp, rwp;
	u32 status;

	rrp = tmc_read_rrp(drvdata);
	rwp = tmc_read_rwp(drvdata);
	status = readl_relaxed(drvdata->base + TMC_STS);
	etr_buf->full = status & TMC_STS_FULL;

	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

	etr_buf->ops->sync(etr_buf, rrp, rwp);

	/* Insert barrier packets at the beginning, if there was an overflow */
	if (etr_buf->full)
		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}

static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
			      struct etr_buf *etr_buf)
{
	u32 axictl, sts;

	/* Callers should provide an appropriate buffer for use */
	if (WARN_ON(!etr_buf || drvdata->etr_buf))
		return;
	drvdata->etr_buf = etr_buf;

	/*
	 * If this ETR is connected to a CATU, enable it before we turn
	 * this on.
	 */
	tmc_etr_enable_catu(drvdata);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	if (etr_buf->mode == ETR_MODE_ETR_SG) {
		if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
			return;
		axictl |= TMC_AXICTL_SCT_GAT_MODE;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, etr_buf->hwaddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set them properly (i.e., RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, etr_buf->hwaddr);
		tmc_write_rwp(drvdata, etr_buf->hwaddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer (starts at etr_buf->offset,
 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
 * also updating the @bufpp on where to find it. Since the trace data
 * can start anywhere in the buffer, depending on the RRP, we adjust the
 * @len returned to handle buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct etr_buf *etr_buf = drvdata->sysfs_buf;

	if (pos + actual > etr_buf->len)
		actual = etr_buf->len - pos;
	if (actual <= 0)
		return actual;

	/* Compute the offset from which we read the data */
	offset = etr_buf->offset + pos;
	if (offset >= etr_buf->size)
		offset -= etr_buf->size;
	return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
}

static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	return tmc_alloc_etr_buf(drvdata, drvdata->size,
				 0, cpu_to_node(0), NULL);
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
{
	if (buf)
		tmc_free_etr_buf(buf);
}

static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;

	if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
		tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
		drvdata->sysfs_buf = NULL;
	} else {
		tmc_sync_etr_buf(drvdata);
	}
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_sync_sysfs_buf(drvdata);

	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);

	/* Disable CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);

	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;

	/*
	 * If we are enabling the ETR from disabled state, we need to make
	 * sure we have a buffer with the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory with the locks released */
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched, even if the buffer size has changed.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If we don't have a buffer or it doesn't match the requested size,
	 * use the buffer allocated above. Otherwise reuse the existing buffer.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
		free_buf = sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	/* We don't support perf mode yet! */
	return -EINVAL;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to be disabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If sysfs_buf is NULL the trace data has been read already */
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if we are trying to read from a running session */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled drvdata::sysfs_buf
		 * can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);

	return 0;
}