fsl_imx8_ddr_perf.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL 0x0
#define COUNTER_READ 0x20
#define COUNTER_DPCR1 0x30

#define CNTL_OVER 0x1
#define CNTL_CLEAR 0x2
#define CNTL_EN 0x4
#define CNTL_EN_MASK 0xFFFFFFFB
#define CNTL_CLEAR_MASK 0xFFFFFFFD
#define CNTL_OVER_MASK 0xFFFFFFFE

#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4

#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME "imx8_ddr"
#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */

struct fsl_ddr_devtype_data {
        unsigned int quirks; /* quirks needed for different DDR Perf core */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
        .quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
        { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
        { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
        struct pmu pmu;
        void __iomem *base;
        unsigned int cpu;
        struct hlist_node node;
        struct device *dev;
        struct perf_event *events[NUM_COUNTERS];
        int active_events;
        enum cpuhp_state cpuhp_state;
        const struct fsl_ddr_devtype_data *devtype_data;
        int irq;
        int id;
};

static ssize_t ddr_perf_cpumask_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct ddr_pmu *pmu = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
        __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
        &ddr_perf_cpumask_attr.attr,
        NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
        .attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
                   char *page)
{
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
        return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)                             \
        (&((struct perf_pmu_events_attr[]) {                            \
                { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
                  .id = _id, }                                          \
        })[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
        IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
        IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
        IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
        IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
        IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
        IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
        IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
        IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
        IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
        IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
        IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
        IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
        IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
        IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
        IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
        IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
        IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
        IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
        IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
        IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
        IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
        IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
        IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
        IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
        IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
        IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
        IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
        IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
        IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
        IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
        IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
        IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
        NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
        .name = "events",
        .attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
        &format_attr_event.attr,
        &format_attr_axi_id.attr,
        &format_attr_axi_mask.attr,
        NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
        .name = "format",
        .attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &ddr_perf_events_attr_group,
        &ddr_perf_format_attr_group,
        &ddr_perf_cpumask_attr_group,
        NULL,
};

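/* Only axid-read (0x41) and axid-write (0x42) use the AXI ID filter. */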
static bool ddr_perf_is_filtered(struct perf_event *event)
{
        return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
        return event->attr.config1;
}

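/*
 * The AXI ID filter register (DPCR1) is shared, so two filtered events can
 * coexist only if they request the same axi_id/axi_mask value (config1).
 */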
static bool ddr_perf_filters_compatible(struct perf_event *a,
                                        struct perf_event *b)
{
        if (!ddr_perf_is_filtered(a))
                return true;
        if (!ddr_perf_is_filtered(b))
                return true;
        return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
        unsigned int filt;
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

        filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
        return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
                ddr_perf_is_filtered(event);
}

static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
        int i;

        /*
         * Always map the cycle event to counter 0.
         * The cycles counter is dedicated to the cycle event and
         * can't be used for the other events.
         */
        if (event == EVENT_CYCLES_ID) {
                if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
                        return EVENT_CYCLES_COUNTER;
                else
                        return -ENOENT;
        }

        for (i = 1; i < NUM_COUNTERS; i++) {
                if (pmu->events[i] == NULL)
                        return i;
        }

        return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
        pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
        struct perf_event *event = pmu->events[counter];
        void __iomem *base = pmu->base;

        /*
         * Return bytes instead of bursts from a DDR transaction for the
         * axid-read and axid-write events if the PMU core supports the
         * enhanced filter.
         */
        base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
                                                       COUNTER_READ;
        return readl_relaxed(base + counter * 4);
}

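/*
 * This is an uncore PMU: sampling and per-task counting are rejected, and
 * every accepted event is bound to the CPU that owns this PMU instance.
 */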
static int ddr_perf_event_init(struct perf_event *event)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_event *sibling;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EOPNOTSUPP;

        if (event->cpu < 0) {
                dev_warn(pmu->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }

        /*
         * We must NOT create groups containing mixed PMUs, although software
         * events are acceptable (for example to create a CCN group
         * periodically read when a hrtimer aka cpu-clock leader triggers).
         */
        if (event->group_leader->pmu != event->pmu &&
            !is_software_event(event->group_leader))
                return -EINVAL;

        if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
                if (!ddr_perf_filters_compatible(event, event->group_leader))
                        return -EINVAL;
                for_each_sibling_event(sibling, event->group_leader) {
                        if (!ddr_perf_filters_compatible(event, sibling))
                                return -EINVAL;
                }
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling))
                        return -EINVAL;
        }

        event->cpu = pmu->cpu;
        hwc->idx = -1;

        return 0;
}

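/*
 * The hardware counters are 32 bits wide: fold the current reading into the
 * 64-bit event count as a delta against the previous snapshot, using a
 * cmpxchg loop to guard against concurrent updates of prev_count.
 */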
static void ddr_perf_event_update(struct perf_event *event)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;
        int counter = hwc->idx;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = ddr_perf_read_counter(pmu, counter);
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                        new_raw_count) != prev_raw_count);

        delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

        local64_add(delta, &event->count);
}

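/*
 * Program the event number into the CSV field of the counter control
 * register and set the enable bit; when @enable is false, only the enable
 * bit is cleared.
 */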
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
                                    int counter, bool enable)
{
        u8 reg = counter * 4 + COUNTER_CNTL;
        int val;

        if (enable) {
                /*
                 * The cycle counter is special: it must first have 0 and
                 * then 1 written into its CLEAR bit to be cleared. The other
                 * counters only need 0 written into the CLEAR bit, which the
                 * hardware then turns into 1. The enable flow below is
                 * harmless for all counters.
                 */
                writel(0, pmu->base + reg);
                val = CNTL_EN | CNTL_CLEAR;
                val |= FIELD_PREP(CNTL_CSV_MASK, config);
                writel(val, pmu->base + reg);
        } else {
                /* Disable counter */
                val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
                writel(val, pmu->base + reg);
        }
}

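/* Reset the prev_count snapshot and (re)enable the counter for this event. */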
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        local64_set(&hwc->prev_count, 0);

        ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

        hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter;
        int cfg = event->attr.config;
        int cfg1 = event->attr.config1;

        if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
                int i;

                for (i = 1; i < NUM_COUNTERS; i++) {
                        if (pmu->events[i] &&
                            !ddr_perf_filters_compatible(event, pmu->events[i]))
                                return -EINVAL;
                }

                if (ddr_perf_is_filtered(event)) {
                        /* Invert the AXI ID masking (axi_mask) value. */
                        cfg1 ^= AXI_MASKING_REVERT;
                        writel(cfg1, pmu->base + COUNTER_DPCR1);
                }
        }

        counter = ddr_perf_alloc_counter(pmu, cfg);
        if (counter < 0) {
                dev_dbg(pmu->dev, "There are not enough counters\n");
                return -EOPNOTSUPP;
        }

        pmu->events[counter] = event;
        pmu->active_events++;
        hwc->idx = counter;

        hwc->state |= PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                ddr_perf_event_start(event, flags);

        return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
        ddr_perf_event_update(event);

        hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        ddr_perf_event_stop(event, PERF_EF_UPDATE);

        ddr_perf_free_counter(pmu, counter);
        pmu->active_events--;
        hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
        struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

        /* Enable the cycle counter if the cycle event is not in the active list. */
        if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
                ddr_perf_counter_enable(ddr_pmu,
                                        EVENT_CYCLES_ID,
                                        EVENT_CYCLES_COUNTER,
                                        true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
        struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

        if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
                ddr_perf_counter_enable(ddr_pmu,
                                        EVENT_CYCLES_ID,
                                        EVENT_CYCLES_COUNTER,
                                        false);
}

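/*
 * Fill in the struct pmu callbacks and claim an ID from the IDA; the ID
 * becomes the instance suffix of the "imx8_ddr%d" PMU name used in probe.
 */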
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
                         struct device *dev)
{
        *pmu = (struct ddr_pmu) {
                .pmu = (struct pmu) {
                        .module       = THIS_MODULE,
                        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
                        .task_ctx_nr  = perf_invalid_context,
                        .attr_groups  = attr_groups,
                        .event_init   = ddr_perf_event_init,
                        .add          = ddr_perf_event_add,
                        .del          = ddr_perf_event_del,
                        .start        = ddr_perf_event_start,
                        .stop         = ddr_perf_event_stop,
                        .read         = ddr_perf_event_update,
                        .pmu_enable   = ddr_perf_pmu_enable,
                        .pmu_disable  = ddr_perf_pmu_disable,
                },
                .base = base,
                .dev = dev,
        };

        pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
        return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
        int i;
        struct ddr_pmu *pmu = (struct ddr_pmu *) p;
        struct perf_event *event, *cycle_event = NULL;

        /* All counters will stop if the cycle counter is disabled. */
        ddr_perf_counter_enable(pmu,
                                EVENT_CYCLES_ID,
                                EVENT_CYCLES_COUNTER,
                                false);

        /*
         * When the cycle counter overflows, all counters are stopped,
         * and an IRQ is raised. If any other counter overflows, it
         * continues counting, and no IRQ is raised.
         *
         * Cycles occur at least 4 times as often as other events, so we
         * can update all events on a cycle counter overflow and not
         * lose events.
         */
        for (i = 0; i < NUM_COUNTERS; i++) {
                if (!pmu->events[i])
                        continue;

                event = pmu->events[i];
                ddr_perf_event_update(event);

                if (event->hw.idx == EVENT_CYCLES_COUNTER)
                        cycle_event = event;
        }

        ddr_perf_counter_enable(pmu,
                                EVENT_CYCLES_ID,
                                EVENT_CYCLES_COUNTER,
                                true);

        if (cycle_event)
                ddr_perf_event_update(cycle_event);

        return IRQ_HANDLED;
}

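/*
 * CPU hotplug callback: when the CPU this PMU is bound to goes offline,
 * migrate the perf context and the IRQ affinity to another online CPU.
 */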
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
        int target;

        if (cpu != pmu->cpu)
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu->pmu, cpu, target);
        pmu->cpu = target;

        WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

        return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
        struct ddr_pmu *pmu;
        struct device_node *np;
        void __iomem *base;
        char *name;
        int num;
        int ret;
        int irq;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        np = pdev->dev.of_node;

        pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;

        num = ddr_perf_init(pmu, base, &pdev->dev);

        platform_set_drvdata(pdev, pmu);

        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
                              num);
        if (!name) {
                ret = -ENOMEM;
                goto cpuhp_state_err;
        }

        pmu->devtype_data = of_device_get_match_data(&pdev->dev);

        pmu->cpu = raw_smp_processor_id();
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      DDR_CPUHP_CB_NAME,
                                      NULL,
                                      ddr_perf_offline_cpu);
        if (ret < 0) {
                dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
                goto cpuhp_state_err;
        }

        pmu->cpuhp_state = ret;

        /* Register the pmu instance for cpu hotplug */
        ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
        if (ret) {
                dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
                goto cpuhp_instance_err;
        }

        /* Request irq */
        irq = of_irq_get(np, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "Failed to get irq: %d", irq);
                ret = irq;
                goto ddr_perf_err;
        }

        ret = devm_request_irq(&pdev->dev, irq,
                               ddr_perf_irq_handler,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
                               DDR_CPUHP_CB_NAME,
                               pmu);
        if (ret < 0) {
                dev_err(&pdev->dev, "Request irq failed: %d", ret);
                goto ddr_perf_err;
        }

        pmu->irq = irq;
        ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
        if (ret) {
                dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
                goto ddr_perf_err;
        }

        ret = perf_pmu_register(&pmu->pmu, name, -1);
        if (ret)
                goto ddr_perf_err;

        return 0;

ddr_perf_err:
        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
        cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
        ida_simple_remove(&ddr_ida, pmu->id);
        dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
        return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
        struct ddr_pmu *pmu = platform_get_drvdata(pdev);

        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
        cpuhp_remove_multi_state(pmu->cpuhp_state);
        irq_set_affinity_hint(pmu->irq, NULL);

        perf_pmu_unregister(&pmu->pmu);

        ida_simple_remove(&ddr_ida, pmu->id);
        return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
        .driver         = {
                .name                = "imx-ddr-pmu",
                .of_match_table      = imx_ddr_pmu_dt_ids,
                .suppress_bind_attrs = true,
        },
        .probe          = ddr_perf_probe,
        .remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");