arm_pmu_acpi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
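/*
 * Per-CPU bookkeeping: the arm_pmu instance associated with each CPU, and
 * the Linux IRQ parsed from that CPU's MADT GICC entry (0 if none).
 */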
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
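/*
 * Map the GSI from this CPU's MADT GICC performance interrupt field to a
 * Linux IRQ. Returns the IRQ number, 0 if no interrupt is described, or a
 * negative error code from acpi_register_gsi().
 */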
static int arm_pmu_acpi_register_irq(int cpu)
{
        struct acpi_madt_generic_interrupt *gicc;
        int gsi, trigger;

        gicc = acpi_cpu_get_madt_gicc(cpu);

        gsi = gicc->performance_interrupt;

        /*
         * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
         * have an interrupt. QEMU advertises this by using a GSI of zero,
         * which is not known to be valid on any hardware despite being
         * valid per the spec. Take the pragmatic approach and reject a
         * GSI of zero for now.
         */
        if (!gsi)
                return 0;

        if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
                trigger = ACPI_EDGE_SENSITIVE;
        else
                trigger = ACPI_LEVEL_SENSITIVE;

        /*
         * Helpfully, the MADT GICC doesn't have a polarity flag for the
         * "performance interrupt". Luckily, on compliant GICs the polarity is
         * a fixed value in HW (for both SPIs and PPIs) that we cannot change
         * from SW.
         *
         * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
         * may not match the real polarity, but that should not matter.
         *
         * Other interrupt controllers are not supported with ACPI.
         */
        return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
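/* Release the GSI mapping set up by arm_pmu_acpi_register_irq(), if any. */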
static void arm_pmu_acpi_unregister_irq(int cpu)
{
        struct acpi_madt_generic_interrupt *gicc;
        int gsi;

        gicc = acpi_cpu_get_madt_gicc(cpu);

        gsi = gicc->performance_interrupt;
        if (gsi)
                acpi_unregister_gsi(gsi);
}
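/*
 * Recent MADT GICC entries can also describe a per-CPU Statistical Profiling
 * Extension (SPE) interrupt. If every CPU reports the same PPI, a platform
 * device is created below for the SPE driver to bind against.
 */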
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
        {
                /* irq */
                .flags = IORESOURCE_IRQ,
        }
};

static struct platform_device spe_dev = {
        .name = ARMV8_SPE_PDEV_NAME,
        .id = -1,
        .resource = spe_resources,
        .num_resources = ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
        int cpu, hetid, irq, ret;
        bool first = true;
        u16 gsi = 0;

        /*
         * Sanity check all the GICC tables for the same interrupt number.
         * For now, we only support homogeneous ACPI/SPE machines.
         */
        for_each_possible_cpu(cpu) {
                struct acpi_madt_generic_interrupt *gicc;

                gicc = acpi_cpu_get_madt_gicc(cpu);
                if (gicc->header.length < ACPI_MADT_GICC_SPE)
                        return;

                if (first) {
                        gsi = gicc->spe_interrupt;
                        if (!gsi)
                                return;
                        hetid = find_acpi_cpu_topology_hetero_id(cpu);
                        first = false;
                } else if ((gsi != gicc->spe_interrupt) ||
                           (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
                        pr_warn("ACPI: SPE must be homogeneous\n");
                        return;
                }
        }

        irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
                                ACPI_ACTIVE_HIGH);
        if (irq < 0) {
                pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
                return;
        }

        spe_resources[0].start = irq;
        ret = platform_device_register(&spe_dev);
        if (ret < 0) {
                pr_warn("ACPI: SPE: Unable to register device\n");
                acpi_unregister_gsi(gsi);
        }
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */
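/*
 * Walk all possible CPUs, registering each CPU's PMU GSI and requesting the
 * resulting IRQ up front. On failure, unwind every GSI registered so far.
 */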
static int arm_pmu_acpi_parse_irqs(void)
{
        int irq, cpu, irq_cpu, err;

        for_each_possible_cpu(cpu) {
                irq = arm_pmu_acpi_register_irq(cpu);
                if (irq < 0) {
                        err = irq;
                        pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
                                cpu, err);
                        goto out_err;
                } else if (irq == 0) {
                        pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
                }

                /*
                 * Log and request the IRQ so the core arm_pmu code can manage
                 * it. We'll have to sanity-check IRQs later when we associate
                 * them with their PMUs.
                 */
                per_cpu(pmu_irqs, cpu) = irq;
                armpmu_request_irq(irq, cpu);
        }

        return 0;

out_err:
        for_each_possible_cpu(cpu) {
                irq = per_cpu(pmu_irqs, cpu);
                if (!irq)
                        continue;

                arm_pmu_acpi_unregister_irq(cpu);

                /*
                 * Blat all copies of the IRQ so that we only unregister the
                 * corresponding GSI once (e.g. when we have PPIs).
                 */
                for_each_possible_cpu(irq_cpu) {
                        if (per_cpu(pmu_irqs, irq_cpu) == irq)
                                per_cpu(pmu_irqs, irq_cpu) = 0;
                }
        }

        return err;
}
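/*
 * Find the PMU instance already created for another CPU with the same CPUID
 * (MIDR), or allocate a new one. Called from the CPU-starting hotplug
 * callback, hence the atomic allocation.
 */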
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
        unsigned long cpuid = read_cpuid_id();
        struct arm_pmu *pmu;
        int cpu;

        for_each_possible_cpu(cpu) {
                pmu = per_cpu(probed_pmus, cpu);
                if (!pmu || pmu->acpi_cpuid != cpuid)
                        continue;

                return pmu;
        }

        pmu = armpmu_alloc_atomic();
        if (!pmu) {
                pr_warn("Unable to allocate PMU for CPU%d\n",
                        smp_processor_id());
                return NULL;
        }

        pmu->acpi_cpuid = cpuid;

        return pmu;
}
/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
        int cpu;

        if (!irq)
                return true;

        for_each_cpu(cpu, &pmu->supported_cpus) {
                int other_irq = per_cpu(hw_events->irq, cpu);

                if (!other_irq)
                        continue;

                if (irq == other_irq)
                        continue;
                if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
                        continue;

                pr_warn("mismatched PPIs detected\n");
                return false;
        }

        return true;
}
/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
        struct arm_pmu *pmu;
        struct pmu_hw_events __percpu *hw_events;
        int irq;

        /* If we've already probed this CPU, we have nothing to do */
        if (per_cpu(probed_pmus, cpu))
                return 0;

        irq = per_cpu(pmu_irqs, cpu);

        pmu = arm_pmu_acpi_find_alloc_pmu();
        if (!pmu)
                return -ENOMEM;

        per_cpu(probed_pmus, cpu) = pmu;

        if (pmu_irq_matches(pmu, irq)) {
                hw_events = pmu->hw_events;
                per_cpu(hw_events->irq, cpu) = irq;
        }

        cpumask_set_cpu(cpu, &pmu->supported_cpus);

        /*
         * Ideally, we'd probe the PMU here when we find the first matching
         * CPU. We can't do that for several reasons; see the comment in
         * arm_pmu_acpi_init().
         *
         * So for the time being, we're done.
         */
        return 0;
}
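/*
 * Called by the CPU PMU driver, which passes its init function. Registers
 * each distinct PMU probed so far, naming instances "<base>_<index>" so that
 * multiple PMUs remain distinguishable.
 */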
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
        int pmu_idx = 0;
        int cpu, ret;

        /*
         * Initialise and register the set of PMUs which we know about right
         * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
         * could handle late hotplug, but this may lead to deadlock since we
         * might try to register a hotplug notifier instance from within a
         * hotplug notifier.
         *
         * There's also the problem of having access to the right init_fn,
         * without tying this too deeply into the "real" PMU driver.
         *
         * For the moment, as with the platform/DT case, we need at least one
         * of a PMU's CPUs to be online at probe time.
         */
        for_each_possible_cpu(cpu) {
                struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
                char *base_name;

                if (!pmu || pmu->name)
                        continue;

                ret = init_fn(pmu);
                if (ret == -ENODEV) {
                        /* PMU not handled by this driver, or not present */
                        continue;
                } else if (ret) {
                        pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
                        return ret;
                }

                base_name = pmu->name;
                pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
                if (!pmu->name) {
                        pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
                        return -ENOMEM;
                }

                ret = armpmu_register(pmu);
                if (ret) {
                        pr_warn("Failed to register PMU for CPU%d\n", cpu);
                        kfree(pmu->name);
                        return ret;
                }
        }

        return 0;
}
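/*
 * Early init: create the SPE device if one is described, parse and request
 * the PMU IRQs from the MADT, then hook the CPU-starting hotplug state so
 * each CPU is associated with a PMU as it comes online.
 */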
static int arm_pmu_acpi_init(void)
{
        int ret;

        if (acpi_disabled)
                return 0;

        arm_spe_acpi_register_device();

        ret = arm_pmu_acpi_parse_irqs();
        if (ret)
                return ret;

        ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
                                "perf/arm/pmu_acpi:starting",
                                arm_pmu_acpi_cpu_starting, NULL);

        return ret;
}
subsys_initcall(arm_pmu_acpi_init)