arm_pmu_acpi.c

/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

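/*
 * Per-CPU bookkeeping: the arm_pmu instance associated with each CPU once it
 * has been probed, and the Linux IRQ parsed from the MADT for that CPU's PMU
 * (0 if none was described).
 */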
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

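/*
 * Map the MADT GICC "performance interrupt" for @cpu onto a Linux IRQ.
 * Returns the IRQ number on success, 0 if no (valid) GSI is described, or a
 * negative error code from acpi_register_gsi().
 */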
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

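/* Release the GSI mapping set up by arm_pmu_acpi_register_irq(). */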
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

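/*
 * Register and request the PMU IRQ for every possible CPU at boot. On
 * failure, unwind carefully so that each GSI is unregistered exactly once,
 * even when several CPUs share the same IRQ (e.g. PPIs).
 */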
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

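/*
 * Find the PMU already allocated for CPUs with the same MIDR as the current
 * CPU, or allocate a fresh one if this is the first CPU of its kind to come
 * up. Called in atomic context from the CPU hotplug STARTING callback, hence
 * armpmu_alloc_atomic().
 */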
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage
 * the affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

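/*
 * Initialise and register each PMU probed so far: run @init_fn on it and
 * give it a unique name of the form "<base_name>_<n>".
 */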
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

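/*
 * Boot-time setup: parse the PMU IRQs from the MADT and install the hotplug
 * callback that associates CPUs with their PMUs as they come up.
 */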
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)