cpumap.c

// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>

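/*
 * Build a "dummy" map holding the single entry -1, which callers treat
 * as "any CPU" (e.g. for per-thread events that are not bound to a
 * particular CPU).
 */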
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

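/*
 * Release the map's memory. Warns once if the map is freed while its
 * refcount is still non-zero, i.e. get/put calls were unbalanced.
 */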
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

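/* Take a new reference on the map; safe to call with NULL. */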
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

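/* Drop a reference; the map is freed when the last reference is gone. */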
void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}

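/*
 * Fallback map covering CPUs 0..N-1, where N is the number of online
 * CPUs reported by sysconf(). Note this assumes online CPU ids are
 * contiguous from 0; the sysfs-based path below handles the general
 * case.
 */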
static struct perf_cpu_map *cpu_map__default_new(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

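/* Copy the first nr_cpus entries of tmp_cpus into a right-sized map. */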
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

	if (cpus != NULL) {
		cpus->nr = nr_cpus;
		memcpy(cpus->map, tmp_cpus, payload_size);
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

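/*
 * Parse a kernel-style CPU list (e.g. "0-3,8\n") from @file into a map.
 * Ranges written as "a-b" are expanded to the individual CPUs; an empty
 * or unreadable file falls back to the default 0..N-1 map.
 */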
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
				"Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

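/*
 * Read the online-CPU map from sysfs; if the file cannot be opened,
 * fall back to the sysconf()-based default map.
 */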
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

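/*
 * Build a map from a user-supplied CPU list such as "0-3,8". Special
 * cases:
 *   - cpu_list == NULL:  map of all online CPUs (from sysfs);
 *   - empty string:      the dummy (-1) map (see the comment below on
 *                        NUMA nodes with no CPUs);
 *   - malformed input:   NULL.
 */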
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * must handle the case of empty cpumap to cover
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug)
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
			"Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

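/* Map index @idx back to a CPU number, or -1 when idx is past the end. */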
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	if (idx < cpus->nr)
		return cpus->map[idx];

	return -1;
}

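/* Number of entries in the map; NULL is treated as a single-entry map. */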
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? cpus->nr : 1;
}

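/* True when the map is NULL or is the dummy (-1) map. */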
bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? map->map[0] == -1 : true;
}

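/* Linear search for @cpu; returns its index in the map, or -1. */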
int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
{
	int i;

	for (i = 0; i < cpus->nr; ++i) {
		if (cpus->map[i] == cpu)
			return i;
	}

	return -1;
}

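/* Highest CPU number in the map, or -1 for an empty map. */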
int perf_cpu_map__max(struct perf_cpu_map *map)
{
	int i, max = -1;

	for (i = 0; i < map->nr; i++) {
		if (map->map[i] > max)
			max = map->map[i];
	}

	return max;
}
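
/*
 * Example usage (a minimal sketch, not part of this file; assumes a
 * program including <perf/cpumap.h> and linking against libperf):
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3,8");
 *	int idx;
 *
 *	if (cpus != NULL) {
 *		// Prints 0, 1, 2, 3, 8: the "0-3" range is expanded.
 *		for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++)
 *			printf("cpu %d\n", perf_cpu_map__cpu(cpus, idx));
 *		// Drop our reference; frees the map here.
 *		perf_cpu_map__put(cpus);
 *	}
 */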