/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

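/*
 * The DT-based setup below associates each cache leaf with its device
 * tree node: level 1 leaves map to the CPU node itself, while higher
 * levels follow the "next-level-cache" phandles.  An illustrative CPU
 * node (the layout is an example only, not taken from any particular
 * platform):
 *
 *	cpu@0 {
 *		compatible = "arm,cortex-a57";
 *		next-level-cache = <&L2_0>;
 *	};
 *
 *	L2_0: l2-cache0 {
 *		compatible = "cache";
 *	};
 *
 * Two leaves are considered shared when they resolve to the same
 * device_node, which is what cache_leaves_are_shared() checks.
 */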
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 cache, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

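/*
 * Build the shared_cpu_map for every leaf of this CPU by comparing it
 * against the same-index leaf of each other online CPU; whenever two
 * leaves are found to be shared, each CPU is added to the other's mask.
 */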
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret;

	ret = cache_setup_of_node(cpu);
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

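/*
 * Undo cache_shared_cpu_map_setup() for an outgoing CPU: drop this CPU
 * from every sibling's mask, drop the siblings from this CPU's masks,
 * and release the device_node references taken during setup.
 */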
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;
			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

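/*
 * Architectures override these two __weak hooks to describe their cache
 * hierarchy: init_cache_level() must fill in num_levels/num_leaves of
 * the per-CPU cpu_cacheinfo, and populate_cache_leaves() then fills the
 * allocated info_list entries.  A minimal arch-side sketch, with purely
 * illustrative values:
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;
 *		this_cpu_ci->num_leaves = 3;	(L1I + L1D + unified L2)
 *		return 0;
 *	}
 */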
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

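/*
 * Allocate and fill this CPU's cacheinfo: ask the arch for the number
 * of leaves, allocate one struct cacheinfo per leaf, let the arch
 * populate them, then fall back to DT for of_node and shared_cpu_map
 * when the arch has not already provided them.
 */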
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
			cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

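/*
 * The remainder of this file exposes the collected attributes under
 * /sys/devices/system/cpu/cpuX/cache/indexY/, e.g. (paths shown for
 * illustration):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/level
 *	/sys/devices/system/cpu/cpu0/cache/index0/size
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
 */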
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

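/*
 * show_one() stamps out a trivial _show() handler for each integral
 * field: show_one(level, level) below expands to a level_show() that
 * prints this_leaf->level, and likewise for the other fields.
 */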
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

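/*
 * Only expose an attribute when the underlying field was actually
 * populated; a zero or empty value hides the file rather than showing
 * a meaningless zero.  ways_of_associativity is gated on size instead,
 * since 0 ways legitimately means fully associative.
 */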
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

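/*
 * An architecture can append one extra attribute group per leaf by
 * overriding the __weak cache_get_priv_group() below; the returned
 * group is spliced into the reserved slot of cache_private_groups.
 * A sketch of such an override, with a purely illustrative group name
 * and condition:
 *
 *	const struct attribute_group *
 *	cache_get_priv_group(struct cacheinfo *this_leaf)
 *	{
 *		return this_leaf->level == 3 ? &my_l3_extra_group : NULL;
 *	}
 */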
static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

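/*
 * Create the "cache" parent device plus one "indexY" child device per
 * leaf; each child carries the default attribute group (and any arch
 * private group) with the leaf's cacheinfo as drvdata.  Leaves flagged
 * disable_sysfs are skipped.
 */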
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;

	cpumask_clear_cpu(cpu, &cache_dev_map);
	cpu_cache_sysfs_exit(cpu);
}

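/*
 * CPU hotplug: rebuild the cacheinfo and its sysfs nodes when a CPU
 * comes online, and tear both down again when it goes away.
 */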
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		if (per_cpu_cacheinfo(cpu))
			free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo..cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);