/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>

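/*
 * Function codes for the PTF (perform topology function) instruction:
 * request horizontal or vertical CPU polarization, or check whether a
 * topology change report is pending.
 */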
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

#ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
#endif

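/*
 * Return the cpumask of the topology group containing @cpu, or a mask
 * with just @cpu set if topology support is disabled or unavailable.
 */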
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

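/*
 * For each bit set in a CPU topology-list entry, find the matching
 * logical CPU, add it to the current core (and book) mask and record
 * its id and polarization.
 */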
static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
			     struct mask_info *book, struct mask_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (cpu_logical_map(lcpu) != rcpu)
				continue;
#ifdef CONFIG_SCHED_BOOK
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
#endif
			cpumask_set_cpu(lcpu, &core->mask);
			cpu_core_id[lcpu] = core->id;
			smp_cpu_polarization[lcpu] = tl_cpu->pp;
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
#ifdef CONFIG_SCHED_BOOK
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
#endif
}

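/* Advance to the next topology-list entry; CPU and container entries differ in size. */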
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

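/*
 * Rebuild the book/core mask lists from a SYSIB 15.1.x topology list:
 * nesting level 2 starts a new book, level 1 a new core, level 0
 * describes the CPUs themselves.
 */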
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	struct mask_info *book = &book_info;
#else
	struct mask_info *book = NULL;
#endif
	struct mask_info *core = &core_info;
	union topology_entry *tle, *end;

	spin_lock_irq(&topology_lock);
	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
#endif
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core);
			break;
		default:
			clear_masks();
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

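/* Execute the PTF instruction with function code @fc and return the condition code. */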
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

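/*
 * Switch the machine between horizontal (fc == 0) and vertical CPU
 * polarization. Each CPU's polarization is unknown until the next
 * topology update is processed.
 */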
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
#ifdef CONFIG_SCHED_BOOK
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
#endif
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

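/*
 * Fetch the machine topology with stsi 15.1.x. With book scheduling,
 * try the deeper SYSIB 15.1.3 first and fall back to 15.1.2 if the
 * machine does not support that level.
 */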
void store_topology(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
#endif
	stsi(info, 15, 1, 2);
}

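/*
 * Re-read the hardware topology, rebuild the core/book maps and send a
 * change uevent for every online CPU. Returns 1 so the scheduler
 * rebuilds its domains, or 0 if no topology information is available.
 */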
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

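/*
 * Poll for topology changes: the deferrable timer fires about once a
 * minute and, if the PTF check reports a pending change, schedules a
 * sched-domain rebuild from workqueue context.
 */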
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

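/* "topology=off" on the kernel command line disables topology support. */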
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

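/*
 * Allocate one mask_info per container that can exist at the given
 * nesting level, as derived from the magnitude array of SYSIB 15.1.x.
 */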
static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
			int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);
	alloc_masks(info, &core_info, 2);
#ifdef CONFIG_SCHED_BOOK
	alloc_masks(info, &book_info, 3);
#endif
}