/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_IRQ_WORK,
        IPI_COMPLETION,
        IPI_CPU_BACKTRACE,
        /*
         * SGI8-15 can be reserved by secure firmware, and thus may
         * not be usable by the kernel. Please keep the above limited
         * to at most 8 entries.
         */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
        if (ops)
                smp_ops = *ops;
}

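/*
 * With LPAE, a physical page-table address may not fit in an unsigned
 * long, so hand the secondary core a PFN instead of a physical address;
 * the boot code converts it back.
 */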
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
        return __phys_to_pfn(virt_to_phys(pgd));
#else
        return virt_to_phys(pgd);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
        secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
        sync_cache_w(&secondary_data);

        /*
         * Now bring the CPU into our world.
         */
        ret = smp_ops.smp_boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        memset(&secondary_data, 0, sizeof(secondary_data));
        return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
        if (smp_ops.smp_init_cpus)
                smp_ops.smp_init_cpus();
}

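/*
 * Booting secondaries is only possible if the platform has registered
 * a smp_boot_secondary method via smp_set_ops().
 */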
int platform_can_secondary_boot(void)
{
        return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        if (smp_ops.cpu_kill)
                return 1;
#endif

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
        if (smp_ops.cpu_kill)
                return smp_ops.cpu_kill(cpu);
        return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);

        return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
        /* cpu_die must be specified to support hotplug */
        if (!smp_ops.cpu_die)
                return 0;

        if (smp_ops.cpu_can_disable)
                return smp_ops.cpu_can_disable(cpu);

        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
        return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
         * Caches are flushed to the Level of Unification Inner Shareable
         * to write-back dirty lines to unified caches shared by all CPUs.
         */
        flush_cache_louis();
        local_flush_tlb_all();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);

        /*
         * platform_cpu_kill() is generally expected to do the powering off
         * and/or cutting of clocks to the dying CPU.  Optionally, this may
         * be done by the CPU which is dying in preference to supporting
         * this call, but that means there is _no_ synchronisation between
         * the requesting CPU and the dying CPU actually losing power.
         */
        if (!platform_cpu_kill(cpu))
                pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /*
         * Flush the data out of the L1 cache for this CPU.  This must be
         * before the completion to ensure that data is safely written out
         * before platform_cpu_kill() gets called - which may disable
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();

        /*
         * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
        complete(&cpu_died);

        /*
         * Ensure that the cache lines associated with that completion are
         * written out.  This covers the case where _this_ CPU is doing the
         * powering down, to ensure that the completion is visible to the
         * CPU waiting for this one.
         */
        flush_cache_louis();

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.  This may remove power, or it may simply spin.
         *
         * Platforms are generally expected *NOT* to return from this call,
         * although there are some which do because they have no way to
         * power down the CPU.  These platforms are the _only_ reason we
         * have a return path which uses the fragment of assembly below.
         *
         * The return path should not be used for platforms which can
         * power off the CPU.
         */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);

        pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
                cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
        cpu_info->cpuid = read_cpuid_id();

        store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        cpu_init();

        pr_debug("CPU%u: Booted secondary processor\n", cpu);

        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);

        notify_cpu_starting(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_irq_enable();
        local_fiq_enable();
        local_abt_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

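/*
 * Called once all requested CPUs are up: report the combined BogoMIPS
 * rating and check that every CPU entered the kernel in the same mode
 * (all HYP or all SVC).
 */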
void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);

        hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in the platform's smp_prepare_cpus()
                 * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                if (smp_ops.smp_prepare_cpus)
                        smp_ops.smp_prepare_cpus(max_cpus);
        }
}

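/*
 * The cross-call function used to raise IPIs is registered by the
 * interrupt controller driver via set_smp_cross_call(); only the first
 * registration takes effect.
 */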
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        if (!__smp_cross_call)
                __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_WAKEUP, "CPU wakeup interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}

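/*
 * Show the per-CPU IPI counts in /proc/interrupts, alongside the
 * hardware interrupts.
 */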
void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

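/*
 * Sum of all IPIs handled by @cpu; contributes to the per-CPU interrupt
 * totals reported via /proc/stat.
 */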
u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
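/*
 * Raise a self-IPI to run the pending irq_work; this is only possible
 * when the platform has an interrupt to raise for it.
 */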
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

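/*
 * A caller registers the completion it wants to wait on and is handed
 * back the IPI number to send; the target CPU then fires the completion
 * from its IPI handler.
 */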
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
        per_cpu(cpu_completion, cpu) = completion;
        return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
        complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_WAKEUP:
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

        case IPI_COMPLETION:
                irq_enter();
                ipi_complete(cpu);
                irq_exit();
                break;

        case IPI_CPU_BACKTRACE:
                printk_nmi_enter();
                irq_enter();
                nmi_cpu_backtrace(regs);
                irq_exit();
                printk_nmi_exit();
                break;

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n",
                        cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

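/*
 * loops_per_jiffy is calibrated against the CPU clock, so rescale it on
 * cpufreq transitions: reference values are snapshotted on a CPU's first
 * transition, then scaled before a frequency increase (PRECHANGE) and
 * after a decrease (POSTCHANGE) so that udelay() never runs with a loops
 * count that is too small for the current frequency.
 */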
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
                            unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        int cpu = freq->cpu;

        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
                if (!global_l_p_j_ref) {
                        global_l_p_j_ref = loops_per_jiffy;
                        global_l_p_j_ref_freq = freq->old;
                }
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
                per_cpu(cpu_data, cpu).loops_per_jiffy =
                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
                                      per_cpu(l_p_j_ref_freq, cpu),
                                      freq->new);
        }
        return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

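/*
 * ARM has no true NMI, so backtraces are requested with a normal IPI;
 * a CPU stuck with interrupts disabled will not respond.
 */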
static void raise_nmi(cpumask_t *mask)
{
        smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}