cpu-boost.c
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cpu-boost: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/time.h>
struct cpu_sync {
        struct task_struct *thread;
        wait_queue_head_t sync_wq;
        struct delayed_work boost_rem;
        int cpu;
        spinlock_t lock;
        bool pending;
        atomic_t being_woken;
        int src_cpu;
        unsigned int boost_min;
        unsigned int input_boost_min;
        unsigned int input_boost_freq;
};

static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

static unsigned int boost_ms;
module_param(boost_ms, uint, 0644);

static unsigned int sync_threshold;
module_param(sync_threshold, uint, 0644);

static unsigned int input_boost_enabled;
module_param(input_boost_enabled, uint, 0644);

static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

static struct delayed_work input_boost_rem;
static u64 last_input_time;
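
/*
 * input_boost_freq accepts either a single frequency in kHz, applied to
 * every CPU, or space-separated "cpu:freq" pairs. Illustrative writes,
 * assuming the standard module parameter sysfs layout (values are
 * examples only):
 *
 *      echo 1497600 > /sys/module/cpu_boost/parameters/input_boost_freq
 *      echo "0:1497600 2:1190400" > /sys/module/cpu_boost/parameters/input_boost_freq
 */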
static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
{
        int i, ntokens = 0;
        unsigned int val, cpu;
        const char *cp = buf;

        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        /* Single number: apply to all CPUs. */
        if (!ntokens) {
                if (sscanf(buf, "%u\n", &val) != 1)
                        return -EINVAL;
                for_each_possible_cpu(i)
                        per_cpu(sync_info, i).input_boost_freq = val;
                goto out;
        }

        /* CPU:value pairs: the separator count must be odd. */
        if (!(ntokens % 2))
                return -EINVAL;

        cp = buf;
        for (i = 0; i < ntokens; i += 2) {
                if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
                        return -EINVAL;
                /* Valid CPU indices are 0..num_possible_cpus()-1. */
                if (cpu >= num_possible_cpus())
                        return -EINVAL;
                per_cpu(sync_info, cpu).input_boost_freq = val;
                cp = strchr(cp, ' ');
                if (!cp)
                        break;
                cp++;
        }

out:
        return 0;
}
static int get_input_boost_freq(char *buf, const struct kernel_param *kp)
{
        int cnt = 0, cpu;
        struct cpu_sync *s;

        for_each_possible_cpu(cpu) {
                s = &per_cpu(sync_info, cpu);
                cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
                                "%d:%u ", cpu, s->input_boost_freq);
        }
        cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
        return cnt;
}

static const struct kernel_param_ops param_ops_input_boost_freq = {
        .set = set_input_boost_freq,
        .get = get_input_boost_freq,
};
module_param_cb(input_boost_freq, &param_ops_input_boost_freq, NULL, 0644);

/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 *
 * The sync kthread needs to run on the CPU in question to avoid deadlocks in
 * the wake up code. Achieve this by binding the thread to the respective
 * CPU. But a CPU going offline unbinds threads from that CPU. So, set it up
 * again each time the CPU comes back up. We can use CPUFREQ_START to figure
 * out a CPU is coming online instead of registering for hotplug notifiers.
 */
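
/*
 * Worked example (illustrative numbers): with boost_min = 1190400 kHz,
 * input_boost_min = 0 and policy->min = 960000 kHz, CPUFREQ_ADJUST picks
 * max(1190400, 0) = 1190400, clamps it to policy->max, and raises the
 * effective policy min from 960000 to 1190400 kHz until the boost is
 * removed and the policy is re-evaluated.
 */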
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned int cpu = policy->cpu;
        struct cpu_sync *s = &per_cpu(sync_info, cpu);
        unsigned int b_min = s->boost_min;
        unsigned int ib_min = s->input_boost_min;
        unsigned int min;

        switch (val) {
        case CPUFREQ_ADJUST:
                if (!b_min && !ib_min)
                        break;
                if (ib_min && ib_min <= policy->min)
                        break;

                min = max(b_min, ib_min);
                min = min(min, policy->max);

                pr_debug("CPU%u policy min before boost: %u kHz\n",
                         cpu, policy->min);
                pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

                cpufreq_verify_within_limits(policy, min, UINT_MAX);

                pr_debug("CPU%u policy min after boost: %u kHz\n",
                         cpu, policy->min);
                break;

        case CPUFREQ_START:
                /* Rebind the sync thread to the CPU coming back online. */
                set_cpus_allowed(s->thread, *cpumask_of(cpu));
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block boost_adjust_nb = {
        .notifier_call = boost_adjust_notify,
};
static void do_boost_rem(struct work_struct *work)
{
        struct cpu_sync *s = container_of(work, struct cpu_sync,
                                          boost_rem.work);

        pr_debug("Removing boost for CPU%d\n", s->cpu);
        s->boost_min = 0;
        /* Force policy re-evaluation to trigger adjust notifier. */
        cpufreq_update_policy(s->cpu);
}

static void update_policy_online(void)
{
        unsigned int i;

        /* Re-evaluate policy to trigger adjust notifier for online CPUs */
        get_online_cpus();
        for_each_online_cpu(i) {
                pr_debug("Updating policy for CPU%d\n", i);
                cpufreq_update_policy(i);
        }
        put_online_cpus();
}
static void do_input_boost_rem(struct work_struct *work)
{
        unsigned int i;
        struct cpu_sync *i_sync_info;

        /* Reset the input_boost_min for all CPUs in the system */
        pr_debug("Resetting input boost min for all CPUs\n");
        for_each_possible_cpu(i) {
                i_sync_info = &per_cpu(sync_info, i);
                i_sync_info->input_boost_min = 0;
        }

        /* Update policies for all online CPUs */
        update_policy_online();
}
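
/*
 * Per-CPU migration sync thread. Each CPU's thread sleeps until
 * boost_migration_notify() flags a pending migration, then compares the
 * source and destination policies: if the destination CPU is running
 * slower than the source, it raises the destination's boost_min to the
 * source's current frequency (capped at sync_threshold when set) and
 * schedules do_boost_rem() to drop the boost after boost_ms milliseconds.
 */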
static int boost_mig_sync_thread(void *data)
{
        int dest_cpu = (long)data;
        int src_cpu, ret;
        struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
        struct cpufreq_policy dest_policy;
        struct cpufreq_policy src_policy;
        unsigned long flags;

        while (1) {
                wait_event_interruptible(s->sync_wq, s->pending ||
                                        kthread_should_stop());

                if (kthread_should_stop())
                        break;

                spin_lock_irqsave(&s->lock, flags);
                s->pending = false;
                src_cpu = s->src_cpu;
                spin_unlock_irqrestore(&s->lock, flags);

                ret = cpufreq_get_policy(&src_policy, src_cpu);
                if (ret)
                        continue;

                ret = cpufreq_get_policy(&dest_policy, dest_cpu);
                if (ret)
                        continue;

                if (dest_policy.cur >= src_policy.cur) {
                        pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
                                 dest_cpu, dest_policy.cur,
                                 src_cpu, src_policy.cur);
                        continue;
                }

                if (sync_threshold && (dest_policy.cur >= sync_threshold))
                        continue;

                cancel_delayed_work_sync(&s->boost_rem);
                if (sync_threshold) {
                        if (src_policy.cur >= sync_threshold)
                                s->boost_min = sync_threshold;
                        else
                                s->boost_min = src_policy.cur;
                } else {
                        s->boost_min = src_policy.cur;
                }

                /* Force policy re-evaluation to trigger adjust notifier. */
                get_online_cpus();
                if (cpu_online(dest_cpu)) {
                        cpufreq_update_policy(dest_cpu);
                        queue_delayed_work_on(dest_cpu, cpu_boost_wq,
                                &s->boost_rem, msecs_to_jiffies(boost_ms));
                } else {
                        s->boost_min = 0;
                        pr_debug("Resetting boost_min to 0\n");
                }
                put_online_cpus();
        }

        return 0;
}
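
/*
 * Invoked from the scheduler's migration notifier chain, where sleeping
 * is not an option, so the actual policy work is handed off to the
 * per-CPU kthread via wake_up(). The being_woken flag prevents the sync
 * thread's own migration from triggering a recursive wakeup.
 */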
static int boost_migration_notify(struct notifier_block *nb,
                                unsigned long dest_cpu, void *arg)
{
        unsigned long flags;
        struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);

        if (!boost_ms)
                return NOTIFY_OK;

        /* Avoid deadlock in try_to_wake_up() */
        if (s->thread == current)
                return NOTIFY_OK;

        pr_debug("Migration: CPU%d --> CPU%d\n", (int)arg, (int)dest_cpu);
        spin_lock_irqsave(&s->lock, flags);
        s->pending = true;
        s->src_cpu = (int)arg;
        spin_unlock_irqrestore(&s->lock, flags);

        /*
         * Avoid issuing recursive wakeup call, as sync thread itself could be
         * seen as migrating triggering this notification. Note that sync
         * thread of a cpu could be running for a short while with its
         * affinity broken because of CPU hotplug.
         */
        if (!atomic_cmpxchg(&s->being_woken, 0, 1)) {
                wake_up(&s->sync_wq);
                atomic_set(&s->being_woken, 0);
        }

        return NOTIFY_OK;
}

static struct notifier_block boost_migration_nb = {
        .notifier_call = boost_migration_notify,
};
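
/*
 * Input boost flow: an input event queues this work, which raises every
 * CPU's input_boost_min to its configured input_boost_freq, re-evaluates
 * the online policies so the adjust notifier applies the new floor, and
 * schedules do_input_boost_rem() to lift it again after input_boost_ms.
 */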
static void do_input_boost(struct work_struct *work)
{
        unsigned int i;
        struct cpu_sync *i_sync_info;

        cancel_delayed_work_sync(&input_boost_rem);

        /* Set the input_boost_min for all CPUs in the system */
        pr_debug("Setting input boost min for all CPUs\n");
        for_each_possible_cpu(i) {
                i_sync_info = &per_cpu(sync_info, i);
                i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
        }

        /* Update policies for all online CPUs */
        update_policy_online();

        queue_delayed_work(cpu_boost_wq, &input_boost_rem,
                           msecs_to_jiffies(input_boost_ms));
}
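
/*
 * Event handler: rate-limits boosting so that events arriving within
 * input_boost_ms of the previous boost (i.e. while that boost is still
 * in effect) are ignored, as are events whose boost work is still
 * pending on the workqueue.
 */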
static void cpuboost_input_event(struct input_handle *handle,
                unsigned int type, unsigned int code, int value)
{
        u64 now;

        if (!input_boost_enabled)
                return;

        now = ktime_to_us(ktime_get());
        if ((now - last_input_time) < (input_boost_ms * USEC_PER_MSEC))
                return;

        if (work_pending(&input_boost_work))
                return;

        queue_work(cpu_boost_wq, &input_boost_work);
        last_input_time = ktime_to_us(ktime_get());
}
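
/*
 * Standard input handler plumbing: connect() registers and opens a handle
 * on each matching device so its events reach cpuboost_input_event();
 * disconnect() tears the handle down again.
 */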
static int cpuboost_input_connect(struct input_handler *handler,
                struct input_dev *dev, const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "cpufreq";

        error = input_register_handle(handle);
        if (error)
                goto err2;

        error = input_open_device(handle);
        if (error)
                goto err1;

        return 0;

err1:
        input_unregister_handle(handle);
err2:
        kfree(handle);
        return error;
}
static void cpuboost_input_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

static const struct input_device_id cpuboost_ids[] = {
        /* multi-touch touchscreen */
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                        INPUT_DEVICE_ID_MATCH_ABSBIT,
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
                        BIT_MASK(ABS_MT_POSITION_X) |
                        BIT_MASK(ABS_MT_POSITION_Y) },
        },
        /* touchpad */
        {
                .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
                        INPUT_DEVICE_ID_MATCH_ABSBIT,
                .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
                .absbit = { [BIT_WORD(ABS_X)] =
                        BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
        },
        /* Keypad */
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
                .evbit = { BIT_MASK(EV_KEY) },
        },
        { },
};

static struct input_handler cpuboost_input_handler = {
        .event          = cpuboost_input_event,
        .connect        = cpuboost_input_connect,
        .disconnect     = cpuboost_input_disconnect,
        .name           = "cpu-boost",
        .id_table       = cpuboost_ids,
};
static int cpu_boost_init(void)
{
        int cpu, ret;
        struct cpu_sync *s;

        cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
        if (!cpu_boost_wq)
                return -ENOMEM;

        INIT_WORK(&input_boost_work, do_input_boost);
        INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);

        for_each_possible_cpu(cpu) {
                s = &per_cpu(sync_info, cpu);
                s->cpu = cpu;
                init_waitqueue_head(&s->sync_wq);
                atomic_set(&s->being_woken, 0);
                spin_lock_init(&s->lock);
                INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
                s->thread = kthread_run(boost_mig_sync_thread,
                                        (void *)(long)cpu, "boost_sync/%d", cpu);
                set_cpus_allowed(s->thread, *cpumask_of(cpu));
        }
        cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
        atomic_notifier_chain_register(&migration_notifier_head,
                                        &boost_migration_nb);
        ret = input_register_handler(&cpuboost_input_handler);
        return ret;
}
late_initcall(cpu_boost_init);
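
/*
 * Example runtime tuning (illustrative values only; sysfs paths assume
 * the standard module parameter layout for this file's module name):
 *
 *      echo 50 > /sys/module/cpu_boost/parameters/boost_ms
 *      echo 1 > /sys/module/cpu_boost/parameters/input_boost_enabled
 *      echo 1036800 > /sys/module/cpu_boost/parameters/sync_threshold
 */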