/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static unsigned int default_powersave_bias;
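
/*
 * Cache this CPU's frequency table and clear any leftover powersave_bias
 * low-frequency phase.
 */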
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}

	/*
	 * Split the sampling period between freq_hi and freq_lo so that the
	 * time-weighted average matches freq_avg: jiffies_hi is a rounded
	 * linear interpolation of freq_avg between the two table frequencies.
	 */
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
static void ondemand_powersave_bias_init(void)
{
	int i;

	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}
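
/*
 * Move the CPU toward the requested frequency. When powersave_bias is set,
 * the target is first redirected through the registered powersave_bias
 * handler; otherwise, if we are already at policy->max, there is nothing to
 * do.
 */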
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
/*
 * Every sampling_rate we check whether the current idle time is less than 20%
 * (the default). If it is, we try to increase the frequency; otherwise we set
 * the frequency proportionally to the load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_C);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
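
/*
 * Periodic sampling work. A NORMAL_SAMPLE evaluates the load; if
 * powersave_bias selected a lower freq_lo frequency, a SUB_SAMPLE is
 * scheduled to drop to it for the remainder of the sampling period.
 */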
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not yet sure whether a higher frequency will be required,
 * then the governor may change the sampling rate too late; up to 1 second
 * later. Thus, if we are reducing the sampling rate, we need to make the new
 * value effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);
		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}
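
/*
 * sampling_rate is expressed in microseconds; update_sampling_rate() clamps
 * it to dbs_data->min_sampling_rate and reschedules pending work if the new
 * period is shorter than the one already queued.
 */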
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}
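
/*
 * io_is_busy decides whether iowait time counts as busy time; changing it
 * re-reads prev_cpu_idle so the next sample is computed against a consistent
 * baseline.
 */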
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}
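
/*
 * up_threshold is the load percentage (MIN_FREQUENCY_UP_THRESHOLD to
 * MAX_FREQUENCY_UP_THRESHOLD) above which od_check_cpu() jumps straight to
 * policy->max.
 */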
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	od_tuners->up_threshold = input;
	return count;
}
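
/*
 * sampling_down_factor multiplies the sampling period after the governor has
 * switched to full speed, delaying the next re-evaluation; writing it resets
 * the per-CPU rate multiplier.
 */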
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}
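
/*
 * Toggling ignore_nice_load invalidates the cached idle baselines, so
 * prev_cpu_idle (and prev_cpu_nice when nice time is ignored) is re-read for
 * every online CPU before the next sample.
 */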
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;

		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}
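
/*
 * powersave_bias is expressed in units of 0.1% (0..1000) of the requested
 * frequency; generic_powersave_bias_target() uses it to reduce the target and
 * to split the sampling period between the two neighbouring table
 * frequencies.
 */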
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}
show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/
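
/*
 * Allocate the governor tunables and pick defaults: when idle
 * micro-accounting is available (get_cpu_idle_time_us() succeeds) a finer
 * up_threshold and a fixed minimum sampling rate are used; otherwise the
 * defaults are derived from HZ.
 */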
static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In the nohz/micro-accounting case we set the minimum
		 * sampling rate not depending on HZ, but fixed (very low).
		 * The deferred timer might skip some samples if idle/sleeping
		 * as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}
define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
	.mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
};
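
/*
 * Record the new default powersave_bias and push it into the tuners of every
 * policy currently governed by ondemand, visiting each policy only once via
 * the "done" mask.
 */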
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
		if (!policy)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}
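
/*
 * Allow a platform driver to install its own powersave_bias target function
 * and bias value; od_unregister_powersave_bias_handler() restores the generic
 * handler and clears the bias.
 */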
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);