/* drivers/misc/memory_state_time.c
 *
 * Copyright (C) 2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/memory-state-time.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#include <linux/timekeeping.h>
#include <linux/workqueue.h>

#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)

#define FREQ_HASH_BITS 4
DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);

static DEFINE_MUTEX(mem_lock);

#define TAG "memory_state_time"
#define BW_NODE "/soc/memory-state-time"
#define FREQ_TBL "freq-tbl"
#define BW_TBL "bw-buckets"
#define NUM_SOURCES "num-sources"

#define LOWEST_FREQ 2

static int curr_bw;
static int curr_freq;
static u32 *bw_buckets;
static u32 *freq_buckets;
static int num_freqs;
static int num_buckets;
static int registered_bw_sources;
static u64 last_update;
static bool init_success;
static struct workqueue_struct *memory_wq;
static u32 num_sources = 10;
static int *bandwidths;

struct freq_entry {
	int freq;
	u64 *buckets; /* Bandwidth buckets. */
	struct hlist_node hash;
};

struct queue_container {
	struct work_struct update_state;
	int value;
	u64 time_now;
	int id;
	struct mutex *lock;
};

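/*
 * find_bucket() below returns the index of the first bucket whose maximum
 * exceeds the given bandwidth, falling back to the last bucket. Hypothetical
 * example: with bw_buckets = {100, 200, 400}, a bandwidth of 150 maps to
 * bucket 1 and a bandwidth of 500 maps to bucket 2 (the last one).
 */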
static int find_bucket(int bw)
{
	int i;

	if (bw_buckets != NULL) {
		for (i = 0; i < num_buckets; i++) {
			if (bw_buckets[i] > bw) {
				pr_debug("Found bucket %d for bandwidth %d\n",
						i, bw);
				return i;
			}
		}
		return num_buckets - 1;
	}
	return 0;
}

static u64 get_time_diff(u64 time_now)
{
	u64 ms;

	/* Note: time_now comes from ktime_get_boot_ns(), so the value
	 * accumulated here is in nanoseconds despite the variable name.
	 */
	ms = time_now - last_update;
	last_update = time_now;
	return ms;
}

static ssize_t show_stat_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int i, j;
	int len = 0;
	struct freq_entry *freq_entry;

	for (i = 0; i < num_freqs; i++) {
		hash_for_each_possible(freq_hash_table, freq_entry, hash,
				freq_buckets[i]) {
			if (freq_entry->freq == freq_buckets[i]) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"%d ", freq_buckets[i]);
				if (len >= PAGE_SIZE)
					break;
				for (j = 0; j < num_buckets; j++) {
					len += scnprintf(buf + len,
							PAGE_SIZE - len,
							"%llu ",
							freq_entry->buckets[j]);
				}
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"\n");
			}
		}
	}
	pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
	return len;
}
KERNEL_ATTR_RO(show_stat);

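/*
 * Reading /sys/kernel/memory_state_time/show_stat yields one line per known
 * frequency: the frequency followed by the time accumulated in each bandwidth
 * bucket at that frequency (in the units returned by ktime_get_boot_ns()).
 * Example output with made-up values and three buckets:
 *
 *	800000 1200 340 0
 *	1600000 90 15 600
 */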
static void update_table(u64 time_now)
{
	struct freq_entry *freq_entry;

	pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
	hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
		if (curr_freq == freq_entry->freq) {
			freq_entry->buckets[find_bucket(curr_bw)]
					+= get_time_diff(time_now);
			break;
		}
	}
}

static bool freq_exists(int freq)
{
	int i;

	for (i = 0; i < num_freqs; i++) {
		if (freq == freq_buckets[i])
			return true;
	}
	return false;
}

static int calculate_total_bw(int bw, int index)
{
	int i;
	int total_bw = 0;

	pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
	bandwidths[index] = bw;
	for (i = 0; i < registered_bw_sources; i++)
		total_bw += bandwidths[i];
	return total_bw;
}

static void freq_update_do_work(struct work_struct *work)
{
	struct queue_container *freq_state_update
			= container_of(work, struct queue_container,
			update_state);
	if (freq_state_update) {
		mutex_lock(&mem_lock);
		update_table(freq_state_update->time_now);
		curr_freq = freq_state_update->value;
		mutex_unlock(&mem_lock);
		kfree(freq_state_update);
	}
}

static void bw_update_do_work(struct work_struct *work)
{
	struct queue_container *bw_state_update
			= container_of(work, struct queue_container,
			update_state);
	if (bw_state_update) {
		mutex_lock(&mem_lock);
		update_table(bw_state_update->time_now);
		curr_bw = calculate_total_bw(bw_state_update->value,
				bw_state_update->id);
		mutex_unlock(&mem_lock);
		kfree(bw_state_update);
	}
}

static void memory_state_freq_update(struct memory_state_update_block *ub,
		int value)
{
	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		if (freq_exists(value) && init_success) {
			struct queue_container *freq_container
				= kmalloc(sizeof(struct queue_container),
					GFP_KERNEL);
			if (!freq_container)
				return;
			INIT_WORK(&freq_container->update_state,
					freq_update_do_work);
			freq_container->time_now = ktime_get_boot_ns();
			freq_container->value = value;
			pr_debug("Scheduling freq update in work queue\n");
			queue_work(memory_wq, &freq_container->update_state);
		} else {
			pr_debug("Freq does not exist.\n");
		}
	}
}

static void memory_state_bw_update(struct memory_state_update_block *ub,
		int value)
{
	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		if (init_success) {
			struct queue_container *bw_container
				= kmalloc(sizeof(struct queue_container),
					GFP_KERNEL);
			if (!bw_container)
				return;
			INIT_WORK(&bw_container->update_state,
					bw_update_do_work);
			bw_container->time_now = ktime_get_boot_ns();
			bw_container->value = value;
			bw_container->id = ub->id;
			pr_debug("Scheduling bandwidth update in work queue\n");
			queue_work(memory_wq, &bw_container->update_state);
		}
	}
}

struct memory_state_update_block *memory_state_register_frequency_source(void)
{
	struct memory_state_update_block *block;

	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		pr_debug("Allocating frequency source\n");
		block = kmalloc(sizeof(struct memory_state_update_block),
				GFP_KERNEL);
		if (!block)
			return NULL;
		block->update_call = memory_state_freq_update;
		return block;
	}
	pr_err("Config option disabled.\n");
	return NULL;
}
EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);

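/*
 * Hypothetical caller sketch (not part of this driver): a memory frequency
 * driver that includes <linux/memory-state-time.h> would register once and
 * then report each frequency change through the block's update_call hook.
 * The helper names below are illustrative only.
 *
 *	static struct memory_state_update_block *freq_src;
 *
 *	static int example_freq_source_init(void)
 *	{
 *		freq_src = memory_state_register_frequency_source();
 *		return freq_src ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_on_ddr_freq_change(int new_freq)
 *	{
 *		if (freq_src)
 *			freq_src->update_call(freq_src, new_freq);
 *	}
 */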
struct memory_state_update_block *memory_state_register_bandwidth_source(void)
{
	struct memory_state_update_block *block;

	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		pr_debug("Allocating bandwidth source %d\n",
				registered_bw_sources);
		block = kmalloc(sizeof(struct memory_state_update_block),
				GFP_KERNEL);
		if (!block)
			return NULL;
		block->update_call = memory_state_bw_update;
		if (registered_bw_sources < num_sources) {
			block->id = registered_bw_sources++;
		} else {
			pr_err("Unable to allocate source; max number reached\n");
			kfree(block);
			return NULL;
		}
		return block;
	}
	pr_err("Config option disabled.\n");
	return NULL;
}
EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);

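/*
 * Hypothetical caller sketch for a bandwidth source (helper names are
 * illustrative): each registered source receives its own id, and the values
 * it reports are summed across all sources by calculate_total_bw().
 *
 *	static struct memory_state_update_block *bw_src;
 *
 *	static int example_bw_source_init(void)
 *	{
 *		bw_src = memory_state_register_bandwidth_source();
 *		return bw_src ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_on_bw_sample(int new_bw)
 *	{
 *		if (bw_src)
 *			bw_src->update_call(bw_src, new_bw);
 *	}
 */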
/* Buckets are designated by their maximum.
 * Reads the bandwidth buckets supported by the device from the device tree.
 */
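/* The platform device's of_node (e.g. the BW_NODE path above) is expected to
 * carry a "bw-buckets" u32 array of bucket maximums, a "freq-tbl" u32 array
 * of supported memory frequencies, and optionally "num-sources", the number
 * of bandwidth reporters (defaults to 10 if absent).
 */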
static int get_bw_buckets(struct device *dev)
{
	int ret, lenb;
	struct device_node *node = dev->of_node;

	of_property_read_u32(node, NUM_SOURCES, &num_sources);
	if (!of_find_property(node, BW_TBL, &lenb)) {
		pr_err("Missing %s property\n", BW_TBL);
		return -ENODATA;
	}

	bandwidths = devm_kzalloc(dev,
			sizeof(*bandwidths) * num_sources, GFP_KERNEL);
	if (!bandwidths)
		return -ENOMEM;
	lenb /= sizeof(*bw_buckets);
	bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
			GFP_KERNEL);
	if (!bw_buckets) {
		devm_kfree(dev, bandwidths);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(node, BW_TBL, bw_buckets, lenb);
	if (ret < 0) {
		devm_kfree(dev, bandwidths);
		devm_kfree(dev, bw_buckets);
		pr_err("Unable to read bandwidth table from device tree.\n");
		return ret;
	}
	curr_bw = 0;
	num_buckets = lenb;
	return 0;
}

/* Adds a struct freq_entry node to the hashtable for each supported frequency.
 * Records the number of supported frequencies in num_freqs.
 */
static int freq_buckets_init(struct device *dev)
{
	struct freq_entry *freq_entry;
	int i;
	int ret, lenf;
	struct device_node *node = dev->of_node;

	if (!of_find_property(node, FREQ_TBL, &lenf)) {
		pr_err("Missing %s property\n", FREQ_TBL);
		return -ENODATA;
	}

	lenf /= sizeof(*freq_buckets);
	freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
			GFP_KERNEL);
	if (!freq_buckets)
		return -ENOMEM;
	pr_debug("freqs found len %d\n", lenf);
	ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets, lenf);
	if (ret < 0) {
		devm_kfree(dev, freq_buckets);
		pr_err("Unable to read frequency table from device tree.\n");
		return ret;
	}
	pr_debug("ret freq %d\n", ret);
	num_freqs = lenf;
	curr_freq = freq_buckets[LOWEST_FREQ];

	for (i = 0; i < num_freqs; i++) {
		freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
				GFP_KERNEL);
		if (!freq_entry)
			return -ENOMEM;
		freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
				GFP_KERNEL);
		if (!freq_entry->buckets) {
			devm_kfree(dev, freq_entry);
			return -ENOMEM;
		}
		pr_debug("memory_state_time Adding freq to ht %d\n",
				freq_buckets[i]);
		freq_entry->freq = freq_buckets[i];
		hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
	}
	return 0;
}

struct kobject *memory_kobj;
EXPORT_SYMBOL_GPL(memory_kobj);

static struct attribute *memory_attrs[] = {
	&show_stat_attr.attr,
	NULL
};

static struct attribute_group memory_attr_group = {
	.attrs = memory_attrs,
};

static int memory_state_time_probe(struct platform_device *pdev)
{
	int error;

	error = get_bw_buckets(&pdev->dev);
	if (error)
		return error;
	error = freq_buckets_init(&pdev->dev);
	if (error)
		return error;
	last_update = ktime_get_boot_ns();
	init_success = true;

	pr_debug("memory_state_time initialized with num_freqs %d\n",
			num_freqs);
	return 0;
}

static const struct of_device_id match_table[] = {
	{ .compatible = "memory-state-time" },
	{}
};

static struct platform_driver memory_state_time_driver = {
	.probe = memory_state_time_probe,
	.driver = {
		.name = "memory-state-time",
		.of_match_table = match_table,
		.owner = THIS_MODULE,
	},
};

static int __init memory_state_time_init(void)
{
	int error;

	hash_init(freq_hash_table);
	memory_wq = create_singlethread_workqueue("memory_wq");
	if (!memory_wq) {
		pr_err("Unable to create workqueue.\n");
		return -EINVAL;
	}
	/*
	 * Create sys/kernel directory for memory_state_time.
	 */
	memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
	if (!memory_kobj) {
		pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
		error = -ENOMEM;
		goto wq;
	}
	error = sysfs_create_group(memory_kobj, &memory_attr_group);
	if (error) {
		pr_err("Unable to create sysfs folder.\n");
		goto kobj;
	}

	error = platform_driver_register(&memory_state_time_driver);
	if (error) {
		pr_err("Unable to register memory_state_time platform driver.\n");
		goto group;
	}
	return 0;

group:	sysfs_remove_group(memory_kobj, &memory_attr_group);
kobj:	kobject_put(memory_kobj);
wq:	destroy_workqueue(memory_wq);
	return error;
}
module_init(memory_state_time_init);