regcache-rbtree.c

/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
        /* block of adjacent registers */
        void *block;
        /* which registers are present; unsigned long to match the bitmap API */
        unsigned long *cache_present;
        /* base register handled by this block */
        unsigned int base_reg;
        /* number of registers available in the block */
        unsigned int blklen;
        /* the actual rbtree node holding this block */
        struct rb_node node;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
        struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
        struct regcache_rbtree_node *rbnode, unsigned int idx)
{
        return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
                                         struct regcache_rbtree_node *rbnode,
                                         unsigned int idx, unsigned int val)
{
        set_bit(idx, rbnode->cache_present);
        regcache_set_val(map, rbnode->block, idx, val);
}

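/*
 * Find the block covering a register: check the most recently used
 * node first, then fall back to a binary search of the tree.
 */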
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                           unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = container_of(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }

        return NULL;
}

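/*
 * Link a new block into the tree, keyed by register range. Returns 0
 * without inserting if an existing block already covers the new base
 * register, 1 after a successful insertion.
 */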
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
                                  struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
                                          node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}

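/*
 * Dump the block layout through debugfs: one line per node, plus a
 * summary of node count, cached registers and memory consumed.
 */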
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        size_t mem_size;
        int nodes = 0;
        int registers = 0;
        int this_registers, average;

        map->lock(map->lock_arg);

        mem_size = sizeof(*rbtree_ctx);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = container_of(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);
                mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

                regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                this_registers = ((top - base) / map->reg_stride) + 1;
                seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

                nodes++;
                registers += this_registers;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);

        map->unlock(map->lock_arg);

        return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
        .open = rbtree_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

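/* Allocate the cache context and seed it with the register defaults. */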
static int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
        rbtree_ctx->root = RB_ROOT;
        rbtree_ctx->cached_rbnode = NULL;

        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->cache_present);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}

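/*
 * Read a value from the cache: find the covering block and check the
 * present bitmap before fetching; -ENOENT tells the regcache core the
 * register is not cached.
 */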
static int regcache_rbtree_read(struct regmap *map,
                                unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;

        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                if (!test_bit(reg_tmp, rbnode->cache_present))
                        return -ENOENT;
                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
        } else {
                return -ENOENT;
        }

        return 0;
}

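/*
 * Grow an existing block so that it covers [base_reg, top_reg] and
 * store the new value in it. Both the data block and the present
 * bitmap are reallocated; if the block gains registers below its old
 * base, the existing contents are shifted up to make room.
 */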
static int regcache_rbtree_insert_to_block(struct regmap *map,
                                           struct regcache_rbtree_node *rbnode,
                                           unsigned int base_reg,
                                           unsigned int top_reg,
                                           unsigned int reg,
                                           unsigned int value)
{
        unsigned int blklen;
        unsigned int pos, offset;
        unsigned long *present;
        u8 *blk;

        blklen = (top_reg - base_reg) / map->reg_stride + 1;
        pos = (reg - base_reg) / map->reg_stride;
        offset = (rbnode->base_reg - base_reg) / map->reg_stride;

        blk = krealloc(rbnode->block,
                       blklen * map->cache_word_size,
                       GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        /*
         * krealloc() may have freed the old block, so update the node
         * right away; freeing blk on a later failure would leave
         * rbnode->block dangling.
         */
        rbnode->block = blk;

        present = krealloc(rbnode->cache_present,
                           BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
        if (!present)
                return -ENOMEM;

        /* krealloc() does not zero the extended part of the bitmap */
        if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen))
                memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
                       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
                       * sizeof(*present));

        /* insert the register value in the correct place in the rbnode block */
        if (pos == 0) {
                memmove(blk + offset * map->cache_word_size,
                        blk, rbnode->blklen * map->cache_word_size);
                bitmap_shift_left(present, present, offset, blklen);
        }

        /* update the rbnode block, its size and the base register */
        rbnode->blklen = blklen;
        rbnode->base_reg = base_reg;
        rbnode->cache_present = present;

        regcache_rbtree_set_register(map, rbnode, pos, value);
        return 0;
}

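/*
 * Allocate a new block for a register not covered by any existing
 * node, sizing it from the read table when one is available so that
 * neighbouring registers land in the same allocation.
 */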
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
        struct regcache_rbtree_node *rbnode;
        const struct regmap_range *range;
        int i;

        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
        if (!rbnode)
                return NULL;

        /* If there is a read table then use it to guess at an allocation */
        if (map->rd_table) {
                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
                        if (regmap_reg_in_range(reg,
                                                &map->rd_table->yes_ranges[i]))
                                break;
                }

                if (i != map->rd_table->n_yes_ranges) {
                        range = &map->rd_table->yes_ranges[i];
                        rbnode->blklen = (range->range_max - range->range_min) /
                                map->reg_stride + 1;
                        rbnode->base_reg = range->range_min;
                }
        }

        if (!rbnode->blklen) {
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
        }

        rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
                                GFP_KERNEL);
        if (!rbnode->block)
                goto err_free;

        rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
                sizeof(*rbnode->cache_present), GFP_KERNEL);
        if (!rbnode->cache_present)
                goto err_free_block;

        return rbnode;

err_free_block:
        kfree(rbnode->block);
err_free:
        kfree(rbnode);
        return NULL;
}

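/*
 * Write a value into the cache. If no block covers the register, try
 * to extend a nearby block first; max_dist bounds the gap we are
 * willing to fill so that extending never costs more memory than a
 * new node would. Only if that fails is a fresh node allocated.
 */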
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int reg_tmp;
        int ret;

        rbtree_ctx = map->cache;

        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
        } else {
                unsigned int base_reg, top_reg;
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;

                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
                if (reg < max_dist)
                        min = 0;
                else
                        min = reg - max_dist;
                max = reg + max_dist;

                /* look for an adjacent register to the one we are about to add */
                for (node = rb_first(&rbtree_ctx->root); node;
                     node = rb_next(node)) {
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
                                              node);

                        regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
                                                         &base_reg, &top_reg);

                        if (base_reg <= max && top_reg >= min) {
                                new_base_reg = min(reg, base_reg);
                                new_top_reg = max(reg, top_reg);
                        } else {
                                continue;
                        }

                        ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
                        rbtree_ctx->cached_rbnode = rbnode_tmp;
                        return 0;
                }

                /* We did not manage to find a place to insert it in
                 * an existing block so create a new rbnode.
                 */
                rbnode = regcache_rbtree_node_alloc(map, reg);
                if (!rbnode)
                        return -ENOMEM;
                /* the block index must be in reg_stride units */
                regcache_rbtree_set_register(map, rbnode,
                                             (reg - rbnode->base_reg) /
                                             map->reg_stride, value);
                regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}

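/*
 * Write every cached register in [min, max] back to the hardware,
 * one block at a time, skipping registers whose present bit is clear.
 */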
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
        unsigned int start, end;
        int ret;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                ret = regcache_sync_block(map, rbnode->block,
                                          rbnode->cache_present,
                                          rbnode->base_reg, start, end);
                if (ret != 0)
                        return ret;
        }

        return regmap_async_complete(map);
}

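/*
 * Drop registers in [min, max] from the cache by clearing their
 * present bits; the blocks themselves stay allocated.
 */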
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
                                unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode;
        struct rb_node *node;
        unsigned int base_reg, top_reg;
        unsigned int start, end;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                bitmap_clear(rbnode->cache_present, start, end - start);
        }

        return 0;
}

struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = rbtree_debugfs_init,
#endif
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync,
        .drop = regcache_rbtree_drop,
};