/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

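/*
 * Look up the node whose block covers @reg. The most recently used
 * node is tried first as a cheap one-entry cache; on a miss we fall
 * back to a normal binary search of the rbtree and remember the hit.
 */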
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							   unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

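/*
 * Link @rbnode into @root, ordered by register range. Returns 1 on
 * success, or 0 without inserting anything if an existing block
 * already covers rbnode->base_reg.
 */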
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

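/*
 * The debugfs "rbtree" file dumps one "base-top (count)" line per
 * block followed by totals for nodes, registers and the approximate
 * memory used by the cache.
 */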
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

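/*
 * Start with an empty tree and seed it with the register defaults,
 * if any, by replaying them through the normal write path.
 */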
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

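/*
 * Read a value from the cache. -ENOENT tells the regmap core the
 * value is not cached (no block covers @reg, or its bit is clear in
 * cache_present) and the hardware must be consulted instead.
 */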
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

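/*
 * Grow an existing block so that it spans base_reg..top_reg and
 * store @value for @reg inside it. Both the value array and the
 * cache_present bitmap are reallocated; when the block is extended
 * downwards (pos == 0) the old contents are shifted up to make room
 * at the front.
 */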
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present) {
			kfree(blk);
			return -ENOMEM;
		}

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

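/*
 * Allocate a new, empty block for @reg. If a read table is available
 * the surrounding yes-range is used to size the block up front;
 * otherwise it starts out covering just the one register.
 */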
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

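/*
 * Cache a register write. If a block already covers @reg the value is
 * stored in place. Otherwise the closest block within max_dist
 * registers is extended to reach @reg; max_dist is scaled so that the
 * padding added to an existing block costs roughly no more memory
 * than a new node would. If no such neighbour exists, a fresh node is
 * allocated and inserted into the tree.
 */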
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     (reg - rbnode->base_reg) / map->reg_stride,
					     value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

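/*
 * Flush cached values in the [min, max] register range back to the
 * hardware via regcache_sync_block(), one block at a time. Blocks are
 * visited in ascending order, so the walk can stop at the first block
 * past @max.
 */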
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

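/*
 * Forget cached values in the [min, max] register range by clearing
 * their cache_present bits; the blocks themselves stay allocated.
 */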
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

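/*
 * Drivers pick this implementation through their regmap config rather
 * than calling in here directly. A minimal sketch (the foo_* names
 * and field values are illustrative only):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	  = 8,
 *		.val_bits	  = 16,
 *		.max_register	  = FOO_MAX_REGISTER,
 *		.reg_defaults	  = foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *		.cache_type	  = REGCACHE_RBTREE,
 *	};
 */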
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};