MEM_CacheLimiter.h

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/** \file
 * \ingroup memutil
 */

#ifndef __MEM_CACHELIMITER_H__
#define __MEM_CACHELIMITER_H__

/**
 * \section MEM_CacheLimiter
 * This class defines a generic memory cache management system
 * to limit memory usage to a fixed global maximum.
 *
 * \note Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
 *
 * Usage example:
 *
 * \code{.cpp}
 * class BigFatImage {
 *  public:
 *   ~BigFatImage() { tell_everyone_we_are_gone(this); }
 * };
 *
 * void doit()
 * {
 *   MEM_CacheLimiter<BigFatImage> BigFatImages(NULL);
 *
 *   MEM_CacheLimiterHandle<BigFatImage> *h = BigFatImages.insert(new BigFatImage);
 *
 *   BigFatImages.enforce_limits();
 *   h->ref();
 *
 *   // Work with the image...
 *
 *   h->unref();
 *
 *   // Leave the image in the cache.
 * }
 * \endcode
 */

#include <list>
#include <queue>
#include <vector>

#include "MEM_Allocator.h"

template<class T> class MEM_CacheLimiter;

#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
void MEM_CacheLimiter_set_maximum(size_t m);
size_t MEM_CacheLimiter_get_maximum();
void MEM_CacheLimiter_set_disabled(bool disabled);
bool MEM_CacheLimiter_is_disabled(void);
};
#endif

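/**
 * A MEM_CacheLimiterHandle wraps one cached element and tracks a simple
 * reference count. While the reference count is non-zero the element will not
 * be destroyed by MEM_CacheLimiter::enforce_limits(). Note that
 * destroy_if_possible() deletes both the cached data and the handle itself
 * (via unmanage()), so the handle pointer must not be used afterwards.
 */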
template<class T> class MEM_CacheLimiterHandle {
 public:
  explicit MEM_CacheLimiterHandle(T *data_, MEM_CacheLimiter<T> *parent_)
      : data(data_), refcount(0), parent(parent_)
  {
  }

  void ref()
  {
    refcount++;
  }

  void unref()
  {
    refcount--;
  }

  T *get()
  {
    return data;
  }

  const T *get() const
  {
    return data;
  }

  int get_refcount() const
  {
    return refcount;
  }

  bool can_destroy() const
  {
    return !data || !refcount;
  }

  bool destroy_if_possible()
  {
    if (can_destroy()) {
      delete data;
      data = NULL;
      unmanage();
      return true;
    }
    return false;
  }

  void unmanage()
  {
    parent->unmanage(this);
  }

  void touch()
  {
    parent->touch(this);
  }

 private:
  friend class MEM_CacheLimiter<T>;

  T *data;
  int refcount;
  int pos;
  MEM_CacheLimiter<T> *parent;
};

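/**
 * MEM_CacheLimiter keeps a queue of managed handles and, when
 * enforce_limits() is called, destroys destroyable elements until the total
 * memory in use drops below the global maximum set through
 * MEM_CacheLimiter_set_maximum().
 *
 * A minimal sketch of a size callback, assuming a hypothetical CachedBuffer
 * type whose get_data() returns a pointer back to the buffer itself (the
 * callbacks receive whatever T::get_data() returns):
 *
 * \code{.cpp}
 * static size_t cached_buffer_size(void *data)
 * {
 *   return static_cast<CachedBuffer *>(data)->size_in_bytes;
 * }
 *
 * MEM_CacheLimiter<CachedBuffer> limiter(cached_buffer_size);
 * \endcode
 */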
template<class T> class MEM_CacheLimiter {
 public:
  typedef size_t (*MEM_CacheLimiter_DataSize_Func)(void *data);
  typedef int (*MEM_CacheLimiter_ItemPriority_Func)(void *item, int default_priority);
  typedef bool (*MEM_CacheLimiter_ItemDestroyable_Func)(void *item);

  MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func data_size_func)
      : data_size_func(data_size_func), item_priority_func(NULL), item_destroyable_func(NULL)
  {
  }

  ~MEM_CacheLimiter()
  {
    int i;
    for (i = 0; i < queue.size(); i++) {
      delete queue[i];
    }
  }

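  /** Add a new element to the cache. The returned handle starts with a
   * reference count of zero, so it is immediately destroyable by
   * enforce_limits() until ref() is called. */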
  MEM_CacheLimiterHandle<T> *insert(T *elem)
  {
    queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
    queue.back()->pos = queue.size() - 1;
    return queue.back();
  }

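  /** Remove a handle from the queue (swapping the last element into its slot)
   * and delete the handle itself; the cached data is not freed here. */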
  void unmanage(MEM_CacheLimiterHandle<T> *handle)
  {
    int pos = handle->pos;

    queue[pos] = queue.back();
    queue[pos]->pos = pos;
    queue.pop_back();

    delete handle;
  }

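  /** Total memory used by the cache: the sum of the size callback over all
   * elements if one was provided, otherwise the allocator-wide figure from
   * MEM_get_memory_in_use(). */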
  size_t get_memory_in_use()
  {
    size_t size = 0;
    if (data_size_func) {
      int i;
      for (i = 0; i < queue.size(); i++) {
        size += data_size_func(queue[i]->get()->get_data());
      }
    }
    else {
      size = MEM_get_memory_in_use();
    }
    return size;
  }

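  /** Destroy least-priority destroyable elements until the memory in use is
   * at most the configured maximum. Does nothing when the limiter is disabled
   * or the maximum is zero. */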
  void enforce_limits()
  {
    size_t max = MEM_CacheLimiter_get_maximum();
    bool is_disabled = MEM_CacheLimiter_is_disabled();
    size_t mem_in_use, cur_size;

    if (is_disabled) {
      return;
    }

    if (max == 0) {
      return;
    }

    mem_in_use = get_memory_in_use();

    if (mem_in_use <= max) {
      return;
    }

    while (!queue.empty() && mem_in_use > max) {
      MEM_CacheElementPtr elem = get_least_priority_destroyable_element();

      if (!elem)
        break;

      if (data_size_func) {
        cur_size = data_size_func(elem->get()->get_data());
      }
      else {
        cur_size = mem_in_use;
      }

      if (elem->destroy_if_possible()) {
        if (data_size_func) {
          mem_in_use -= cur_size;
        }
        else {
          mem_in_use -= cur_size - MEM_get_memory_in_use();
        }
      }
    }
  }

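  /** Mark a handle as most recently used by moving it to the back of the
   * queue, so it becomes the last candidate for destruction. */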
  void touch(MEM_CacheLimiterHandle<T> *handle)
  {
    /* If a custom priority callback is used, re-arranging the queue makes
     * little sense: the whole queue is iterated to find the least-priority
     * element anyway. */
    if (item_priority_func == NULL) {
      queue[handle->pos] = queue.back();
      queue[handle->pos]->pos = handle->pos;
      queue.pop_back();
      queue.push_back(handle);
      handle->pos = queue.size() - 1;
    }
  }

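  /* A minimal sketch of a priority callback, assuming a hypothetical
   * CachedBuffer::is_pinned flag. The element with the lowest returned
   * priority is destroyed first, so returning a larger value keeps an element
   * in the cache longer:
   *
   *   static int cached_buffer_priority(void *item, int default_priority)
   *   {
   *     CachedBuffer *buffer = static_cast<CachedBuffer *>(item);
   *     return buffer->is_pinned ? default_priority + 100 : default_priority;
   *   }
   */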
  void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func)
  {
    this->item_priority_func = item_priority_func;
  }

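  /* A minimal sketch of a destroyable callback, again assuming a hypothetical
   * CachedBuffer type. Returning false protects an element from being freed
   * by enforce_limits() even when it is unreferenced:
   *
   *   static bool cached_buffer_destroyable(void *item)
   *   {
   *     return !static_cast<CachedBuffer *>(item)->is_in_use;
   *   }
   */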
  void set_item_destroyable_func(MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func)
  {
    this->item_destroyable_func = item_destroyable_func;
  }

 private:
  typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
  typedef std::vector<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr>> MEM_CacheQueue;
  typedef typename MEM_CacheQueue::iterator iterator;

  /* Check whether the element can be destroyed when enforcing cache limits. */
  bool can_destroy_element(MEM_CacheElementPtr &elem)
  {
    if (!elem->can_destroy()) {
      /* Element is referenced. */
      return false;
    }
    if (item_destroyable_func) {
      if (!item_destroyable_func(elem->get()->get_data()))
        return false;
    }
    return true;
  }

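  /** Pick the element to evict next: without a priority callback this is the
   * first destroyable element in queue order (the least recently touched);
   * with a callback it is the destroyable element with the lowest priority
   * value. */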
  MEM_CacheElementPtr get_least_priority_destroyable_element(void)
  {
    if (queue.empty())
      return NULL;

    MEM_CacheElementPtr best_match_elem = NULL;

    if (!item_priority_func) {
      for (iterator it = queue.begin(); it != queue.end(); it++) {
        MEM_CacheElementPtr elem = *it;
        if (!can_destroy_element(elem))
          continue;
        best_match_elem = elem;
        break;
      }
    }
    else {
      int best_match_priority = 0;
      int i;

      for (i = 0; i < queue.size(); i++) {
        MEM_CacheElementPtr elem = queue[i];

        if (!can_destroy_element(elem))
          continue;

        /* By default 0 means the highest priority element. */
        /* Casting a size type to int is questionable,
         * but unlikely to cause problems. */
        int priority = -((int)(queue.size()) - i - 1);
        priority = item_priority_func(elem->get()->get_data(), priority);

        if (priority < best_match_priority || best_match_elem == NULL) {
          best_match_priority = priority;
          best_match_elem = elem;
        }
      }
    }

    return best_match_elem;
  }

  MEM_CacheQueue queue;
  MEM_CacheLimiter_DataSize_Func data_size_func;
  MEM_CacheLimiter_ItemPriority_Func item_priority_func;
  MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func;
};

#endif  // __MEM_CACHELIMITER_H__