  1. /**************************************************************************/
  2. /* paged_array.h */
  3. /**************************************************************************/
  4. /* This file is part of: */
  5. /* GODOT ENGINE */
  6. /* https://godotengine.org */
  7. /**************************************************************************/
  8. /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
  9. /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
  10. /* */
  11. /* Permission is hereby granted, free of charge, to any person obtaining */
  12. /* a copy of this software and associated documentation files (the */
  13. /* "Software"), to deal in the Software without restriction, including */
  14. /* without limitation the rights to use, copy, modify, merge, publish, */
  15. /* distribute, sublicense, and/or sell copies of the Software, and to */
  16. /* permit persons to whom the Software is furnished to do so, subject to */
  17. /* the following conditions: */
  18. /* */
  19. /* The above copyright notice and this permission notice shall be */
  20. /* included in all copies or substantial portions of the Software. */
  21. /* */
  22. /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
  23. /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
  24. /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
  25. /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
  26. /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
  27. /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
  28. /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
  29. /**************************************************************************/
  30. #pragma once
  31. #include "core/os/memory.h"
  32. #include "core/os/spin_lock.h"
  33. #include "core/typedefs.h"
  34. #include <type_traits>
// PagedArray is used mainly for filling a very large array from multiple threads efficiently and without causing major fragmentation
// PagedArrayPool manages central page allocation in a thread-safe manner
  37. template <typename T>
  38. class PagedArrayPool {
  39. T **page_pool = nullptr;
  40. uint32_t pages_allocated = 0;
  41. uint32_t *available_page_pool = nullptr;
  42. uint32_t pages_available = 0;
  43. uint32_t page_size = 0;
  44. SpinLock spin_lock;
  45. public:
  46. struct PageInfo {
  47. T *page = nullptr;
  48. uint32_t page_id = 0;
  49. };
  50. PageInfo alloc_page() {
  51. spin_lock.lock();
  52. if (unlikely(pages_available == 0)) {
  53. uint32_t pages_used = pages_allocated;
  54. pages_allocated++;
  55. page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated);
  56. available_page_pool = (uint32_t *)memrealloc(available_page_pool, sizeof(uint32_t) * pages_allocated);
  57. page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size);
  58. available_page_pool[0] = pages_used;
  59. pages_available++;
  60. }
  61. pages_available--;
  62. uint32_t page_id = available_page_pool[pages_available];
  63. T *page = page_pool[page_id];
  64. spin_lock.unlock();
  65. return PageInfo{ page, page_id };
  66. }
  67. void free_page(uint32_t p_page_id) {
  68. spin_lock.lock();
  69. available_page_pool[pages_available] = p_page_id;
  70. pages_available++;
  71. spin_lock.unlock();
  72. }
  73. uint32_t get_page_size_shift() const {
  74. return get_shift_from_power_of_2(page_size);
  75. }
  76. uint32_t get_page_size_mask() const {
  77. return page_size - 1;
  78. }
  79. void reset() {
  80. ERR_FAIL_COND(pages_available < pages_allocated);
  81. if (pages_allocated) {
  82. for (uint32_t i = 0; i < pages_allocated; i++) {
  83. memfree(page_pool[i]);
  84. }
  85. memfree(page_pool);
  86. memfree(available_page_pool);
  87. page_pool = nullptr;
  88. available_page_pool = nullptr;
  89. pages_allocated = 0;
  90. pages_available = 0;
  91. }
  92. }
  93. bool is_configured() const {
  94. return page_size > 0;
  95. }
  96. void configure(uint32_t p_page_size) {
  97. ERR_FAIL_COND(page_pool != nullptr); // Safety check.
  98. ERR_FAIL_COND(p_page_size == 0);
  99. page_size = nearest_power_of_2_templated(p_page_size);
  100. }
  101. PagedArrayPool(uint32_t p_page_size = 4096) { // power of 2 recommended because of alignment with OS page sizes. Even if element is bigger, its still a multiple and get rounded amount of pages
  102. configure(p_page_size);
  103. }
  104. ~PagedArrayPool() {
  105. ERR_FAIL_COND_MSG(pages_available < pages_allocated, "Pages in use exist at exit in PagedArrayPool");
  106. reset();
  107. }
  108. };
// PagedArray is a local array that is optimized to grow in place, then be cleared often.
// It does so by allocating pages from a PagedArrayPool.
// It is safe to use multiple PagedArrays from different threads, sharing a single PagedArrayPool.
// A growable array that stores its elements in fixed-size pages borrowed from a
// shared PagedArrayPool. Optimized for fast append and frequent clearing: pages
// go back to the pool on clear() so other arrays can reuse them.
template <typename T>
class PagedArray {
	PagedArrayPool<T> *page_pool = nullptr; // Shared allocator; must be set via set_page_pool() before use.

	T **page_data = nullptr; // Pointers to the pages currently owned by this array.
	uint32_t *page_ids = nullptr; // Pool ids matching page_data, needed to return pages.
	uint32_t max_pages_used = 0; // Capacity of page_data / page_ids (grows in powers of 2).
	uint32_t page_size_shift = 0; // log2(page size), copied from the pool.
	uint32_t page_size_mask = 0; // page size - 1, for fast modulo via bitmask.
	uint64_t count = 0; // Number of elements currently stored.

	// Number of pages holding at least one element.
	_FORCE_INLINE_ uint32_t _get_pages_in_use() const {
		if (count == 0) {
			return 0;
		} else {
			return ((count - 1) >> page_size_shift) + 1;
		}
	}

	// Double the capacity of the page-pointer bookkeeping arrays.
	void _grow_page_array() {
		//no more room in the page array to put the new page, make room
		if (max_pages_used == 0) {
			max_pages_used = 1;
		} else {
			max_pages_used *= 2; // increase in powers of 2 to keep allocations to minimum
		}
		page_data = (T **)memrealloc(page_data, sizeof(T *) * max_pages_used);
		page_ids = (uint32_t *)memrealloc(page_ids, sizeof(uint32_t) * max_pages_used);
	}

public:
	// Indexed access: high bits select the page, low bits the offset within it.
	_FORCE_INLINE_ const T &operator[](uint64_t p_index) const {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		uint32_t page = p_index >> page_size_shift;
		uint32_t offset = p_index & page_size_mask;
		return page_data[page][offset];
	}
	_FORCE_INLINE_ T &operator[](uint64_t p_index) {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		uint32_t page = p_index >> page_size_shift;
		uint32_t offset = p_index & page_size_mask;
		return page_data[page][offset];
	}

	// Append a value, requesting a new page from the pool when the last one is full.
	_FORCE_INLINE_ void push_back(const T &p_value) {
		uint32_t remainder = count & page_size_mask;
		if (unlikely(remainder == 0)) {
			// at 0, so time to request a new page
			uint32_t page_count = _get_pages_in_use();
			uint32_t new_page_count = page_count + 1;
			if (unlikely(new_page_count > max_pages_used)) {
				ERR_FAIL_NULL(page_pool); // Safety check.
				_grow_page_array(); //keep out of inline
			}
			typename PagedArrayPool<T>::PageInfo page_info = page_pool->alloc_page();
			page_data[page_count] = page_info.page;
			page_ids[page_count] = page_info.page_id;
		}
		// place the new value
		uint32_t page = count >> page_size_shift;
		uint32_t offset = count & page_size_mask;
		if constexpr (!std::is_trivially_constructible_v<T>) {
			// Non-trivial types are copy-constructed in place.
			memnew_placement(&page_data[page][offset], T(p_value));
		} else {
			page_data[page][offset] = p_value;
		}
		count++;
	}

	// Destroy the last element (if non-trivial) and release the last page back to
	// the pool when it becomes empty.
	_FORCE_INLINE_ void pop_back() {
		ERR_FAIL_COND(count == 0);
		if constexpr (!std::is_trivially_destructible_v<T>) {
			uint32_t page = (count - 1) >> page_size_shift;
			uint32_t offset = (count - 1) & page_size_mask;
			page_data[page][offset].~T();
		}
		uint32_t remainder = count & page_size_mask;
		if (unlikely(remainder == 1)) {
			// one element remained, so page must be freed.
			uint32_t last_page = _get_pages_in_use() - 1;
			page_pool->free_page(page_ids[last_page]);
		}
		count--;
	}

	// O(1) removal that does not preserve element order: the last element is
	// copied over the removed slot, then popped.
	void remove_at_unordered(uint64_t p_index) {
		ERR_FAIL_UNSIGNED_INDEX(p_index, count);
		(*this)[p_index] = (*this)[count - 1];
		pop_back();
	}

	// Destroy all elements and return every page to the pool; the bookkeeping
	// arrays are kept so the array can refill without reallocating.
	void clear() {
		//destruct if needed
		if constexpr (!std::is_trivially_destructible_v<T>) {
			for (uint64_t i = 0; i < count; i++) {
				uint32_t page = i >> page_size_shift;
				uint32_t offset = i & page_size_mask;
				page_data[page][offset].~T();
			}
		}
		//return the pages to the pagepool, so they can be used by another array eventually
		uint32_t pages_used = _get_pages_in_use();
		for (uint32_t i = 0; i < pages_used; i++) {
			page_pool->free_page(page_ids[i]);
		}
		count = 0;
		//note we leave page_data and page_indices intact for next use. If you really want to clear them call reset()
	}

	// clear() plus release of the bookkeeping arrays themselves.
	void reset() {
		clear();
		if (page_data) {
			memfree(page_data);
			memfree(page_ids);
			page_data = nullptr;
			page_ids = nullptr;
			max_pages_used = 0;
		}
	}

	// This takes the pages from a source array and merges them to this one
	// resulting order is undefined, but content is merged very efficiently,
	// making it ideal to fill content on several threads to later join it.
	// Both arrays must share the same PagedArrayPool; the source is left empty.
	void merge_unordered(PagedArray<T> &p_array) {
		ERR_FAIL_COND(page_pool != p_array.page_pool);

		// Step 1: detach our partial last page (if any), so that only full pages remain.
		uint32_t remainder = count & page_size_mask;
		T *remainder_page = nullptr;
		uint32_t remainder_page_id = 0;
		if (remainder > 0) {
			uint32_t last_page = _get_pages_in_use() - 1;
			remainder_page = page_data[last_page];
			remainder_page_id = page_ids[last_page];
		}
		count -= remainder;

		// Step 2: transfer ownership of every source page wholesale (the last source
		// page may be partial, which leaves our new last page partial).
		uint32_t src_page_index = 0;
		uint32_t page_size = page_size_mask + 1;
		while (p_array.count > 0) {
			uint32_t page_count = _get_pages_in_use();
			uint32_t new_page_count = page_count + 1;
			if (unlikely(new_page_count > max_pages_used)) {
				_grow_page_array(); //keep out of inline
			}
			page_data[page_count] = p_array.page_data[src_page_index];
			page_ids[page_count] = p_array.page_ids[src_page_index];
			uint32_t take = MIN(p_array.count, page_size); //pages to take away
			p_array.count -= take;
			count += take;
			src_page_index++;
		}

		// Step 3: merge back the detached remainder page.
		//handle the remainder page if exists
		if (remainder_page) {
			uint32_t new_remainder = count & page_size_mask;
			if (new_remainder > 0) {
				//must merge old remainder with new remainder
				T *dst_page = page_data[_get_pages_in_use() - 1];
				uint32_t to_copy = MIN(page_size - new_remainder, remainder);
				// Move elements (copy-construct + destruct for non-trivial T) from the
				// tail of the old remainder page into the free space of the new last page.
				for (uint32_t i = 0; i < to_copy; i++) {
					if constexpr (!std::is_trivially_constructible_v<T>) {
						memnew_placement(&dst_page[i + new_remainder], T(remainder_page[i + remainder - to_copy]));
					} else {
						dst_page[i + new_remainder] = remainder_page[i + remainder - to_copy];
					}
					if constexpr (!std::is_trivially_destructible_v<T>) {
						remainder_page[i + remainder - to_copy].~T();
					}
				}
				remainder -= to_copy; //subtract what was copied from remainder
				count += to_copy; //add what was copied to the count
				if (remainder == 0) {
					//entire remainder copied, let go of remainder page
					page_pool->free_page(remainder_page_id);
					remainder_page = nullptr;
				}
			}
			if (remainder > 0) {
				//there is still remainder, append it
				uint32_t page_count = _get_pages_in_use();
				uint32_t new_page_count = page_count + 1;
				if (unlikely(new_page_count > max_pages_used)) {
					_grow_page_array(); //keep out of inline
				}
				page_data[page_count] = remainder_page;
				page_ids[page_count] = remainder_page_id;
				count += remainder;
			}
		}
	}

	_FORCE_INLINE_ uint64_t size() const {
		return count;
	}

	// Bind this array to a pool. Must be called before any use, and only while empty.
	void set_page_pool(PagedArrayPool<T> *p_page_pool) {
		ERR_FAIL_COND(max_pages_used > 0); // Safety check.
		page_pool = p_page_pool;
		page_size_mask = page_pool->get_page_size_mask();
		page_size_shift = page_pool->get_page_size_shift();
	}

	~PagedArray() {
		reset();
	}
};