/**************************************************************************/
/* paged_array.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/

#ifndef PAGED_ARRAY_H
#define PAGED_ARRAY_H

#include "core/os/memory.h"
#include "core/os/spin_lock.h"
#include "core/typedefs.h"

#include <type_traits>

// PagedArray is used mainly for filling a very large array from multiple threads efficiently and without causing major fragmentation.
// PagedArrayPool manages central page allocation in a thread-safe manner.
template <typename T>
class PagedArrayPool {
	T **page_pool = nullptr;
	uint32_t pages_allocated = 0;

	uint32_t *available_page_pool = nullptr;
	uint32_t pages_available = 0;

	uint32_t page_size = 0;
	SpinLock spin_lock;

public:
	struct PageInfo {
		T *page = nullptr;
		uint32_t page_id = 0;
	};

	PageInfo alloc_page() {
		spin_lock.lock();
		if (unlikely(pages_available == 0)) {
			uint32_t pages_used = pages_allocated;

			pages_allocated++;
			page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated);
			available_page_pool = (uint32_t *)memrealloc(available_page_pool, sizeof(uint32_t) * pages_allocated);

			page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size);
			available_page_pool[0] = pages_used;

			pages_available++;
		}

		pages_available--;
		uint32_t page_id = available_page_pool[pages_available];
		T *page = page_pool[page_id];
		spin_lock.unlock();

		return PageInfo{ page, page_id };
	}

	void free_page(uint32_t p_page_id) {
		spin_lock.lock();
		available_page_pool[pages_available] = p_page_id;
		pages_available++;
		spin_lock.unlock();
	}

	uint32_t get_page_size_shift() const {
		return get_shift_from_power_of_2(page_size);
	}

	uint32_t get_page_size_mask() const {
		return page_size - 1;
	}

	void reset() {
		ERR_FAIL_COND(pages_available < pages_allocated);
		if (pages_allocated) {
			for (uint32_t i = 0; i < pages_allocated; i++) {
				memfree(page_pool[i]);
			}
			memfree(page_pool);
			memfree(available_page_pool);
			page_pool = nullptr;
			available_page_pool = nullptr;
			pages_allocated = 0;
			pages_available = 0;
		}
	}

	bool is_configured() const {
		return page_size > 0;
	}

	void configure(uint32_t p_page_size) {
		ERR_FAIL_COND(page_pool != nullptr); // Safety check.
		ERR_FAIL_COND(p_page_size == 0);
		page_size = nearest_power_of_2_templated(p_page_size);
	}

	PagedArrayPool(uint32_t p_page_size = 4096) { // A power of 2 is recommended because it aligns with OS page sizes. Even if the element is bigger, the allocation is still a multiple of the element size and rounds to a whole number of pages.
		configure(p_page_size);
	}

	~PagedArrayPool() {
		ERR_FAIL_COND_MSG(pages_available < pages_allocated, "Pages in use exist at exit in PagedArrayPool");
		reset();
	}
};
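
// Illustrative sketch of the pool contract (hypothetical usage for documentation purposes, not engine code):
// every page handed out by alloc_page() must be returned with free_page() before reset() or destruction.
//
//     PagedArrayPool<uint32_t> pool; // Defaults to 4096 elements per page, rounded up to a power of 2.
//     PagedArrayPool<uint32_t>::PageInfo info = pool.alloc_page();
//     info.page[0] = 123; // Up to pool.get_page_size_mask() + 1 elements fit in one page.
//     pool.free_page(info.page_id);
//     pool.reset(); // Only valid once all pages have been returned.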

// PagedArray is a local array that is optimized to grow in place, then be cleared often.
// It does so by allocating pages from a PagedArrayPool.
// It is safe to use multiple PagedArrays from different threads while sharing a single PagedArrayPool.
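//
// Minimal usage sketch (illustrative only; the names below are hypothetical):
//
//     PagedArrayPool<int> pool;   // Shared, thread-safe page source.
//     PagedArray<int> array;      // Each PagedArray instance is not itself synchronized.
//     array.set_page_pool(&pool); // Must be done before the first push_back().
//     array.push_back(42);
//     int v = array[0];
//     array.clear(); // Returns the pages to the pool, keeps the page directory for reuse.
//     array.reset(); // Additionally frees the page directory (page_data / page_ids).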
template <typename T>
class PagedArray {
	PagedArrayPool<T> *page_pool = nullptr;

	T **page_data = nullptr;
	uint32_t *page_ids = nullptr;
	uint32_t max_pages_used = 0;
	uint32_t page_size_shift = 0;
	uint32_t page_size_mask = 0;
	uint64_t count = 0;

	_FORCE_INLINE_ uint32_t _get_pages_in_use() const {
		if (count == 0) {
			return 0;
		} else {
			return ((count - 1) >> page_size_shift) + 1;
		}
	}

	void _grow_page_array() {
		// No more room in the page array to put the new page, make room.
		if (max_pages_used == 0) {
			max_pages_used = 1;
		} else {
			max_pages_used *= 2; // Increase in powers of 2 to keep allocations to a minimum.
		}
		page_data = (T **)memrealloc(page_data, sizeof(T *) * max_pages_used);
		page_ids = (uint32_t *)memrealloc(page_ids, sizeof(uint32_t) * max_pages_used);
	}

public:
	_FORCE_INLINE_ const T &operator[](uint64_t p_index) const {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		uint32_t page = p_index >> page_size_shift;
		uint32_t offset = p_index & page_size_mask;
		return page_data[page][offset];
	}

	_FORCE_INLINE_ T &operator[](uint64_t p_index) {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		uint32_t page = p_index >> page_size_shift;
		uint32_t offset = p_index & page_size_mask;
		return page_data[page][offset];
	}

	_FORCE_INLINE_ void push_back(const T &p_value) {
		uint32_t remainder = count & page_size_mask;
		if (unlikely(remainder == 0)) {
			// At 0, so time to request a new page.
			uint32_t page_count = _get_pages_in_use();
			uint32_t new_page_count = page_count + 1;

			if (unlikely(new_page_count > max_pages_used)) {
				ERR_FAIL_NULL(page_pool); // Safety check.

				_grow_page_array(); // Keep out of inline.
			}

			typename PagedArrayPool<T>::PageInfo page_info = page_pool->alloc_page();
			page_data[page_count] = page_info.page;
			page_ids[page_count] = page_info.page_id;
		}

		// Place the new value.
		uint32_t page = count >> page_size_shift;
		uint32_t offset = count & page_size_mask;

		if constexpr (!std::is_trivially_constructible_v<T>) {
			memnew_placement(&page_data[page][offset], T(p_value));
		} else {
			page_data[page][offset] = p_value;
		}

		count++;
	}

	_FORCE_INLINE_ void pop_back() {
		ERR_FAIL_COND(count == 0);

		if constexpr (!std::is_trivially_destructible_v<T>) {
			uint32_t page = (count - 1) >> page_size_shift;
			uint32_t offset = (count - 1) & page_size_mask;
			page_data[page][offset].~T();
		}

		uint32_t remainder = count & page_size_mask;
		if (unlikely(remainder == 1)) {
			// One element remained, so the page must be freed.
			uint32_t last_page = _get_pages_in_use() - 1;
			page_pool->free_page(page_ids[last_page]);
		}
		count--;
	}

	void remove_at_unordered(uint64_t p_index) {
		ERR_FAIL_UNSIGNED_INDEX(p_index, count);
		(*this)[p_index] = (*this)[count - 1];
		pop_back();
	}

	void clear() {
		// Destruct if needed.
		if constexpr (!std::is_trivially_destructible_v<T>) {
			for (uint64_t i = 0; i < count; i++) {
				uint32_t page = i >> page_size_shift;
				uint32_t offset = i & page_size_mask;
				page_data[page][offset].~T();
			}
		}

		// Return the pages to the page pool, so they can be used by another array eventually.
		uint32_t pages_used = _get_pages_in_use();
		for (uint32_t i = 0; i < pages_used; i++) {
			page_pool->free_page(page_ids[i]);
		}

		count = 0;
		// Note: page_data and page_ids are left intact for the next use. If you really want to free them, call reset().
	}

	void reset() {
		clear();
		if (page_data) {
			memfree(page_data);
			memfree(page_ids);
			page_data = nullptr;
			page_ids = nullptr;
			max_pages_used = 0;
		}
	}

	// This takes the pages from a source array and merges them into this one.
	// The resulting order is undefined, but the content is merged very efficiently,
	// making it ideal for filling content on several threads and joining it later.
	void merge_unordered(PagedArray<T> &p_array) {
		ERR_FAIL_COND(page_pool != p_array.page_pool);

		uint32_t remainder = count & page_size_mask;

		T *remainder_page = nullptr;
		uint32_t remainder_page_id = 0;

		if (remainder > 0) {
			uint32_t last_page = _get_pages_in_use() - 1;
			remainder_page = page_data[last_page];
			remainder_page_id = page_ids[last_page];
		}

		count -= remainder;

		uint32_t src_page_index = 0;
		uint32_t page_size = page_size_mask + 1;

		while (p_array.count > 0) {
			uint32_t page_count = _get_pages_in_use();
			uint32_t new_page_count = page_count + 1;

			if (unlikely(new_page_count > max_pages_used)) {
				_grow_page_array(); // Keep out of inline.
			}

			page_data[page_count] = p_array.page_data[src_page_index];
			page_ids[page_count] = p_array.page_ids[src_page_index];

			uint32_t take = MIN(p_array.count, page_size); // Elements to take from this source page.
			p_array.count -= take;
			count += take;
			src_page_index++;
		}

		// Handle the remainder page, if it exists.
		if (remainder_page) {
			uint32_t new_remainder = count & page_size_mask;

			if (new_remainder > 0) {
				// Must merge the old remainder with the new remainder.
				T *dst_page = page_data[_get_pages_in_use() - 1];
				uint32_t to_copy = MIN(page_size - new_remainder, remainder);

				for (uint32_t i = 0; i < to_copy; i++) {
					if constexpr (!std::is_trivially_constructible_v<T>) {
						memnew_placement(&dst_page[i + new_remainder], T(remainder_page[i + remainder - to_copy]));
					} else {
						dst_page[i + new_remainder] = remainder_page[i + remainder - to_copy];
					}

					if constexpr (!std::is_trivially_destructible_v<T>) {
						remainder_page[i + remainder - to_copy].~T();
					}
				}

				remainder -= to_copy; // Subtract what was copied from the remainder.
				count += to_copy; // Add what was copied to the count.

				if (remainder == 0) {
					// The entire remainder was copied, let go of the remainder page.
					page_pool->free_page(remainder_page_id);
					remainder_page = nullptr;
				}
			}

			if (remainder > 0) {
				// There is still a remainder, append it.
				uint32_t page_count = _get_pages_in_use();
				uint32_t new_page_count = page_count + 1;

				if (unlikely(new_page_count > max_pages_used)) {
					_grow_page_array(); // Keep out of inline.
				}

				page_data[page_count] = remainder_page;
				page_ids[page_count] = remainder_page_id;
				count += remainder;
			}
		}
	}
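
	// Illustrative fill-and-join pattern for merge_unordered() (hypothetical names, not engine code):
	//
	//     PagedArrayPool<int> pool;
	//     PagedArray<int> main_array;
	//     PagedArray<int> thread_arrays[THREAD_COUNT];
	//     main_array.set_page_pool(&pool);
	//     for (PagedArray<int> &a : thread_arrays) {
	//         a.set_page_pool(&pool); // All arrays must share the same pool for merge_unordered().
	//     }
	//     // ... each thread pushes into its own thread_arrays[i] ...
	//     for (PagedArray<int> &a : thread_arrays) {
	//         main_array.merge_unordered(a); // Takes ownership of the pages; `a` is left empty.
	//     }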

	_FORCE_INLINE_ uint64_t size() const {
		return count;
	}

	void set_page_pool(PagedArrayPool<T> *p_page_pool) {
		ERR_FAIL_COND(max_pages_used > 0); // Safety check.

		page_pool = p_page_pool;
		page_size_mask = page_pool->get_page_size_mask();
		page_size_shift = page_pool->get_page_size_shift();
	}

	~PagedArray() {
		reset();
	}
};

#endif // PAGED_ARRAY_H