objcache.c

// SPDX-License-Identifier: GPL-2.0 or GPL-3.0
// Copyright © 2018-2019 Ariadne Devos

// sHT -- allocate fixed-size objects

#include <stdint.h>
#include <stddef.h>

#include <sHT/block.h>
#include <sHT/bugs.h>
#include <sHT/compiler.h>
#include <sHT/nospec.h>
#include <sHT/resource.h>
#include <sHT/vector.h>
#include <sHT/test.h>
#include <sHT/index.h>

struct sHT_objcache
{
	size_t capacity;
	size_t elem_size;
	size_t used;
	_Alignas(sHT_vector_align)
	/* Its actual size is a multiple of @var{sHT_vector_align}, to
	   simplify SIMD code. */
	void *elems[];
	/* And after that, capacity * elem_size untyped bytes. */
};
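/* Illustrative layout sketch of one blessed cache block (derived from the
   definitions above, added for exposition): the header, then the pointer
   array, then the untyped object storage:

	[ capacity | elem_size | used | elems[0 .. capacity-1] | object bytes ]

   where elems[i] ends up pointing into the object bytes at offset
   i * elem_size (see sHT_init_elems below). */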
_Static_assert(sHT_BLOCK_ALIGN % _Alignof(struct sHT_objcache) == 0, "objcache must be at start of page");
_Static_assert(sHT_vector_align >= _Alignof(max_align_t), "increase alignment");

#define sHT_OBJCACHE_UNIT (sHT_vector_align / sizeof(void *))

static size_t
sHT_objcache_round_capacity(size_t capacity)
{
	/* For alignment, let @var{capacity} * sizeof(void *) be a multiple
	   of sHT_vector_align, which is a power of two.
	   To do that, compute the remainder with sHT_OBJCACHE_UNIT.  Then,
	   add sHT_OBJCACHE_UNIT - remainder, unless the remainder is zero.
	   No branch mispredictions are possible! */
	size_t remainder = capacity & (sHT_OBJCACHE_UNIT - 1);
	capacity += (sHT_OBJCACHE_UNIT - remainder) & (sHT_OBJCACHE_UNIT - 1);
	return capacity;
}
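/* A worked example (assuming sHT_vector_align == 32 and 8-byte pointers,
   so sHT_OBJCACHE_UNIT == 4): capacity == 5 gives remainder == 1, so
   (4 - 1) & 3 == 3 is added and 8 is returned; capacity == 8 gives
   remainder == 0, (4 - 0) & 3 == 0 is added and 8 comes back unchanged. */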
static _Bool
sHT_objcache_size(size_t *dest, size_t capacity, size_t elem_size)
{
	_Bool overflow;
	/* Avoid overflow later on. */
	overflow = sHT_gt(capacity, SIZE_MAX - sHT_OBJCACHE_UNIT + 1);
	capacity = sHT_objcache_round_capacity(capacity);
	size_t calc;
	/* TODO: hypothetical Spectre issues */
	/* TODO: other compilers than GCC */
	overflow |= __builtin_add_overflow(sizeof(void *), elem_size, &calc);
	overflow |= __builtin_mul_overflow(calc, capacity, &calc);
	overflow |= __builtin_add_overflow(calc, sizeof(struct sHT_objcache), &calc);
	*dest = calc;
	return overflow;
}
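/* In other words, ignoring overflow,
   *dest = sizeof(struct sHT_objcache)
           + rounded_capacity * (sizeof(void *) + elem_size),
   matching the layout sketched above.  Under the same assumptions as the
   example after sHT_objcache_round_capacity (and, under those assumptions,
   sizeof(struct sHT_objcache) == 32), capacity == 5 and elem_size == 64
   round to a capacity of 8 and give *dest == 32 + 8 * (8 + 64) == 608. */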
size_t
sHT_objcache_size_batch(size_t n, size_t size[], const size_t capacity[], const size_t elem_size[])
{
	/* TODO: Spectre mitigations for hypothetical issue
	   (sHT_test_hidden2?) */
	_Bool overflow = 0;
	size_t i;
	sHT_index_iterate(i, n) {
		overflow |= sHT_objcache_size(&size[i], capacity[i], elem_size[i]);
	}
	if (sHT_nonzero_p(overflow))
		i = 0;
	return i;
}
/* Inlined into @var{sHT_objcache_bless}. */
static size_t
sHT_init_elems(struct sHT_objcache *cache, size_t n, size_t elem_size, char *objects)
{
	size_t i = 0;
	/* Always do at least one loop, such that @var{i} cannot be 0 after
	   the loop in a speculative execution.  That would cause
	   @code{cache->capacity} to be set to zero, which would break Spectre
	   mitigations in @code{sHT_alloc}. */
	do {
		/* If @var{n} were 0, there would be a speculative
		   out-of-bounds access.  That's why the capacity must be
		   positive. */
		i = sHT_index_nospec(i, n);
		cache->elems[i] = objects + i * elem_size;
		i++;
	} while (i < n);
	return i;
}
static struct sHT_objcache *
sHT_objcache_bless(struct sHT_objcache *cache, size_t n, size_t elem_size)
{
	/* @code{(void) sHT_fix_alignment(&elem_size);} */
	char *objects = (char *) (cache->elems + n);
	/* Because of strict aliasing, and perhaps out-of-bounds accesses. */
	sHT_hide_var(objects);
	/* (After sHT_init_elems)
	   The 'continue' branch on @code{i < n} may have been speculatively
	   ignored, but @code{cache->elems[...]}, a pointer, may be returned
	   by @var{sHT_alloc}.
	   We do, however, have a variable that holds something less than or
	   equal to the capacity: @var{i}.  @var{i} is not an
	   over-approximation, even on a speculative execution.  An
	   under-approximation is safe, as long as it is positive. */
	n = sHT_init_elems(cache, n, elem_size, objects);
	cache->capacity = n;
	cache->elem_size = elem_size;
	cache->used = 0;
	return cache;
}
size_t
sHT_objcache_bless_batch(size_t n, void *cache[], const size_t capacity[], const size_t elem_size[])
{
	size_t i;
	sHT_index_iterate(i, n) {
		struct sHT_objcache *c = sHT_objcache_bless(cache[i], capacity[i], elem_size[i]);
		sHT_depend(i, c);
	}
	return i;
}
void *
sHT_alloc(struct sHT_objcache *cache)
{
	sHT_assert(cache->used <= cache->capacity);
	if (sHT_eq(cache->used, cache->capacity))
		return NULL;
	size_t i = sHT_index_nospec(cache->used++, cache->capacity);
	/* This may speculatively allocate an already-allocated object,
	   which is documented in <sHT/resource.h>. */
	return cache->elems[i];
}
void
sHT_free(struct sHT_objcache *cache, void *object)
{
	sHT_assert(cache->used > 0);
	sHT_assert(cache->used <= cache->capacity);
	size_t i = sHT_index_nospec(--cache->used, cache->capacity);
	/* This may speculatively free a speculatively double-allocated
	   object, which is documented in <sHT/resource.h>. */
	cache->elems[i] = object;
}
_Bool
sHT_cache_exhausted_p(struct sHT_objcache *cache)
{
	return sHT_eq(cache->used, cache->capacity);
}
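
/* A minimal usage sketch, for illustration only and kept out of the build.
   posix_memalign() is a stand-in: how the caller obtains a suitably aligned
   block is an assumption, not part of this file's API.  The capacity must be
   positive (see sHT_init_elems), and sHT_objcache_size_batch appears to
   return n on success and 0 when a size computation overflowed. */
#if 0
#include <stdlib.h>

static struct sHT_objcache *
example_make_cache(size_t capacity, size_t elem_size)
{
	size_t bytes;
	if (sHT_objcache_size_batch(1, &bytes, &capacity, &elem_size) == 0)
		return NULL;	/* the size computation overflowed */

	void *mem;
	if (posix_memalign(&mem, sHT_BLOCK_ALIGN, bytes) != 0)
		return NULL;

	/* Turn the raw block into an object cache. */
	sHT_objcache_bless_batch(1, &mem, &capacity, &elem_size);
	return mem;
}

static void
example_use(struct sHT_objcache *cache)
{
	void *obj = sHT_alloc(cache);	/* NULL when the cache is exhausted */
	if (obj != NULL)
		sHT_free(cache, obj);
}
#endif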