
#
#
#            Nim's Runtime Library
#        (c) Copyright 2012 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

# Low level allocator for Nim. Has been designed to support the GC.

{.push profiler:off.}

include osalloc

template track(op, address, size) =
  when defined(memTracker):
    memTrackerOp(op, address, size)

# We manage *chunks* of memory. Each chunk is a multiple of the page size.
# Each chunk starts at an address that is divisible by the page size.

const
  nimMinHeapPages {.intdefine.} = 128 # 0.5 MB
  SmallChunkSize = PageSize
  MaxFli = 30
  MaxLog2Sli = 5 # 32, this cannot be increased without changing 'uint32'
                 # everywhere!
  MaxSli = 1 shl MaxLog2Sli
  FliOffset = 6
  RealFli = MaxFli - FliOffset

  # size of chunks in last matrix bin
  MaxBigChunkSize = 1 shl MaxFli - 1 shl (MaxFli-MaxLog2Sli-1)
  HugeChunkSize = MaxBigChunkSize + 1
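# A worked example (my numbers, not from the original source): with
# MaxFli == 30 and MaxLog2Sli == 5, the largest chunk the TLSF matrix manages
# is MaxBigChunkSize = 2^30 - 2^24 = 1_073_741_824 - 16_777_216
# = 1_056_964_608 bytes (~1008 MiB). Anything of HugeChunkSize or more
# bypasses the matrix and is requested from / returned to the OS directly.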
type
  PTrunk = ptr Trunk
  Trunk = object
    next: PTrunk         # all nodes are connected with this pointer
    key: int             # start address at bit 0
    bits: array[0..IntsPerTrunk-1, uint] # a bit vector

  TrunkBuckets = array[0..255, PTrunk]
  IntSet = object
    data: TrunkBuckets

type
  AlignType = BiggestFloat
  FreeCell {.final, pure.} = object
    next: ptr FreeCell   # next free cell in chunk (overlaid with refcount)
    zeroField: int       # 0 means cell is not used (overlaid with typ field)
                         # 1 means cell is manually managed pointer
                         # otherwise a PNimType is stored in there

  PChunk = ptr BaseChunk
  PBigChunk = ptr BigChunk
  PSmallChunk = ptr SmallChunk
  BaseChunk {.pure, inheritable.} = object
    prevSize: int        # size of previous chunk; for coalescing
                         # 0th bit == 1 if 'used'
    size: int            # if < PageSize it is a small chunk

  SmallChunk = object of BaseChunk
    next, prev: PSmallChunk  # chunks of the same size
    freeList: ptr FreeCell
    free: int            # how many bytes remain
    acc: int             # accumulator for small object allocation
    when defined(cpu32):
      align: int
    data: AlignType      # start of usable memory

  BigChunk = object of BaseChunk # not necessarily > PageSize!
    next, prev: PBigChunk    # chunks of the same (or bigger) size
    data: AlignType      # start of usable memory

template smallChunkOverhead(): untyped = sizeof(SmallChunk)-sizeof(AlignType)
template bigChunkOverhead(): untyped = sizeof(BigChunk)-sizeof(AlignType)
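# Rough overhead numbers, as an illustration only (assuming 64-bit pointers,
# 8-byte ints and no surprising padding): sizeof(SmallChunk) is then
# 16 (BaseChunk) + 16 (next/prev) + 8 (freeList) + 8 (free) + 8 (acc)
# + 8 (data) = 64 bytes, so smallChunkOverhead() == 56; sizeof(BigChunk) is
# 16 + 16 + 8 = 40 bytes, so bigChunkOverhead() == 32. The `data` field
# itself is not overhead, which is why sizeof(AlignType) is subtracted.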
# ------------- chunk table ---------------------------------------------------
# We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
# endings of big chunks. This is needed by the merging operation. The only
# remaining operation is best-fit for big chunks. Since there is a size-limit
# for big chunks (because greater than the limit means they are returned back
# to the OS), a fixed size array can be used.

type
  PLLChunk = ptr LLChunk
  LLChunk = object ## *low-level* chunk
    size: int                # remaining size
    acc: int                 # accumulator
    next: PLLChunk           # next low-level chunk; only needed for dealloc

  PAvlNode = ptr AvlNode
  AvlNode = object
    link: array[0..1, PAvlNode] # Left (0) and right (1) links
    key, upperBound: int
    level: int

  HeapLinks = object
    len: int
    chunks: array[30, (PBigChunk, int)]
    next: ptr HeapLinks

  MemRegion = object
    minLargeObj, maxLargeObj: int
    freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
    flBitmap: uint32
    slBitmap: array[RealFli, uint32]
    matrix: array[RealFli, array[MaxSli, PBigChunk]]
    llmem: PLLChunk
    currMem, maxMem, freeMem, occ: int # memory sizes (allocated from OS)
    lastSize: int # needed for the case that OS gives us pages linearly
    chunkStarts: IntSet
    root, deleted, last, freeAvlNodes: PAvlNode
    locked, blockChunkSizeIncrease: bool # if locked, we cannot free pages.
    nextChunkSize: int
    bottomData: AvlNode
    heapLinks: HeapLinks
    when defined(nimTypeNames):
      allocCounter, deallocCounter: int

const
  fsLookupTable: array[byte, int8] = [
    -1'i8, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7
  ]

proc msbit(x: uint32): int {.inline.} =
  let a = if x <= 0xff_ff'u32:
            (if x <= 0xff: 0 else: 8)
          else:
            (if x <= 0xff_ff_ff'u32: 16 else: 24)
  result = int(fsLookupTable[byte(x shr a)]) + a

proc lsbit(x: uint32): int {.inline.} =
  msbit(x and ((not x) + 1))
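# Illustrative values (my examples, not from the original source):
#   msbit(1) == 0, msbit(0xff) == 7, msbit(0x100) == 8, msbit(0x5000) == 14
#   lsbit(0xa0) == 5, because `x and ((not x) + 1)` isolates the lowest set
#   bit (here 0x20) and msbit then returns its index.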
proc setBit(nr: int; dest: var uint32) {.inline.} =
  dest = dest or (1u32 shl (nr and 0x1f))

proc clearBit(nr: int; dest: var uint32) {.inline.} =
  dest = dest and not (1u32 shl (nr and 0x1f))

proc mappingSearch(r, fl, sl: var int) {.inline.} =
  #let t = (1 shl (msbit(uint32 r) - MaxLog2Sli)) - 1
  # This diverges from the standard TLSF algorithm because we need to ensure
  # PageSize alignment:
  let t = roundup((1 shl (msbit(uint32 r) - MaxLog2Sli)), PageSize) - 1
  r = r + t
  r = r and not t
  r = min(r, MaxBigChunkSize)
  fl = msbit(uint32 r)
  sl = (r shr (fl - MaxLog2Sli)) - MaxSli
  dec fl, FliOffset
  sysAssert((r and PageMask) == 0, "mappingSearch: still not aligned")

# See http://www.gii.upv.es/tlsf/files/papers/tlsf_desc.pdf for details of
# this algorithm.

proc mappingInsert(r: int): tuple[fl, sl: int] {.inline.} =
  sysAssert((r and PageMask) == 0, "mappingInsert: still not aligned")
  result.fl = msbit(uint32 r)
  result.sl = (r shr (result.fl - MaxLog2Sli)) - MaxSli
  dec result.fl, FliOffset
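# A worked example of the mapping, assuming PageSize == 4096 (my numbers,
# not from the original source): for a request of r = 20000 bytes,
# msbit(20000) == 14, so t = roundup(1 shl 9, 4096) - 1 = 4095 and r is
# rounded up to 20480. Then fl = msbit(20480) == 14 and
# sl = (20480 shr 9) - 32 == 8; after `dec fl, FliOffset` the chunk lands
# in matrix[8][8].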
template mat(): untyped = a.matrix[fl][sl]

proc findSuitableBlock(a: MemRegion; fl, sl: var int): PBigChunk {.inline.} =
  let tmp = a.slBitmap[fl] and (not 0u32 shl sl)
  result = nil
  if tmp != 0:
    sl = lsbit(tmp)
    result = mat()
  else:
    fl = lsbit(a.flBitmap and (not 0u32 shl (fl + 1)))
    if fl > 0:
      sl = lsbit(a.slBitmap[fl])
      result = mat()
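# In other words: the second-level bitmap of the current first-level bin is
# searched for a non-empty free list at index >= sl; failing that, the
# first-level bitmap is searched for the next non-empty bin above fl and its
# lowest populated second-level list is taken. Both searches are a single
# lsbit call, which is what gives TLSF its constant-time allocation.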
template clearBits(sl, fl) =
  clearBit(sl, a.slBitmap[fl])
  if a.slBitmap[fl] == 0u32:
    # do not forget to cascade:
    clearBit(fl, a.flBitmap)

proc removeChunkFromMatrix(a: var MemRegion; b: PBigChunk) =
  let (fl, sl) = mappingInsert(b.size)
  if b.next != nil: b.next.prev = b.prev
  if b.prev != nil: b.prev.next = b.next
  if mat() == b:
    mat() = b.next
    if mat() == nil:
      clearBits(sl, fl)
  b.prev = nil
  b.next = nil

proc removeChunkFromMatrix2(a: var MemRegion; b: PBigChunk; fl, sl: int) =
  mat() = b.next
  if mat() != nil:
    mat().prev = nil
  else:
    clearBits(sl, fl)
  b.prev = nil
  b.next = nil

proc addChunkToMatrix(a: var MemRegion; b: PBigChunk) =
  let (fl, sl) = mappingInsert(b.size)
  b.prev = nil
  b.next = mat()
  if mat() != nil:
    mat().prev = b
  mat() = b
  setBit(sl, a.slBitmap[fl])
  setBit(fl, a.flBitmap)

proc incCurrMem(a: var MemRegion, bytes: int) {.inline.} =
  inc(a.currMem, bytes)

proc decCurrMem(a: var MemRegion, bytes: int) {.inline.} =
  a.maxMem = max(a.maxMem, a.currMem)
  dec(a.currMem, bytes)

proc getMaxMem(a: var MemRegion): int =
  # Since we update maxPagesCount only when freeing pages,
  # maxPagesCount may not be up to date. Thus we use the
  # maximum of both values here:
  result = max(a.currMem, a.maxMem)

proc llAlloc(a: var MemRegion, size: int): pointer =
  # *low-level* alloc for the memory manager's data structures. Deallocation
  # is done at the end of the allocator's life time.
  if a.llmem == nil or size > a.llmem.size:
    # the requested size is ``roundup(size+sizeof(LLChunk), PageSize)``, but
    # since we know ``size`` is a (small) constant, we know the requested size
    # is one page:
    sysAssert roundup(size+sizeof(LLChunk), PageSize) == PageSize, "roundup 6"
    var old = a.llmem # can be nil and is correct with nil
    a.llmem = cast[PLLChunk](osAllocPages(PageSize))
    when defined(nimAvlcorruption):
      trackLocation(a.llmem, PageSize)
    incCurrMem(a, PageSize)
    a.llmem.size = PageSize - sizeof(LLChunk)
    a.llmem.acc = sizeof(LLChunk)
    a.llmem.next = old
  result = cast[pointer](cast[ByteAddress](a.llmem) + a.llmem.acc)
  dec(a.llmem.size, size)
  inc(a.llmem.acc, size)
  zeroMem(result, size)

proc getBottom(a: var MemRegion): PAvlNode =
  result = addr(a.bottomData)
  if result.link[0] == nil:
    result.link[0] = result
    result.link[1] = result

proc allocAvlNode(a: var MemRegion, key, upperBound: int): PAvlNode =
  if a.freeAvlNodes != nil:
    result = a.freeAvlNodes
    a.freeAvlNodes = a.freeAvlNodes.link[0]
  else:
    result = cast[PAvlNode](llAlloc(a, sizeof(AvlNode)))
    when defined(nimAvlcorruption):
      cprintf("tracking location: %p\n", result)
  result.key = key
  result.upperBound = upperBound
  let bottom = getBottom(a)
  result.link[0] = bottom
  result.link[1] = bottom
  result.level = 1
  #when defined(nimAvlcorruption):
  #  track("allocAvlNode", result, sizeof(AvlNode))
  sysAssert(bottom == addr(a.bottomData), "bottom data")
  sysAssert(bottom.link[0] == bottom, "bottom link[0]")
  sysAssert(bottom.link[1] == bottom, "bottom link[1]")

proc deallocAvlNode(a: var MemRegion, n: PAvlNode) {.inline.} =
  n.link[0] = a.freeAvlNodes
  a.freeAvlNodes = n

proc addHeapLink(a: var MemRegion; p: PBigChunk, size: int) =
  var it = addr(a.heapLinks)
  while it != nil and it.len >= it.chunks.len: it = it.next
  if it == nil:
    var n = cast[ptr HeapLinks](llAlloc(a, sizeof(HeapLinks)))
    n.next = a.heapLinks.next
    a.heapLinks.next = n
    n.chunks[0] = (p, size)
    n.len = 1
  else:
    let L = it.len
    it.chunks[L] = (p, size)
    inc it.len

include "system/avltree"

proc llDeallocAll(a: var MemRegion) =
  var it = a.llmem
  while it != nil:
    # we know each block in the list has the size of 1 page:
    var next = it.next
    osDeallocPages(it, PageSize)
    it = next
  a.llmem = nil

proc intSetGet(t: IntSet, key: int): PTrunk =
  var it = t.data[key and high(t.data)]
  while it != nil:
    if it.key == key: return it
    it = it.next
  result = nil

proc intSetPut(a: var MemRegion, t: var IntSet, key: int): PTrunk =
  result = intSetGet(t, key)
  if result == nil:
    result = cast[PTrunk](llAlloc(a, sizeof(result[])))
    result.next = t.data[key and high(t.data)]
    t.data[key and high(t.data)] = result
    result.key = key

proc contains(s: IntSet, key: int): bool =
  var t = intSetGet(s, key shr TrunkShift)
  if t != nil:
    var u = key and TrunkMask
    result = (t.bits[u shr IntShift] and (uint(1) shl (u and IntMask))) != 0
  else:
    result = false

proc incl(a: var MemRegion, s: var IntSet, key: int) =
  var t = intSetPut(a, s, key shr TrunkShift)
  var u = key and TrunkMask
  t.bits[u shr IntShift] = t.bits[u shr IntShift] or (uint(1) shl (u and IntMask))

proc excl(s: var IntSet, key: int) =
  var t = intSetGet(s, key shr TrunkShift)
  if t != nil:
    var u = key and TrunkMask
    t.bits[u shr IntShift] = t.bits[u shr IntShift] and not
      (uint(1) shl (u and IntMask))
iterator elements(t: IntSet): int {.inline.} =
  # while traversing it is forbidden to change the set!
  for h in 0..high(t.data):
    var r = t.data[h]
    while r != nil:
      var i = 0
      while i <= high(r.bits):
        var w = r.bits[i] # taking a copy of r.bits[i] here is correct, because
                          # modifying operations are not allowed during traversal
        var j = 0
        while w != 0: # test all remaining bits for zero
          if (w and 1) != 0: # the bit is set!
            yield (r.key shl TrunkShift) or (i shl IntShift +% j)
          inc(j)
          w = w shr 1
        inc(i)
      r = r.next
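# How a key is split, sketched for illustration (TrunkShift, TrunkMask,
# IntShift and IntMask come from the runtime's bitmask constants and are
# platform dependent):
#   trunk = key shr TrunkShift    -- hashed into t.data[trunk and 255]
#   u     = key and TrunkMask     -- position within that trunk
#   word  = u shr IntShift        -- index into the `bits` array
#   bit   = u and IntMask         -- bit within that word
# so membership, insertion and removal are each one hash lookup plus one
# bit operation.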
proc isSmallChunk(c: PChunk): bool {.inline.} =
  return c.size <= SmallChunkSize-smallChunkOverhead()

proc chunkUnused(c: PChunk): bool {.inline.} =
  result = (c.prevSize and 1) == 0

iterator allObjects(m: var MemRegion): pointer {.inline.} =
  m.locked = true
  for s in elements(m.chunkStarts):
    # we need to check here again as it could have been modified:
    if s in m.chunkStarts:
      let c = cast[PChunk](s shl PageShift)
      if not chunkUnused(c):
        if isSmallChunk(c):
          var c = cast[PSmallChunk](c)
          let size = c.size
          var a = cast[ByteAddress](addr(c.data))
          let limit = a + c.acc
          while a <% limit:
            yield cast[pointer](a)
            a = a +% size
        else:
          let c = cast[PBigChunk](c)
          yield addr(c.data)
  m.locked = false

proc iterToProc*(iter: typed, envType: typedesc; procName: untyped) {.
                      magic: "Plugin", compileTime.}

proc isCell(p: pointer): bool {.inline.} =
  result = cast[ptr FreeCell](p).zeroField >% 1
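# The zeroField encoding (see FreeCell above) makes this a one-word test:
# 0 marks a free cell, 1 a manually managed allocation, and any larger value
# is a PNimType pointer, so `zeroField >% 1` reads as "allocated and typed".
# The unsigned comparison `>%` ensures that pointer values whose sign bit
# happens to be set still register as allocated.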
# ------------- chunk management ----------------------------------------------
proc pageIndex(c: PChunk): int {.inline.} =
  result = cast[ByteAddress](c) shr PageShift

proc pageIndex(p: pointer): int {.inline.} =
  result = cast[ByteAddress](p) shr PageShift

proc pageAddr(p: pointer): PChunk {.inline.} =
  result = cast[PChunk](cast[ByteAddress](p) and not PageMask)
  #sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))

when false:
  proc writeFreeList(a: MemRegion) =
    var it = a.freeChunksList
    c_fprintf(stdout, "freeChunksList: %p\n", it)
    while it != nil:
      c_fprintf(stdout, "it: %p, next: %p, prev: %p, size: %ld\n",
                it, it.next, it.prev, it.size)
      it = it.next

const nimMaxHeap {.intdefine.} = 0

proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
  when not defined(emscripten):
    if not a.blockChunkSizeIncrease:
      let usedMem = a.occ #a.currMem # - a.freeMem
      when nimMaxHeap != 0:
        if usedMem > nimMaxHeap * 1024 * 1024:
          raiseOutOfMem()
      if usedMem < 64 * 1024:
        a.nextChunkSize = PageSize*4
      else:
        a.nextChunkSize = min(roundup(usedMem shr 2, PageSize), a.nextChunkSize * 2)
        a.nextChunkSize = min(a.nextChunkSize, MaxBigChunkSize)

  var size = size
  if size > a.nextChunkSize:
    result = cast[PBigChunk](osAllocPages(size))
  else:
    result = cast[PBigChunk](osTryAllocPages(a.nextChunkSize))
    if result == nil:
      result = cast[PBigChunk](osAllocPages(size))
      a.blockChunkSizeIncrease = true
    else:
      size = a.nextChunkSize

  incCurrMem(a, size)
  inc(a.freeMem, size)
  a.addHeapLink(result, size)
  when defined(debugHeapLinks):
    cprintf("owner: %p; result: %p; next pointer %p; size: %ld\n", addr(a),
      result, result.next, result.size)

  when defined(memtracker):
    trackLocation(addr result.size, sizeof(int))

  sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
  #zeroMem(result, size)
  result.next = nil
  result.prev = nil
  result.size = size
  # update next.prevSize:
  var nxt = cast[ByteAddress](result) +% size
  sysAssert((nxt and PageMask) == 0, "requestOsChunks 2")
  var next = cast[PChunk](nxt)
  if pageIndex(next) in a.chunkStarts:
    #echo("Next already allocated!")
    next.prevSize = size or (next.prevSize and 1)
  # set result.prevSize:
  var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
  var prv = cast[ByteAddress](result) -% lastSize
  sysAssert((nxt and PageMask) == 0, "requestOsChunks 3")
  var prev = cast[PChunk](prv)
  if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
    #echo("Prev already allocated!")
    result.prevSize = lastSize or (result.prevSize and 1)
  else:
    result.prevSize = 0 or (result.prevSize and 1) # unknown
    # but do not overwrite 'used' field
  a.lastSize = size # for next request
  sysAssert((cast[int](result) and PageMask) == 0, "requestOschunks: unaligned chunk")
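# Growth policy in concrete numbers (illustrative, assuming PageSize == 4096):
# while the heap holds less than 64 KiB the allocator asks the OS for 16 KiB
# chunks; beyond that each request is min(usedMem div 4, 2 * previous
# request), capped at MaxBigChunkSize, so OS round-trips become rarer as the
# heap grows. A failed osTryAllocPages blocks further increases for the rest
# of the region's lifetime via `blockChunkSizeIncrease`.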
proc isAccessible(a: MemRegion, p: pointer): bool {.inline.} =
  result = contains(a.chunkStarts, pageIndex(p))

proc contains[T](list, x: T): bool =
  var it = list
  while it != nil:
    if it == x: return true
    it = it.next

proc listAdd[T](head: var T, c: T) {.inline.} =
  sysAssert(c notin head, "listAdd 1")
  sysAssert c.prev == nil, "listAdd 2"
  sysAssert c.next == nil, "listAdd 3"
  c.next = head
  if head != nil:
    sysAssert head.prev == nil, "listAdd 4"
    head.prev = c
  head = c

proc listRemove[T](head: var T, c: T) {.inline.} =
  sysAssert(c in head, "listRemove")
  if c == head:
    head = c.next
    sysAssert c.prev == nil, "listRemove 2"
    if head != nil: head.prev = nil
  else:
    sysAssert c.prev != nil, "listRemove 3"
    c.prev.next = c.next
    if c.next != nil: c.next.prev = c.prev
  c.next = nil
  c.prev = nil

proc updatePrevSize(a: var MemRegion, c: PBigChunk,
                    prevSize: int) {.inline.} =
  var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
  sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "updatePrevSize")
  if isAccessible(a, ri):
    ri.prevSize = prevSize or (ri.prevSize and 1)

proc splitChunk2(a: var MemRegion, c: PBigChunk, size: int): PBigChunk =
  result = cast[PBigChunk](cast[ByteAddress](c) +% size)
  result.size = c.size - size
  track("result.size", addr result.size, sizeof(int))
  # XXX check if these two nil assignments are dead code given
  # addChunkToMatrix's implementation:
  result.next = nil
  result.prev = nil
  # size and not used:
  result.prevSize = size
  sysAssert((size and 1) == 0, "splitChunk 2")
  sysAssert((size and PageMask) == 0,
      "splitChunk: size is not a multiple of the PageSize")
  updatePrevSize(a, c, result.size)
  c.size = size
  incl(a, a.chunkStarts, pageIndex(result))

proc splitChunk(a: var MemRegion, c: PBigChunk, size: int) =
  let rest = splitChunk2(a, c, size)
  addChunkToMatrix(a, rest)

proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
  var c = c
  sysAssert(c.size >= PageSize, "freeBigChunk")
  inc(a.freeMem, c.size)
  c.prevSize = c.prevSize and not 1 # set 'used' to false
  when coalescLeft:
    let prevSize = c.prevSize
    if prevSize != 0:
      var le = cast[PChunk](cast[ByteAddress](c) -% prevSize)
      sysAssert((cast[ByteAddress](le) and PageMask) == 0, "freeBigChunk 4")
      if isAccessible(a, le) and chunkUnused(le):
        sysAssert(not isSmallChunk(le), "freeBigChunk 5")
        if not isSmallChunk(le) and le.size < MaxBigChunkSize:
          removeChunkFromMatrix(a, cast[PBigChunk](le))
          inc(le.size, c.size)
          excl(a.chunkStarts, pageIndex(c))
          c = cast[PBigChunk](le)
          if c.size > MaxBigChunkSize:
            let rest = splitChunk2(a, c, MaxBigChunkSize)
            addChunkToMatrix(a, c)
            c = rest
  when coalescRight:
    var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
    sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "freeBigChunk 2")
    if isAccessible(a, ri) and chunkUnused(ri):
      sysAssert(not isSmallChunk(ri), "freeBigChunk 3")
      if not isSmallChunk(ri) and c.size < MaxBigChunkSize:
        removeChunkFromMatrix(a, cast[PBigChunk](ri))
        inc(c.size, ri.size)
        excl(a.chunkStarts, pageIndex(ri))
        if c.size > MaxBigChunkSize:
          let rest = splitChunk2(a, c, MaxBigChunkSize)
          addChunkToMatrix(a, rest)
  addChunkToMatrix(a, c)
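# Coalescing sketch (my illustration, not from the original source): freeing
# chunk B in the layout [A free][B used][C free] first merges A+B by walking
# back over `prevSize` (coalescLeft), then merges the result with C by
# walking forward over `size` (coalescRight), so the chunk-start set and the
# TLSF matrix always describe maximal free runs. Merged chunks that would
# exceed MaxBigChunkSize are re-split so every matrix entry stays within the
# range mappingInsert can encode.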
proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
  sysAssert(size > 0, "getBigChunk 2")
  var size = size # roundup(size, PageSize)
  var fl, sl: int
  mappingSearch(size, fl, sl)
  sysAssert((size and PageMask) == 0, "getBigChunk: unaligned chunk")
  result = findSuitableBlock(a, fl, sl)
  if result == nil:
    if size < nimMinHeapPages * PageSize:
      result = requestOsChunks(a, nimMinHeapPages * PageSize)
      splitChunk(a, result, size)
    else:
      result = requestOsChunks(a, size)
      # if we over-allocated, split the chunk:
      if result.size > size:
        splitChunk(a, result, size)
  else:
    removeChunkFromMatrix2(a, result, fl, sl)
    if result.size >= size + PageSize:
      splitChunk(a, result, size)
  # set 'used' to true:
  result.prevSize = 1
  track("setUsedToFalse", addr result.size, sizeof(int))
  incl(a, a.chunkStarts, pageIndex(result))
  dec(a.freeMem, size)

proc getHugeChunk(a: var MemRegion; size: int): PBigChunk =
  result = cast[PBigChunk](osAllocPages(size))
  incCurrMem(a, size)
  # XXX add this to the heap links. But also remove it from it later.
  when false: a.addHeapLink(result, size)
  sysAssert((cast[ByteAddress](result) and PageMask) == 0, "getHugeChunk")
  result.next = nil
  result.prev = nil
  result.size = size
  # set 'used' to true:
  result.prevSize = 1
  incl(a, a.chunkStarts, pageIndex(result))

proc freeHugeChunk(a: var MemRegion; c: PBigChunk) =
  let size = c.size
  sysAssert(size >= HugeChunkSize, "freeHugeChunk: invalid size")
  excl(a.chunkStarts, pageIndex(c))
  decCurrMem(a, size)
  osDeallocPages(c, size)

proc getSmallChunk(a: var MemRegion): PSmallChunk =
  var res = getBigChunk(a, PageSize)
  sysAssert res.prev == nil, "getSmallChunk 1"
  sysAssert res.next == nil, "getSmallChunk 2"
  result = cast[PSmallChunk](res)
# -----------------------------------------------------------------------------
proc isAllocatedPtr(a: MemRegion, p: pointer): bool {.benign.}

when true:
  template allocInv(a: MemRegion): bool = true
else:
  proc allocInv(a: MemRegion): bool =
    ## checks some (not all yet) invariants of the allocator's data structures.
    for s in low(a.freeSmallChunks)..high(a.freeSmallChunks):
      var c = a.freeSmallChunks[s]
      while not (c == nil):
        if c.next == c:
          echo "[SYSASSERT] c.next == c"
          return false
        if not (c.size == s * MemAlign):
          echo "[SYSASSERT] c.size != s * MemAlign"
          return false
        var it = c.freeList
        while not (it == nil):
          if not (it.zeroField == 0):
            echo "[SYSASSERT] it.zeroField != 0"
            c_printf("%ld %p\n", it.zeroField, it)
            return false
          it = it.next
        c = c.next
    result = true

when false:
  var
    rsizes: array[50_000, int]
    rsizesLen: int

  proc trackSize(size: int) =
    rsizes[rsizesLen] = size
    inc rsizesLen

  proc untrackSize(size: int) =
    for i in 0 .. rsizesLen-1:
      if rsizes[i] == size:
        rsizes[i] = rsizes[rsizesLen-1]
        dec rsizesLen
        return
    c_fprintf(stdout, "%ld\n", size)
    sysAssert(false, "untracked size!")
else:
  template trackSize(x) = discard
  template untrackSize(x) = discard
when false:
  # not yet used by the GCs
  proc rawTryAlloc(a: var MemRegion; requestedSize: int): pointer =
    sysAssert(allocInv(a), "rawAlloc: begin")
    sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
    sysAssert(requestedSize >= sizeof(FreeCell), "rawAlloc: requested size too small")
    var size = roundup(requestedSize, MemAlign)
    inc a.occ, size
    trackSize(size)
    sysAssert(size >= requestedSize, "insufficient allocated size!")
    #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
    if size <= SmallChunkSize-smallChunkOverhead():
      # allocate a small block: for small chunks, we use only its next pointer
      var s = size div MemAlign
      var c = a.freeSmallChunks[s]
      if c == nil:
        result = nil
      else:
        sysAssert c.size == size, "rawAlloc 6"
        if c.freeList == nil:
          sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
                    "rawAlloc 7")
          result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
          inc(c.acc, size)
        else:
          result = c.freeList
          sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
          c.freeList = c.freeList.next
        dec(c.free, size)
        sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
        if c.free < size:
          listRemove(a.freeSmallChunks[s], c)
          sysAssert(allocInv(a), "rawAlloc: end listRemove test")
        sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
                  size == 0, "rawAlloc 21")
        sysAssert(allocInv(a), "rawAlloc: end small size")
    else:
      inc size, bigChunkOverhead()
      var fl, sl: int
      mappingSearch(size, fl, sl)
      sysAssert((size and PageMask) == 0, "getBigChunk: unaligned chunk")
      let c = findSuitableBlock(a, fl, sl)
      if c != nil:
        removeChunkFromMatrix2(a, c, fl, sl)
        if c.size >= size + PageSize:
          splitChunk(a, c, size)
        # set 'used' to true:
        c.prevSize = 1
        incl(a, a.chunkStarts, pageIndex(c))
        dec(a.freeMem, size)
        result = addr(c.data)
        sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
        sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
        if a.root == nil: a.root = getBottom(a)
        add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
      else:
        result = nil
proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
  when defined(nimTypeNames):
    inc(a.allocCounter)
  sysAssert(allocInv(a), "rawAlloc: begin")
  sysAssert(roundup(65, 8) == 72, "rawAlloc: roundup broken")
  sysAssert(requestedSize >= sizeof(FreeCell), "rawAlloc: requested size too small")
  var size = roundup(requestedSize, MemAlign)
  sysAssert(size >= requestedSize, "insufficient allocated size!")
  #c_fprintf(stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
  if size <= SmallChunkSize-smallChunkOverhead():
    # allocate a small block: for small chunks, we use only its next pointer
    var s = size div MemAlign
    var c = a.freeSmallChunks[s]
    if c == nil:
      c = getSmallChunk(a)
      c.freeList = nil
      sysAssert c.size == PageSize, "rawAlloc 3"
      c.size = size
      c.acc = size
      c.free = SmallChunkSize - smallChunkOverhead() - size
      c.next = nil
      c.prev = nil
      listAdd(a.freeSmallChunks[s], c)
      result = addr(c.data)
      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 4")
    else:
      sysAssert(allocInv(a), "rawAlloc: begin c != nil")
      sysAssert c.next != c, "rawAlloc 5"
      #if c.size != size:
      #  c_fprintf(stdout, "csize: %lld; size %lld\n", c.size, size)
      sysAssert c.size == size, "rawAlloc 6"
      if c.freeList == nil:
        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
                  "rawAlloc 7")
        result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
        inc(c.acc, size)
      else:
        result = c.freeList
        sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
        c.freeList = c.freeList.next
      dec(c.free, size)
      sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
      sysAssert(allocInv(a), "rawAlloc: end c != nil")
    sysAssert(allocInv(a), "rawAlloc: before c.free < size")
    if c.free < size:
      sysAssert(allocInv(a), "rawAlloc: before listRemove test")
      listRemove(a.freeSmallChunks[s], c)
      sysAssert(allocInv(a), "rawAlloc: end listRemove test")
    sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
              size == 0, "rawAlloc 21")
    sysAssert(allocInv(a), "rawAlloc: end small size")
    inc a.occ, size
    trackSize(c.size)
  else:
    size = requestedSize + bigChunkOverhead() # roundup(requestedSize+bigChunkOverhead(), PageSize)
    # allocate a large block
    var c = if size >= HugeChunkSize: getHugeChunk(a, size)
            else: getBigChunk(a, size)
    sysAssert c.prev == nil, "rawAlloc 10"
    sysAssert c.next == nil, "rawAlloc 11"
    result = addr(c.data)
    sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
    sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
    if a.root == nil: a.root = getBottom(a)
    add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
    inc a.occ, c.size
    trackSize(c.size)
  sysAssert(isAccessible(a, result), "rawAlloc 14")
  sysAssert(allocInv(a), "rawAlloc: end")
  when logAlloc: cprintf("var pointer_%p = alloc(%ld)\n", result, requestedSize)

proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer =
  result = rawAlloc(a, requestedSize)
  zeroMem(result, requestedSize)
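# Size-class arithmetic, as an illustration (assuming MemAlign == 8, which
# the `roundup(65, 8) == 72` assert above reflects): a request for 65 bytes
# is rounded up to 72 and served from freeSmallChunks[72 div 8] ==
# freeSmallChunks[9]; every chunk in that list carves a page into 72-byte
# cells. Anything above SmallChunkSize - smallChunkOverhead() bytes takes
# the big-chunk path instead.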
proc rawDealloc(a: var MemRegion, p: pointer) =
  when defined(nimTypeNames):
    inc(a.deallocCounter)
  #sysAssert(isAllocatedPtr(a, p), "rawDealloc: no allocated pointer")
  sysAssert(allocInv(a), "rawDealloc: begin")
  var c = pageAddr(p)
  if isSmallChunk(c):
    # `p` is within a small chunk:
    var c = cast[PSmallChunk](c)
    var s = c.size
    dec a.occ, s
    untrackSize(s)
    sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case A)"
    sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
              s == 0, "rawDealloc 3")
    var f = cast[ptr FreeCell](p)
    #echo("setting to nil: ", $cast[ByteAddress](addr(f.zeroField)))
    sysAssert(f.zeroField != 0, "rawDealloc 1")
    f.zeroField = 0
    f.next = c.freeList
    c.freeList = f
    when overwriteFree:
      # set to 0xff to check for usage after free bugs:
      nimSetMem(cast[pointer](cast[int](p) +% sizeof(FreeCell)), -1'i32,
                s -% sizeof(FreeCell))
    # check if it is not in the freeSmallChunks[s] list:
    if c.free < s:
      # add it to the freeSmallChunks[s] array:
      listAdd(a.freeSmallChunks[s div MemAlign], c)
      inc(c.free, s)
    else:
      inc(c.free, s)
      if c.free == SmallChunkSize-smallChunkOverhead():
        listRemove(a.freeSmallChunks[s div MemAlign], c)
        c.size = SmallChunkSize
        freeBigChunk(a, cast[PBigChunk](c))
    sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
              s == 0, "rawDealloc 2")
  else:
    # set to 0xff to check for usage after free bugs:
    when overwriteFree: nimSetMem(p, -1'i32, c.size -% bigChunkOverhead())
    # free big chunk
    var c = cast[PBigChunk](c)
    dec a.occ, c.size
    untrackSize(c.size)
    sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case B)"
    a.deleted = getBottom(a)
    del(a, a.root, cast[int](addr(c.data)))
    if c.size >= HugeChunkSize: freeHugeChunk(a, c)
    else: freeBigChunk(a, c)
  sysAssert(allocInv(a), "rawDealloc: end")
  when logAlloc: cprintf("dealloc(pointer_%p)\n", p)
proc isAllocatedPtr(a: MemRegion, p: pointer): bool =
  if isAccessible(a, p):
    var c = pageAddr(p)
    if not chunkUnused(c):
      if isSmallChunk(c):
        var c = cast[PSmallChunk](c)
        var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
                     smallChunkOverhead()
        result = (c.acc >% offset) and (offset %% c.size == 0) and
                 (cast[ptr FreeCell](p).zeroField >% 1)
      else:
        var c = cast[PBigChunk](c)
        result = p == addr(c.data) and cast[ptr FreeCell](p).zeroField >% 1

proc prepareForInteriorPointerChecking(a: var MemRegion) {.inline.} =
  a.minLargeObj = lowGauge(a.root)
  a.maxLargeObj = highGauge(a.root)

proc interiorAllocatedPtr(a: MemRegion, p: pointer): pointer =
  if isAccessible(a, p):
    var c = pageAddr(p)
    if not chunkUnused(c):
      if isSmallChunk(c):
        var c = cast[PSmallChunk](c)
        var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
                     smallChunkOverhead()
        if c.acc >% offset:
          sysAssert(cast[ByteAddress](addr(c.data)) +% offset ==
                    cast[ByteAddress](p), "offset is not what you think it is")
          var d = cast[ptr FreeCell](cast[ByteAddress](addr(c.data)) +%
                    offset -% (offset %% c.size))
          if d.zeroField >% 1:
            result = d
            sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
      else:
        var c = cast[PBigChunk](c)
        var d = addr(c.data)
        if p >= d and cast[ptr FreeCell](d).zeroField >% 1:
          result = d
          sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
  else:
    var q = cast[int](p)
    if q >=% a.minLargeObj and q <=% a.maxLargeObj:
      # this check is highly effective! Test fails for 99.96% of all checks on
      # an x86-64.
      var avlNode = inRange(a.root, q)
      if avlNode != nil:
        var k = cast[pointer](avlNode.key)
        var c = cast[PBigChunk](pageAddr(k))
        sysAssert(addr(c.data) == k, " k is not the same as addr(c.data)!")
        if cast[ptr FreeCell](k).zeroField >% 1:
          result = k
          sysAssert isAllocatedPtr(a, result), " result wrong pointer!"
proc ptrSize(p: pointer): int =
  var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
  var c = pageAddr(p)
  sysAssert(not chunkUnused(c), "ptrSize")
  result = c.size -% sizeof(FreeCell)
  if not isSmallChunk(c):
    dec result, bigChunkOverhead()

proc alloc(allocator: var MemRegion, size: Natural): pointer {.gcsafe.} =
  result = rawAlloc(allocator, size+sizeof(FreeCell))
  cast[ptr FreeCell](result).zeroField = 1 # mark it as used
  sysAssert(not isAllocatedPtr(allocator, result), "alloc")
  result = cast[pointer](cast[ByteAddress](result) +% sizeof(FreeCell))
  track("alloc", result, size)

proc alloc0(allocator: var MemRegion, size: Natural): pointer =
  result = alloc(allocator, size)
  zeroMem(result, size)
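when false:
  # Minimal usage sketch of the raw API (mine, not part of the allocator;
  # guarded out like the other example code in this file). `alloc` prepends
  # a FreeCell header with zeroField == 1, which is why `dealloc` below must
  # step back over it before handing the block to rawDealloc:
  var region: MemRegion
  let p = alloc0(region, 128)   # 128 zeroed bytes, header hidden from caller
  dealloc(region, p)
  deallocOsPages(region)        # return everything to the OS at shutdown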
proc dealloc(allocator: var MemRegion, p: pointer) =
  sysAssert(p != nil, "dealloc: p is nil")
  var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
  sysAssert(x != nil, "dealloc: x is nil")
  sysAssert(isAccessible(allocator, x), "is not accessible")
  sysAssert(cast[ptr FreeCell](x).zeroField == 1, "dealloc: object header corrupted")
  rawDealloc(allocator, x)
  sysAssert(not isAllocatedPtr(allocator, x), "dealloc: object still accessible")
  track("dealloc", p, 0)

proc realloc(allocator: var MemRegion, p: pointer, newsize: Natural): pointer =
  if newsize > 0:
    result = alloc0(allocator, newsize)
    if p != nil:
      copyMem(result, p, min(ptrSize(p), newsize))
      dealloc(allocator, p)
  elif p != nil:
    dealloc(allocator, p)

proc deallocOsPages(a: var MemRegion) =
  # we free every 'ordinarily' allocated page by iterating over the page bits:
  var it = addr(a.heapLinks)
  while true:
    let next = it.next
    for i in 0..it.len-1:
      let (p, size) = it.chunks[i]
      when defined(debugHeapLinks):
        cprintf("owner %p; dealloc A: %p size: %ld; next: %p\n", addr(a),
          it, size, next)
      sysAssert size >= PageSize, "origSize too small"
      osDeallocPages(p, size)
    it = next
    if it == nil: break
  # And then we free the pages that are in use for the page bits:
  llDeallocAll(a)

proc getFreeMem(a: MemRegion): int {.inline.} = result = a.freeMem
proc getTotalMem(a: MemRegion): int {.inline.} = result = a.currMem
proc getOccupiedMem(a: MemRegion): int {.inline.} =
  result = a.occ
  # a.currMem - a.freeMem

when defined(nimTypeNames):
  proc getMemCounters(a: MemRegion): (int, int) {.inline.} =
    (a.allocCounter, a.deallocCounter)
# ---------------------- thread memory region -------------------------------

template instantiateForRegion(allocator: untyped) {.dirty.} =
  {.push stackTrace: off.}

  when defined(nimFulldebug):
    proc interiorAllocatedPtr*(p: pointer): pointer =
      result = interiorAllocatedPtr(allocator, p)

    proc isAllocatedPtr*(p: pointer): bool =
      let p = cast[pointer](cast[ByteAddress](p)-%ByteAddress(sizeof(Cell)))
      result = isAllocatedPtr(allocator, p)

  proc deallocOsPages = deallocOsPages(allocator)

  proc alloc(size: Natural): pointer =
    result = alloc(allocator, size)

  proc alloc0(size: Natural): pointer =
    result = alloc0(allocator, size)

  proc dealloc(p: pointer) =
    dealloc(allocator, p)

  proc realloc(p: pointer, newSize: Natural): pointer =
    result = realloc(allocator, p, newSize)

  when false:
    proc countFreeMem(): int =
      # only used for assertions
      var it = allocator.freeChunksList
      while it != nil:
        inc(result, it.size)
        it = it.next

  proc getFreeMem(): int =
    result = allocator.freeMem
    #sysAssert(result == countFreeMem())

  proc getTotalMem(): int = return allocator.currMem
  proc getOccupiedMem(): int = return allocator.occ #getTotalMem() - getFreeMem()
  proc getMaxMem*(): int = return getMaxMem(allocator)

  when defined(nimTypeNames):
    proc getMemCounters*(): (int, int) = getMemCounters(allocator)

  # -------------------- shared heap region ----------------------------------
  when hasThreadSupport:
    var sharedHeap: MemRegion
    var heapLock: SysLock
    initSysLock(heapLock)

  proc allocShared(size: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = alloc(sharedHeap, size)
      releaseSys(heapLock)
    else:
      result = alloc(size)

  proc allocShared0(size: Natural): pointer =
    result = allocShared(size)
    zeroMem(result, size)

  proc deallocShared(p: pointer) =
    when hasThreadSupport:
      acquireSys(heapLock)
      dealloc(sharedHeap, p)
      releaseSys(heapLock)
    else:
      dealloc(p)

  proc reallocShared(p: pointer, newSize: Natural): pointer =
    when hasThreadSupport:
      acquireSys(heapLock)
      result = realloc(sharedHeap, p, newSize)
      releaseSys(heapLock)
    else:
      result = realloc(p, newSize)

  when hasThreadSupport:
    template sharedMemStatsShared(v: int) =
      acquireSys(heapLock)
      result = v
      releaseSys(heapLock)

    proc getFreeSharedMem(): int =
      sharedMemStatsShared(sharedHeap.freeMem)

    proc getTotalSharedMem(): int =
      sharedMemStatsShared(sharedHeap.currMem)

    proc getOccupiedSharedMem(): int =
      sharedMemStatsShared(sharedHeap.occ)
      #sharedMemStatsShared(sharedHeap.currMem - sharedHeap.freeMem)
  {.pop.}

{.pop.}
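# Usage note (an assumption about the surrounding runtime, not stated in this
# file): this module is `include`d by the memory manager, which then invokes
# `instantiateForRegion` with a thread-local MemRegion to stamp out the
# parameterless alloc/dealloc/realloc entry points and, when hasThreadSupport
# is set, the lock-protected `allocShared` family on top of a single shared
# heap.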