MarkedBlock.cpp

/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "Operations.h"

namespace JSC {
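
// create() recycles storage rather than allocating afresh: the ASSERT checks
// that the DeadBlock sits on a block-aligned boundary, and placement new then
// constructs the MarkedBlock directly over the dead block's memory.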
MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}
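
// m_endAtom is one past the last atom index at which a whole cell still fits.
// Normal blocks cap out at atomsPerBlock; an allocator whose cellSize() is 0
// (the oversize case) derives the cap from the region's actual block size.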
MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
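
// zap() clears the cell's Structure pointer, which is what isZapped() checks,
// so a cell's destructor runs at most once even if the block is swept again
// before being reused.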
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

    cell->methodTableForDestruction()->destroy(cell);
    cell->zap();
}
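
// The block state, sweep mode, and destructor type are all template
// parameters, so each combination compiles to its own tight loop with the
// per-cell branches below folded away at compile time.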
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
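        // Cells in a New block were never constructed, so there is nothing to
        // destruct; only cells that previously held live objects are destroyed.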
        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}
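
// sweep() maps the runtime destructor type onto a compile-time template
// argument for sweepHelper(). A SweepOnly pass exists only to run destructors,
// so a block whose cells have none can return an empty FreeList immediately.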
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}
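
// sweepHelper() dispatches on the block's lifecycle state. Only New and
// Marked blocks can actually be swept; a FreeListed block has already been
// swept this cycle, and sweeping an Allocated block would treat live cells
// as garbage, hence the release assert.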
template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}
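
// Passed to forEachCell() below: records every live cell of the block in the
// newlyAllocated bitmap so that liveness survives without mark bits.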
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};
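
// Called when allocation out of this block stops mid-cycle. A FreeListed
// block's liveness is implicit in its free list, which is about to be
// discarded, so make it explicit: set the newlyAllocated bit for every cell,
// then clear it again (and zap the cell) for each entry still on the free
// list.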
void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.
        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.
    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

} // namespace JSC