// ExecutableAllocator.cpp
  1. /*
  2. * Copyright (C) 2008 Apple Inc. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. *
  13. * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
  14. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  15. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  16. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
  17. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  18. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  19. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  20. * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
  21. * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  22. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  23. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  24. */
  25. #include "config.h"
  26. #include "ExecutableAllocator.h"
  27. #include "DemandExecutableAllocator.h"
  28. #include "JSCBridge.h"
  29. #include "SuperRegion.h"
  30. #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
  31. #include "CodeProfiling.h"
  32. #include <wtf/HashSet.h>
  33. #include <wtf/MetaAllocator.h>
  34. #include <wtf/PageReservation.h>
  35. #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
  36. #include <wtf/PassOwnPtr.h>
  37. #endif
  38. #include <wtf/ThreadingPrimitives.h>
  39. #include <wtf/VMTags.h>
  40. #endif
  41. #if OS(PSP2)
  42. #include <manx/Memblock.h>
  43. #endif
  44. // Uncomment to create an artificial executable memory usage limit. This limit
  45. // is imperfect and is primarily useful for testing the VM's ability to handle
  46. // out-of-executable-memory situations.
  47. // #define EXECUTABLE_MEMORY_LIMIT 1000000
  48. #if ENABLE(ASSEMBLER)
  49. using namespace WTF;
  50. namespace JSC {
  51. #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
  52. DemandExecutableAllocator::DemandExecutableAllocator()
  53. #if ENABLE(DETACHED_JIT)
  54. : MetaAllocator(jitAllocationGranule, MetaAllocator::e_DemandAllocatorType)
  55. #else
  56. : MetaAllocator(jitAllocationGranule)
  57. #endif
  58. {
  59. MutexLocker lock(allocatorsMutex());
  60. allocators().add(this);
  61. // Don't preallocate any memory here.
  62. }
DemandExecutableAllocator::~DemandExecutableAllocator()
{
    // First drop out of the global registry (under the lock) so the static
    // walkers can no longer observe a half-destroyed allocator.
    {
        MutexLocker lock(allocatorsMutex());
        allocators().remove(this);
    }
    // Release every page reservation this allocator ever made.
    for (unsigned i = 0; i < reservations.size(); ++i)
        reservations.at(i).deallocate();
#if ENABLE(DETACHED_JIT)
    // Revert the dispatch tag before the MetaAllocator base destructor runs,
    // so its switch-based dispatch no longer routes through this (now dead)
    // derived type. NOTE(review): ordering appears deliberate -- confirm
    // against MetaAllocator's destructor before reordering.
    RELEASE_ASSERT(m_type == MetaAllocator::e_DemandAllocatorType);
    m_type = MetaAllocator::e_MetaAllocatorBaseType;
#endif
}
#if ENABLE(DETACHED_JIT)
// Concrete destruction entry point used by MetaAllocator's tag-based
// dispatch (see MetaAllocator::DETACHED_JIT_DTOR below in this file):
// deletes the allocator through its real type.
void DemandExecutableAllocator::DETACHED_JIT_DTOR()
{
    delete this;
}
#endif
  82. size_t DemandExecutableAllocator::bytesAllocatedByAllAllocators()
  83. {
  84. size_t total = 0;
  85. MutexLocker lock(allocatorsMutex());
  86. for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
  87. total += (*allocator)->bytesAllocated();
  88. return total;
  89. }
  90. size_t DemandExecutableAllocator::bytesCommittedByAllocactors()
  91. {
  92. size_t total = 0;
  93. MutexLocker lock(allocatorsMutex());
  94. for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
  95. total += (*allocator)->bytesCommitted();
  96. return total;
  97. }
  98. #if ENABLE(META_ALLOCATOR_PROFILE)
  99. void DemandExecutableAllocator::dumpProfileFromAllAllocators()
  100. {
  101. MutexLocker lock(allocatorsMutex());
  102. for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
  103. (*allocator)->dumpProfile();
  104. }
  105. #endif
  106. void* DemandExecutableAllocator::allocateNewSpace(size_t& numPages)
  107. {
  108. size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
  109. ASSERT(newNumPages >= numPages);
  110. numPages = newNumPages;
  111. #ifdef EXECUTABLE_MEMORY_LIMIT
  112. if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
  113. return 0;
  114. #endif
  115. PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
  116. RELEASE_ASSERT(reservation);
  117. reservations.append(reservation);
  118. return reservation.base();
  119. }
// MetaAllocator callback: back a reserved page with real memory, using the
// executable pool's protection flags.
void DemandExecutableAllocator::notifyNeedPage(void* page)
{
    OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
}
// MetaAllocator callback: return a page's backing memory to the OS; the
// address range itself stays reserved for reuse.
void DemandExecutableAllocator::notifyPageIsFree(void* page)
{
    OSAllocator::decommit(page, pageSize());
}
  128. #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
// In the WX-exclusive configuration each ExecutableAllocator instance owns
// its own DemandExecutableAllocator (created in the constructor), so there
// is no process-wide state to set up here.
void ExecutableAllocator::initializeAllocator()
{
}
  132. #else
// Process-wide allocator shared by all ExecutableAllocator instances in the
// non-WX-exclusive configuration.
static DemandExecutableAllocator* gAllocator;
  134. #if ENABLE(DETACHED_JIT)
  135. #if BUILDING_DETACHED_JIT
  136. void ExecutableAllocator::setMetaAllocator(MetaAllocator* metaAllocator)
  137. {
  138. #if !BUILDING_DETACHED_JIT
  139. ASSERT_NOT_REACHED();
  140. #else
  141. ASSERT(!gAllocator);
  142. ASSERT(metaAllocator);
  143. gAllocator = reinterpret_cast<DemandExecutableAllocator*>(metaAllocator);
  144. #endif
  145. }
  146. #else
  147. MetaAllocator* ExecutableAllocator::metaAllocator()
  148. {
  149. if (!gAllocator) {
  150. initializeAllocator();
  151. }
  152. ASSERT(gAllocator);
  153. return gAllocator;
  154. }
  155. #endif
  156. #endif // #if ENABLE(DETACHED_JIT)
namespace {
// File-local accessor for the process-wide demand allocator.
static inline DemandExecutableAllocator* allocator()
{
    return gAllocator;
}
}
  163. #if !(ENABLE(DETACHED_JIT) && BUILDING_DETACHED_JIT)
// Create the process-wide DemandExecutableAllocator and register it with the
// code-profiling machinery. Must run exactly once (asserted below).
void ExecutableAllocator::initializeAllocator()
{
    ASSERT(!gAllocator);
    gAllocator = new DemandExecutableAllocator();
    CodeProfiling::notifyAllocator(gAllocator);
}
  170. #endif
  171. #endif
// In the WX-exclusive configuration each VM gets a private allocator;
// otherwise every instance shares the process-wide gAllocator.
ExecutableAllocator::ExecutableAllocator(VM&)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
: m_allocator(adoptPtr(new DemandExecutableAllocator()))
#endif
{
    // NOTE(review): in the WX-exclusive build, allocator() presumably
    // resolves to a member accessor over m_allocator (the file-local
    // allocator() above is only compiled in the other branch) -- confirm
    // against the class header.
    ASSERT(allocator());
}
ExecutableAllocator::~ExecutableAllocator()
{
    // Nothing to do here; any per-instance allocator is torn down by its
    // own smart-pointer member.
}
// The demand allocator never fails to initialize, so this allocator is
// always usable.
bool ExecutableAllocator::isValid() const
{
    return true;
}
// Report executable-memory pressure once more than half of the artificial
// EXECUTABLE_MEMORY_LIMIT is in use. Without that (normally disabled) limit
// there is no way to run out, so we never report pressure.
bool ExecutableAllocator::underMemoryPressure()
{
#ifdef EXECUTABLE_MEMORY_LIMIT
    return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
#else
    return false;
#endif
}
// Return a multiplier (always >= 1.0) expressing how close executable memory
// would be to the artificial limit if addedMemoryUsage extra bytes were
// allocated. With the limit disabled (the default) this is always 1.0.
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    double result;
#ifdef EXECUTABLE_MEMORY_LIMIT
    size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
    if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
        bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
    // At the limit the denominator is zero; IEEE double division then yields
    // +infinity, i.e. maximal pressure. NOTE(review): this looks intentional
    // (saturating behavior), not a bug -- confirm callers tolerate +inf.
    result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
        (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
#else
    UNUSED_PARAM(addedMemoryUsage);
    result = 1.0;
#endif
    // Never report less than "no pressure".
    if (result < 1.0)
        result = 1.0;
    return result;
}
// Allocate sizeInBytes of executable memory from the shared allocator.
// Returns 0 on failure unless effort is JITCompilationMustSucceed, in which
// case allocation failure is fatal (RELEASE_ASSERT).
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
    RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
    return result.release();
}
// Total bytes currently committed across all demand allocators. (The odd
// "Allocactors" spelling is a typo baked into the callee's interface.)
size_t ExecutableAllocator::committedByteCount()
{
    return DemandExecutableAllocator::bytesCommittedByAllocactors();
}
  221. #if ENABLE(META_ALLOCATOR_PROFILE)
// Forward profile dumping to every registered demand allocator.
void ExecutableAllocator::dumpProfile()
{
    DemandExecutableAllocator::dumpProfileFromAllAllocators();
}
  226. #endif
  227. #endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
  228. #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
  229. #if OS(WINDOWS) || OS(ORBIS)
#error "ASSEMBLER_WX_EXCLUSIVE not yet supported on this platform."
  231. #elif OS(PSP2)
  232. void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
  233. {
  234. Manx::Memblock::setWritable(setting == ExecutableAllocator::Writable);
  235. }
  236. #else
  237. void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
  238. {
  239. size_t pageSize = WTF::pageSize();
  240. // Calculate the start of the page containing this region,
  241. // and account for this extra memory within size.
  242. intptr_t startPtr = reinterpret_cast<intptr_t>(start);
  243. intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
  244. void* pageStart = reinterpret_cast<void*>(pageStartPtr);
  245. size += (startPtr - pageStartPtr);
  246. // Round size up
  247. size += (pageSize - 1);
  248. size &= ~(pageSize - 1);
  249. mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
  250. }
  251. #endif
  252. #endif
  253. }
  254. #if ENABLE(DETACHED_JIT)
  255. namespace WTF {
// Tag-based destruction dispatch: m_type apparently stands in for virtual
// dispatch in the detached-JIT configuration, routing deletion to the
// concrete allocator class.
void MetaAllocator::DETACHED_JIT_DTOR()
{
    switch (m_type) {
    case e_MetaAllocatorBaseType: break; // no 'delete this' because MetaAllocator is an abstract class
    case e_DemandAllocatorType: reinterpret_cast<JSC::DemandExecutableAllocator*>(this)->DETACHED_JIT_DTOR(); return;
    case e_FixedAllocatorType: break; // not supported yet
    case e_SuperRegionType:
        // SuperRegion is only destructible outside the detached-JIT process;
        // in that build the macro below presumably compiles to a no-op after
        // the real destruction -- confirm the macro's definition.
#if !BUILDING_DETACHED_JIT
        reinterpret_cast<JSC::SuperRegion*>(this)->DETACHED_JIT_DTOR();
#endif
        ASSERT_NOT_REACHED_BY_DETACHED_JIT();
        return;
    }
    // Unhandled tag values are a fatal logic error.
    RELEASE_ASSERT_NOT_REACHED();
    return;
}
// Tag-based dispatch of allocateNewSpace to the concrete allocator selected
// by m_type (same scheme as DETACHED_JIT_DTOR above).
void * MetaAllocator::allocateNewSpace(size_t& numPages)
{
    switch (m_type) {
    case e_MetaAllocatorBaseType: break;
    case e_DemandAllocatorType: return reinterpret_cast<JSC::DemandExecutableAllocator*>(this)->allocateNewSpace(numPages);
    case e_FixedAllocatorType: break;
    case e_SuperRegionType:
        // SuperRegion allocation is unavailable inside the detached JIT.
#if !BUILDING_DETACHED_JIT
        return reinterpret_cast<JSC::SuperRegion*>(this)->allocateNewSpace(numPages);
#endif
        ASSERT_NOT_REACHED_BY_DETACHED_JIT();
        return NULL;
    }
    // Unhandled tag values are a fatal logic error.
    RELEASE_ASSERT(false);
    return NULL;
}
// Tag-based dispatch of the notifyNeedPage callback to the concrete
// allocator selected by m_type.
void MetaAllocator::notifyNeedPage(void* page)
{
    switch (m_type) {
    case e_MetaAllocatorBaseType: break;
    case e_DemandAllocatorType: reinterpret_cast<JSC::DemandExecutableAllocator*>(this)->notifyNeedPage(page); return;
    case e_FixedAllocatorType: break; // not implemented yet
    case e_SuperRegionType:
        // SuperRegion paging is unavailable inside the detached JIT.
#if !BUILDING_DETACHED_JIT
        reinterpret_cast<JSC::SuperRegion*>(this)->notifyNeedPage(page);
#endif
        ASSERT_NOT_REACHED_BY_DETACHED_JIT();
        return;
    }
    // Unhandled tag values are a fatal logic error.
    RELEASE_ASSERT(false);
    return;
}
// Tag-based dispatch of the notifyPageIsFree callback to the concrete
// allocator selected by m_type.
void MetaAllocator::notifyPageIsFree(void* page)
{
    switch (m_type) {
    case e_MetaAllocatorBaseType: break;
    case e_DemandAllocatorType: reinterpret_cast<JSC::DemandExecutableAllocator*>(this)->notifyPageIsFree(page); return;
    case e_FixedAllocatorType: break; // not implemented yet
    case e_SuperRegionType:
        // SuperRegion paging is unavailable inside the detached JIT.
#if !BUILDING_DETACHED_JIT
        reinterpret_cast<JSC::SuperRegion*>(this)->notifyPageIsFree(page);
#endif
        ASSERT_NOT_REACHED_BY_DETACHED_JIT();
        return;
    }
    // Unhandled tag values are a fatal logic error.
    RELEASE_ASSERT(false);
    return;
}
  320. } // namespace WTF
  321. #endif
#endif // ENABLE(ASSEMBLER)