1/*
2 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "ExecutableAllocator.h"
28
29#if ENABLE(JIT)
30
31#include "CodeProfiling.h"
32#include "ExecutableAllocationFuzz.h"
33#include "JSCInlines.h"
34#include <wtf/MetaAllocator.h>
35#include <wtf/PageReservation.h>
36
37#if OS(DARWIN)
38#include <sys/mman.h>
39#endif
40
41#if PLATFORM(IOS_FAMILY)
42#include <wtf/cocoa/Entitlements.h>
43#endif
44
45#include "LinkBuffer.h"
46#include "MacroAssembler.h"
47
48#if PLATFORM(COCOA)
49#define HAVE_REMAP_JIT 1
50#endif
51
52#if HAVE(REMAP_JIT)
53#if CPU(ARM64) && PLATFORM(IOS_FAMILY)
54#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
55#endif
56#endif
57
58#if OS(DARWIN)
59#include <mach/mach.h>
60extern "C" {
61 /* Routine mach_vm_remap */
62#ifdef mig_external
63 mig_external
64#else
65 extern
66#endif /* mig_external */
67 kern_return_t mach_vm_remap
68 (
69 vm_map_t target_task,
70 mach_vm_address_t *target_address,
71 mach_vm_size_t size,
72 mach_vm_offset_t mask,
73 int flags,
74 vm_map_t src_task,
75 mach_vm_address_t src_address,
76 boolean_t copy,
77 vm_prot_t *cur_protection,
78 vm_prot_t *max_protection,
79 vm_inherit_t inheritance
80 );
81}
82
83#endif
84
85namespace JSC {
86
87using namespace WTF;
88
89#if defined(FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB) && FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB > 0
90static const size_t fixedExecutableMemoryPoolSize = FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB * 1024 * 1024;
91#elif CPU(ARM)
92static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
93#elif CPU(ARM64)
94static const size_t fixedExecutableMemoryPoolSize = 128 * 1024 * 1024;
95#elif CPU(X86_64)
96static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
97#else
98static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
99#endif
100
101#if CPU(ARM)
102static const double executablePoolReservationFraction = 0.15;
103#else
104static const double executablePoolReservationFraction = 0.25;
105#endif
106
107#if ENABLE(SEPARATED_WX_HEAP)
108JS_EXPORT_PRIVATE bool useFastPermisionsJITCopy { false };
109JS_EXPORT_PRIVATE JITWriteSeparateHeapsFunction jitWriteSeparateHeapsFunction;
110#endif
111
112#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT)
113static uintptr_t startOfFixedWritableMemoryPool;
114#endif
115
116class FixedVMPoolExecutableAllocator;
117static FixedVMPoolExecutableAllocator* allocator = nullptr;
118
119static bool s_isJITEnabled = true;
120static bool isJITEnabled()
121{
122#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
123 return processHasEntitlement("dynamic-codesigning") && s_isJITEnabled;
124#else
125 return s_isJITEnabled;
126#endif
127}
128
// Globally enables or disables JIT memory allocation for this process.
// Must be called before the fixed pool allocator is created (hence the
// ASSERT(!allocator)); it cannot revoke a reservation that already exists.
void ExecutableAllocator::setJITEnabled(bool enabled)
{
    ASSERT(!allocator);
    if (s_isJITEnabled == enabled)
        return;

    s_isJITEnabled = enabled;

#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
    if (!enabled) {
        // Deliberately consume this process's ability to create RWX/MAP_JIT
        // mappings: map one executable page, unmap it, and then verify that a
        // second attempt fails. After this, no executable memory can be
        // created for the remainder of the process lifetime.
        constexpr size_t size = 1;
        constexpr int protection = PROT_READ | PROT_WRITE | PROT_EXEC;
        constexpr int flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
        constexpr int fd = OSAllocator::JSJITCodePages;
        void* allocation = mmap(nullptr, size, protection, flags, fd, 0);
        // mmap reports failure as MAP_FAILED, i.e. (void*)-1.
        const void* executableMemoryAllocationFailure = reinterpret_cast<void*>(-1);
        RELEASE_ASSERT_WITH_MESSAGE(allocation && allocation != executableMemoryAllocationFailure, "We should not have allocated executable memory before disabling the JIT.");
        RELEASE_ASSERT_WITH_MESSAGE(!munmap(allocation, size), "Unmapping executable memory should succeed so we do not have any executable memory in the address space");
        RELEASE_ASSERT_WITH_MESSAGE(mmap(nullptr, size, protection, flags, fd, 0) == executableMemoryAllocationFailure, "Allocating executable memory should fail after setJITEnabled(false) is called.");
    }
#endif
}
151
// Executable-memory allocator backed by a single fixed-size virtual memory
// reservation created at construction time. All JIT code lives inside this
// reservation; the MetaAllocator base class parcels it out, and pages are
// committed/decommitted (or madvise'd) on demand. Depending on platform
// configuration it also sets up W^X protection: either per-thread fast RWX
// permission switching, or a separate writable alias mapping reached only
// through a generated write thunk.
class FixedVMPoolExecutableAllocator : public MetaAllocator {
    WTF_MAKE_FAST_ALLOCATED;
public:
    FixedVMPoolExecutableAllocator()
        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
    {
        // When the JIT is disabled we make no reservation at all; every query
        // on this allocator then reports an empty pool.
        if (!isJITEnabled())
            return;

        // Pool size: runtime option wins; otherwise the per-CPU compile-time
        // default (fixedExecutableMemoryPoolSize).
        size_t reservationSize;
        if (Options::jitMemoryReservationSize())
            reservationSize = Options::jitMemoryReservationSize();
        else
            reservationSize = fixedExecutableMemoryPoolSize;
        // At least two pages: the separated-WX configuration steals the first
        // page of the pool for the write thunk stub.
        reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2);

        auto tryCreatePageReservation = [] (size_t reservationSize) {
#if OS(LINUX)
            // If we use uncommitted reservation, mmap operation is recorded with small page size in perf command's output.
            // This makes the following JIT code logging broken and some of JIT code is not recorded correctly.
            // To avoid this problem, we use committed reservation if we need perf JITDump logging.
            if (Options::logJITCodeForPerf())
                return PageReservation::reserveAndCommitWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#endif
            return PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        };

        // Reservation can fail (e.g. address space exhaustion); in that case
        // m_reservation stays null and the pool is empty.
        m_reservation = tryCreatePageReservation(reservationSize);
        if (m_reservation) {
            ASSERT(m_reservation.size() == reservationSize);
            void* reservationBase = m_reservation.base();

#if ENABLE(FAST_JIT_PERMISSIONS) && !ENABLE(SEPARATED_WX_HEAP)
            // Fast permissions are mandatory in this configuration: flip this
            // thread's view of the JIT region to execute-only (RX) now.
            RELEASE_ASSERT(os_thread_self_restrict_rwx_is_supported());
            os_thread_self_restrict_rwx_to_rx();

#else // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
#if ENABLE(FAST_JIT_PERMISSIONS)
            // Prefer fast per-thread RWX switching when the hardware supports
            // it; otherwise fall back to the separated writable heap below.
            if (os_thread_self_restrict_rwx_is_supported()) {
                useFastPermisionsJITCopy = true;
                os_thread_self_restrict_rwx_to_rx();
            } else
#endif
            if (Options::useSeparatedWXHeap()) {
                // First page of our JIT allocation is reserved.
                ASSERT(reservationSize >= pageSize() * 2);
                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
                reservationSize -= pageSize();
                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
            }
#endif // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)

            // Hand the (possibly shrunken) region to the MetaAllocator.
            addFreshFreeSpace(reservationBase, reservationSize);

            void* reservationEnd = reinterpret_cast<uint8_t*>(reservationBase) + reservationSize;

            m_memoryStart = MacroAssemblerCodePtr<ExecutableMemoryPtrTag>(tagCodePtr<ExecutableMemoryPtrTag>(reservationBase));
            m_memoryEnd = MacroAssemblerCodePtr<ExecutableMemoryPtrTag>(tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd));
        }
    }

    virtual ~FixedVMPoolExecutableAllocator();

    // Untagged bounds of the allocatable JIT region (excluding any page
    // stolen for the write thunk stub).
    void* memoryStart() { return m_memoryStart.untaggedExecutableAddress(); }
    void* memoryEnd() { return m_memoryEnd.untaggedExecutableAddress(); }
    // True when pc lies inside the JIT pool: [memoryStart, memoryEnd).
    bool isJITPC(void* pc) { return memoryStart() <= pc && pc < memoryEnd(); }

protected:
    FreeSpacePtr allocateNewSpace(size_t&) override
    {
        // We're operating in a fixed pool, so new allocation is always prohibited.
        return nullptr;
    }

    // Called by MetaAllocator when a free page becomes in-use: commit it.
    // (No-op where MADV_FREE-style lazy reclamation keeps pages committed.)
    void notifyNeedPage(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        UNUSED_PARAM(page);
#else
        m_reservation.commit(page, pageSize());
#endif
    }

    // Called by MetaAllocator when a page is no longer in use: return its
    // backing store to the OS (madvise(MADV_FREE) or decommit).
    void notifyPageIsFree(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        // madvise can transiently fail with EAGAIN; retry until it succeeds.
        for (;;) {
            int result = madvise(page, pageSize(), MADV_FREE);
            if (!result)
                return;
            ASSERT(result == -1);
            if (errno != EAGAIN) {
                RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
            }
        }
#else
        m_reservation.decommit(page, pageSize());
#endif
    }

private:
#if OS(DARWIN) && HAVE(REMAP_JIT)
    // Sets up the separated writable/executable heaps: creates a second,
    // randomly-placed writable mapping of the JIT region, generates the write
    // thunk into [stubBase, stubBase+stubSize), then locks down protections so
    // the executable mapping is never writable and the writable mapping is
    // never executable. On remap failure it returns early, leaving the single
    // RWX-style mapping in place.
    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
    {
        mach_vm_address_t writableAddr = 0;

        // Create a second mapping of the JIT region at a random address.
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
#if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR;
#endif
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);

        bool remapSucceeded = (ret == KERN_SUCCESS);
        if (!remapSucceeded)
            return;

        // Assemble a thunk that will serve as the means for writing into the JIT region.
        MacroAssemblerCodeRef<JITThunkPtrTag> writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);

        int result = 0;

#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
        // Prevent reading the write thunk code.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(stubBase), stubSize, true, VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);
#endif

        // Prevent writing into the executable JIT mapping.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(jitBase), jitSize, true, VM_PROT_READ | VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);

        // Prevent execution in the writable JIT mapping.
        result = vm_protect(mach_task_self(), static_cast<vm_address_t>(writableAddr), jitSize, true, VM_PROT_READ | VM_PROT_WRITE);
        RELEASE_ASSERT(!result);

        // Zero out writableAddr to avoid leaking the address of the writable mapping.
        memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));

#if ENABLE(SEPARATED_WX_HEAP)
        // From now on, writes into the JIT region go through this thunk only;
        // the writable alias address survives solely inside the thunk's code.
        jitWriteSeparateHeapsFunction = reinterpret_cast<JITWriteSeparateHeapsFunction>(writeThunk.code().executableAddress());
#endif
    }

#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
    // Emits a hand-rolled memcpy-like thunk into the stub page. Its arguments
    // mirror genericWriteToJITRegion below: (offset into the JIT region,
    // source pointer, byte count); the writable alias base address is baked
    // into the instruction stream so it never appears in readable memory.
    // The body is a bulk 64-byte-block copy (with non-temporal stores) plus
    // tail paths for <64-byte and sub-8-byte remainders.
    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
    {
        using namespace ARM64Registers;
        using TrustedImm32 = MacroAssembler::TrustedImm32;

        MacroAssembler jit;

        jit.tagReturnAddress();
        // x0 (offset) + baked-in writable base -> absolute destination.
        jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
        jit.addPtr(x7, x0);

        jit.move(x0, x3);
        // Copies below 64 bytes take the byte-at-a-time small-copy path.
        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));

        jit.add64(TrustedImm32(32), x3);
        jit.and64(TrustedImm32(-32), x3); // align destination cursor to 32 bytes
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.sub64(x3, x0, x5);
        jit.addPtr(x5, x1);

        jit.loadPair64(x1, x8, x9);
        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.sub64(x5, x2);
        jit.storePair64(x12, x13, x0);
        jit.storePair64(x14, x15, x0, TrustedImm32(16));
        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);

        // Main loop: 32 bytes per iteration with non-temporal accesses.
        MacroAssembler::Label copyLoop = jit.label();
        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
        jit.add64(TrustedImm32(32), x3);
        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);

        // Flush pending registers and copy the final (possibly overlapping)
        // tail block.
        cleanup.link(&jit);
        jit.add64(x2, x1);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.storePair64(x8, x9, x3);
        jit.storePair64(x10, x11, x3, TrustedImm32(16));
        jit.addPtr(x2, x3);
        jit.storePair64(x12, x13, x3, TrustedImm32(32));
        jit.storePair64(x14, x15, x3, TrustedImm32(48));
        jit.ret();

        // Small-copy path: 8 bytes at a time, then single bytes.
        MacroAssembler::Label local0 = jit.label();
        jit.load64(x1, PostIndex(8), x6);
        jit.store64(x6, x3, PostIndex(8));
        smallCopy.link(&jit);
        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
        MacroAssembler::Label local1 = jit.label();
        jit.load8(x1, PostIndex(1), x6);
        jit.store8(x6, x3, PostIndex(1));
        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
        local2.link(&jit);
        jit.ret();

        auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
        LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
        // We don't use FINALIZE_CODE() for two reasons.
        // The first is that we don't want the writeable address, as disassembled instructions,
        // to appear in the console or anywhere in memory, via the PrintStream buffer.
        // The second is we can't guarantee that the code is readable when using the
        // asyncDisassembly option as our caller will set our pages execute only.
        return linkBuffer.finalizeCodeWithoutDisassembly<JITThunkPtrTag>();
    }
#else // not CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
    // Fallback write primitive when an execute-only thunk isn't used: a plain
    // memcpy into the writable alias at startOfFixedWritableMemoryPool.
    static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
    {
        memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize);
    }

    // Fallback "thunk generator": records the writable base in a file-static
    // and hands back a code ref pointing at genericWriteToJITRegion itself.
    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* address, void*, size_t)
    {
        startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
        void* function = reinterpret_cast<void*>(&genericWriteToJITRegion);
#if CPU(ARM_THUMB2)
        // Handle thumb offset
        uintptr_t functionAsInt = reinterpret_cast<uintptr_t>(function);
        functionAsInt -= 1;
        function = reinterpret_cast<void*>(functionAsInt);
#endif
        auto codePtr = MacroAssemblerCodePtr<JITThunkPtrTag>(tagCFunctionPtr<JITThunkPtrTag>(function));
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(codePtr);
    }
#endif // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)

#else // OS(DARWIN) && HAVE(REMAP_JIT)
    // No remap support on this platform: separated WX heaps are a no-op.
    void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
    {
    }
#endif

private:
    PageReservation m_reservation; // The single fixed VM reservation backing the pool (null if JIT disabled or reservation failed).
    MacroAssemblerCodePtr<ExecutableMemoryPtrTag> m_memoryStart; // Tagged pointer to the first allocatable byte.
    MacroAssemblerCodePtr<ExecutableMemoryPtrTag> m_memoryEnd; // Tagged pointer one past the last allocatable byte.
};
405
// Returns the entire fixed virtual memory reservation to the OS.
// Safe when the reservation was never made (deallocate on a null reservation).
FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
    m_reservation.deallocate();
}
410
411void ExecutableAllocator::initializeUnderlyingAllocator()
412{
413 ASSERT(!allocator);
414 allocator = new FixedVMPoolExecutableAllocator();
415 CodeProfiling::notifyAllocator(allocator);
416}
417
418bool ExecutableAllocator::isValid() const
419{
420 if (!allocator)
421 return Base::isValid();
422 return !!allocator->bytesReserved();
423}
424
425bool ExecutableAllocator::underMemoryPressure()
426{
427 if (!allocator)
428 return Base::underMemoryPressure();
429 MetaAllocator::Statistics statistics = allocator->currentStatistics();
430 return statistics.bytesAllocated > statistics.bytesReserved / 2;
431}
432
433double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
434{
435 if (!allocator)
436 return Base::memoryPressureMultiplier(addedMemoryUsage);
437 MetaAllocator::Statistics statistics = allocator->currentStatistics();
438 ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
439 size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
440 size_t bytesAvailable = static_cast<size_t>(
441 statistics.bytesReserved * (1 - executablePoolReservationFraction));
442 if (bytesAllocated >= bytesAvailable)
443 bytesAllocated = bytesAvailable;
444 double result = 1.0;
445 size_t divisor = bytesAvailable - bytesAllocated;
446 if (divisor)
447 result = static_cast<double>(bytesAvailable) / divisor;
448 if (result < 1.0)
449 result = 1.0;
450 return result;
451}
452
// Allocates sizeInBytes of executable memory from the fixed pool.
// Returns null when effort == JITCompilationCanFail and the allocation is
// denied (fuzzing, reserve-fraction policy, or pool exhaustion); crashes when
// a must-succeed allocation cannot be satisfied.
RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    if (!allocator)
        return Base::allocate(sizeInBytes, ownerUID, effort);
    if (Options::logExecutableAllocation()) {
        MetaAllocator::Statistics stats = allocator->currentStatistics();
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n");
    }

    // Optionally report must-succeed allocations (with a backtrace) so they
    // can be audited; these bypass all of the failure paths below.
    if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
        WTFReportBacktrace();
    }

    // Allocation fuzzing can force a simulated failure for testing.
    if (effort == JITCompilationCanFail
        && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
        return nullptr;

    if (effort == JITCompilationCanFail) {
        // Don't allow allocations if we are down to reserve.
        MetaAllocator::Statistics statistics = allocator->currentStatistics();
        size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
        size_t bytesAvailable = static_cast<size_t>(
            statistics.bytesReserved * (1 - executablePoolReservationFraction));
        if (bytesAllocated > bytesAvailable) {
            if (Options::logExecutableAllocation())
                dataLog("Allocation failed because bytes allocated ", bytesAllocated, " > ", bytesAvailable, " bytes available.\n");
            return nullptr;
        }
    }

    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        // Pool is exhausted: fail softly if allowed, otherwise crash loudly.
        if (effort != JITCompilationCanFail) {
            dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
            CRASH();
        }
        return nullptr;
    }

#if CPU(ARM64E)
    // Sanity-check that the (pointer-tagged) result lies wholly inside the
    // fixed pool once untagged.
    void* start = allocator->memoryStart();
    void* end = allocator->memoryEnd();
    void* resultStart = result->start().untaggedPtr();
    void* resultEnd = result->end().untaggedPtr();
    RELEASE_ASSERT(start <= resultStart && resultStart < end);
    RELEASE_ASSERT(start < resultEnd && resultEnd <= end);
#endif
    return result;
}
503
504bool ExecutableAllocator::isValidExecutableMemory(const AbstractLocker& locker, void* address)
505{
506 if (!allocator)
507 return Base::isValidExecutableMemory(locker, address);
508 return allocator->isInAllocatedMemory(locker, address);
509}
510
511Lock& ExecutableAllocator::getLock() const
512{
513 if (!allocator)
514 return Base::getLock();
515 return allocator->getLock();
516}
517
518size_t ExecutableAllocator::committedByteCount()
519{
520 if (!allocator)
521 return Base::committedByteCount();
522 return allocator->bytesCommitted();
523}
524
525#if ENABLE(META_ALLOCATOR_PROFILE)
526void ExecutableAllocator::dumpProfile()
527{
528 if (!allocator)
529 return;
530 allocator->dumpProfile();
531}
532#endif
533
534void* startOfFixedExecutableMemoryPoolImpl()
535{
536 if (!allocator)
537 return nullptr;
538 return allocator->memoryStart();
539}
540
541void* endOfFixedExecutableMemoryPoolImpl()
542{
543 if (!allocator)
544 return nullptr;
545 return allocator->memoryEnd();
546}
547
548bool isJITPC(void* pc)
549{
550 return allocator && allocator->isJITPC(pc);
551}
552
553} // namespace JSC
554
555#endif // ENABLE(JIT)
556
557namespace JSC {
558
559static ExecutableAllocator* executableAllocator;
560
// Creates the process-wide ExecutableAllocator singleton. Must run once,
// before any call to singleton().
void ExecutableAllocator::initialize()
{
    executableAllocator = new ExecutableAllocator;
}
565
// Returns the process-wide allocator; initialize() must have been called.
ExecutableAllocator& ExecutableAllocator::singleton()
{
    ASSERT(executableAllocator);
    return *executableAllocator;
}
571
572} // namespace JSC
573