1/*
2 * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "VMInspector.h"
28
29#include "CodeBlock.h"
30#include "CodeBlockSet.h"
31#include "HeapInlines.h"
32#include "HeapIterationScope.h"
33#include "JSCInlines.h"
34#include "MachineContext.h"
35#include "MarkedSpaceInlines.h"
36#include "StackVisitor.h"
37#include <mutex>
38#include <wtf/Expected.h>
39
40#if !OS(WINDOWS)
41#include <unistd.h>
42#endif
43
44namespace JSC {
45
46VMInspector& VMInspector::instance()
47{
48 static VMInspector* manager;
49 static std::once_flag once;
50 std::call_once(once, [] {
51 manager = new VMInspector();
52 });
53 return *manager;
54}
55
56void VMInspector::add(VM* vm)
57{
58 auto locker = holdLock(m_lock);
59 m_list.append(vm);
60}
61
62void VMInspector::remove(VM* vm)
63{
64 auto locker = holdLock(m_lock);
65 m_list.remove(vm);
66}
67
68auto VMInspector::lock(Seconds timeout) -> Expected<Locker, Error>
69{
70 // This function may be called from a signal handler (e.g. via visit()). Hence,
71 // it should only use APIs that are safe to call from signal handlers. This is
72 // why we use unistd.h's sleep() instead of its alternatives.
73
74 // We'll be doing sleep(1) between tries below. Hence, sleepPerRetry is 1.
75 unsigned maxRetries = (timeout < Seconds::infinity()) ? timeout.value() : UINT_MAX;
76
77 Expected<Locker, Error> locker = Locker::tryLock(m_lock);
78 unsigned tryCount = 0;
79 while (!locker && tryCount < maxRetries) {
80 // We want the version of sleep from unistd.h. Cast to disambiguate.
81#if !OS(WINDOWS)
82 (static_cast<unsigned (*)(unsigned)>(sleep))(1);
83#endif
84 locker = Locker::tryLock(m_lock);
85 }
86
87 if (!locker)
88 return makeUnexpected(Error::TimedOut);
89 return locker;
90}
91
92#if ENABLE(JIT)
93static bool ensureIsSafeToLock(Lock& lock)
94{
95 unsigned maxRetries = 2;
96 unsigned tryCount = 0;
97 while (tryCount <= maxRetries) {
98 bool success = lock.tryLock();
99 if (success) {
100 lock.unlock();
101 return true;
102 }
103 tryCount++;
104 }
105 return false;
106};
107#endif // ENABLE(JIT)
108
// Returns true if machinePC points into memory managed by the executable
// (JIT) allocator. Returns Error::TimedOut only when no match was found AND
// at least one VM had to be skipped because its allocator lock could not be
// safely taken. Without JIT support, always returns false.
// The Locker parameter proves the caller holds the VMInspector lock.
auto VMInspector::isValidExecutableMemory(const VMInspector::Locker&, void* machinePC) -> Expected<bool, Error>
{
#if ENABLE(JIT)
    bool found = false;
    bool hasTimeout = false;
    iterate([&] (VM&) -> FunctorStatus {
        auto& allocator = ExecutableAllocator::singleton();
        auto& lock = allocator.getLock();

        // Probe with tryLock first so we never deadlock on a lock the
        // current thread (or a suspended thread) may already hold.
        bool isSafeToLock = ensureIsSafeToLock(lock);
        if (!isSafeToLock) {
            hasTimeout = true;
            return FunctorStatus::Continue; // Skip this VM.
        }

        LockHolder executableAllocatorLocker(lock);
        if (allocator.isValidExecutableMemory(executableAllocatorLocker, machinePC)) {
            found = true;
            return FunctorStatus::Done;
        }
        return FunctorStatus::Continue;
    });

    // A timeout is only an error if we didn't find a match some other way.
    if (!found && hasTimeout)
        return makeUnexpected(Error::TimedOut);
    return found;
#else
    UNUSED_PARAM(machinePC);
    return false;
#endif
}
140
// Searches every registered VM whose API lock this thread holds for a JIT'ed
// CodeBlock whose code range contains machinePC. Returns the matching
// CodeBlock, or Error::TimedOut when no match was found but at least one VM
// was skipped because its CodeBlockSet lock could not be safely taken, or
// nullptr otherwise. Without JIT support, always returns nullptr.
auto VMInspector::codeBlockForMachinePC(const VMInspector::Locker&, void* machinePC) -> Expected<CodeBlock*, Error>
{
#if ENABLE(JIT)
    CodeBlock* codeBlock = nullptr;
    bool hasTimeout = false;
    iterate([&] (VM& vm) {
        // Only VMs owned by the current thread can be inspected safely.
        if (!vm.currentThreadIsHoldingAPILock())
            return FunctorStatus::Continue;

        // It is safe to call Heap::forEachCodeBlockIgnoringJITPlans here because:
        // 1. CodeBlocks are added to the CodeBlockSet from the main thread before
        //    they are handed to the JIT plans. Those codeBlocks will have a null jitCode,
        //    but we check for that in our lambda functor.
        // 2. We will acquire the CodeBlockSet lock before iterating.
        //    This ensures that a CodeBlock won't be GCed while we're iterating.
        // 3. We do a tryLock on the CodeBlockSet's lock first to ensure that it is
        //    safe for the current thread to lock it before calling
        //    Heap::forEachCodeBlockIgnoringJITPlans(). Hence, there's no risk of
        //    re-entering the lock and deadlocking on it.

        auto& codeBlockSetLock = vm.heap.codeBlockSet().getLock();
        bool isSafeToLock = ensureIsSafeToLock(codeBlockSetLock);
        if (!isSafeToLock) {
            hasTimeout = true;
            return FunctorStatus::Continue; // Skip this VM.
        }

        auto locker = holdLock(codeBlockSetLock);
        vm.heap.forEachCodeBlockIgnoringJITPlans(locker, [&] (CodeBlock* cb) {
            JITCode* jitCode = cb->jitCode().get();
            if (!jitCode) {
                // If the codeBlock is a replacement codeBlock which is in the process of being
                // compiled, its jitCode will be null, and we can disregard it as a match for
                // the machinePC we're searching for.
                return;
            }

            // Interpreted (LLInt) code has no JIT code range to match against.
            if (!JITCode::isJIT(jitCode->jitType()))
                return;

            if (jitCode->contains(machinePC)) {
                codeBlock = cb;
                return;
            }
        });
        if (codeBlock)
            return FunctorStatus::Done;
        return FunctorStatus::Continue;
    });

    // A timeout is only an error if no match was found at all.
    if (!codeBlock && hasTimeout)
        return makeUnexpected(Error::TimedOut);
    return codeBlock;
#else
    UNUSED_PARAM(machinePC);
    return nullptr;
#endif
}
199
200bool VMInspector::currentThreadOwnsJSLock(ExecState* exec)
201{
202 return exec->vm().currentThreadIsHoldingAPILock();
203}
204
205static bool ensureCurrentThreadOwnsJSLock(ExecState* exec)
206{
207 if (VMInspector::currentThreadOwnsJSLock(exec))
208 return true;
209 dataLog("ERROR: current thread does not own the JSLock\n");
210 return false;
211}
212
213void VMInspector::gc(ExecState* exec)
214{
215 VM& vm = exec->vm();
216 if (!ensureCurrentThreadOwnsJSLock(exec))
217 return;
218 vm.heap.collectNow(Sync, CollectionScope::Full);
219}
220
221void VMInspector::edenGC(ExecState* exec)
222{
223 VM& vm = exec->vm();
224 if (!ensureCurrentThreadOwnsJSLock(exec))
225 return;
226 vm.heap.collectSync(CollectionScope::Eden);
227}
228
229bool VMInspector::isInHeap(Heap* heap, void* ptr)
230{
231 MarkedBlock* candidate = MarkedBlock::blockFor(ptr);
232 if (heap->objectSpace().blocks().set().contains(candidate))
233 return true;
234 for (LargeAllocation* allocation : heap->objectSpace().largeAllocations()) {
235 if (allocation->contains(ptr))
236 return true;
237 }
238 return false;
239}
240
241struct CellAddressCheckFunctor : MarkedBlock::CountFunctor {
242 CellAddressCheckFunctor(JSCell* candidate)
243 : candidate(candidate)
244 {
245 }
246
247 IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const
248 {
249 if (cell == candidate) {
250 found = true;
251 return IterationStatus::Done;
252 }
253 return IterationStatus::Continue;
254 }
255
256 JSCell* candidate;
257 mutable bool found { false };
258};
259
260bool VMInspector::isValidCell(Heap* heap, JSCell* candidate)
261{
262 HeapIterationScope iterationScope(*heap);
263 CellAddressCheckFunctor functor(candidate);
264 heap->objectSpace().forEachLiveCell(iterationScope, functor);
265 return functor.found;
266}
267
268bool VMInspector::isValidCodeBlock(ExecState* exec, CodeBlock* candidate)
269{
270 if (!ensureCurrentThreadOwnsJSLock(exec))
271 return false;
272
273 struct CodeBlockValidationFunctor {
274 CodeBlockValidationFunctor(CodeBlock* candidate)
275 : candidate(candidate)
276 {
277 }
278
279 void operator()(CodeBlock* codeBlock) const
280 {
281 if (codeBlock == candidate)
282 found = true;
283 }
284
285 CodeBlock* candidate;
286 mutable bool found { false };
287 };
288
289 VM& vm = exec->vm();
290 CodeBlockValidationFunctor functor(candidate);
291 vm.heap.forEachCodeBlock(functor);
292 return functor.found;
293}
294
295CodeBlock* VMInspector::codeBlockForFrame(CallFrame* topCallFrame, unsigned frameNumber)
296{
297 if (!ensureCurrentThreadOwnsJSLock(topCallFrame))
298 return nullptr;
299
300 if (!topCallFrame)
301 return nullptr;
302
303 struct FetchCodeBlockFunctor {
304 public:
305 FetchCodeBlockFunctor(unsigned targetFrameNumber)
306 : targetFrame(targetFrameNumber)
307 {
308 }
309
310 StackVisitor::Status operator()(StackVisitor& visitor) const
311 {
312 auto currentFrame = nextFrame++;
313 if (currentFrame == targetFrame) {
314 codeBlock = visitor->codeBlock();
315 return StackVisitor::Done;
316 }
317 return StackVisitor::Continue;
318 }
319
320 unsigned targetFrame;
321 mutable unsigned nextFrame { 0 };
322 mutable CodeBlock* codeBlock { nullptr };
323 };
324
325 FetchCodeBlockFunctor functor(frameNumber);
326 topCallFrame->iterate(functor);
327 return functor.codeBlock;
328}
329
330class DumpFrameFunctor {
331public:
332 enum Action {
333 DumpOne,
334 DumpAll
335 };
336
337 DumpFrameFunctor(Action action, unsigned framesToSkip)
338 : m_action(action)
339 , m_framesToSkip(framesToSkip)
340 {
341 }
342
343 StackVisitor::Status operator()(StackVisitor& visitor) const
344 {
345 m_currentFrame++;
346 if (m_currentFrame > m_framesToSkip) {
347 visitor->dump(WTF::dataFile(), Indenter(2), [&] (PrintStream& out) {
348 out.print("[", (m_currentFrame - m_framesToSkip - 1), "] ");
349 });
350 }
351 if (m_action == DumpOne && m_currentFrame > m_framesToSkip)
352 return StackVisitor::Done;
353 return StackVisitor::Continue;
354 }
355
356private:
357 Action m_action;
358 unsigned m_framesToSkip;
359 mutable unsigned m_currentFrame { 0 };
360};
361
362void VMInspector::dumpCallFrame(CallFrame* callFrame, unsigned framesToSkip)
363{
364 if (!ensureCurrentThreadOwnsJSLock(callFrame))
365 return;
366 DumpFrameFunctor functor(DumpFrameFunctor::DumpOne, framesToSkip);
367 callFrame->iterate(functor);
368}
369
// Dumps a human-readable table of the given frame's register slots
// (arguments, frame-header slots, callee saves, vars, and temporaries) to
// the data log. Only frames that have a CodeBlock (i.e. JS frames) are
// supported. The iteration walks register slots from high addresses down,
// mirroring the frame layout — assumes the usual JSC call-frame layout with
// arguments above the header; TODO confirm against CallFrame layout docs.
void VMInspector::dumpRegisters(CallFrame* callFrame)
{
    CodeBlock* codeBlock = callFrame->codeBlock();
    if (!codeBlock) {
        dataLog("Dumping host frame registers not supported.\n");
        return;
    }
    VM& vm = *codeBlock->vm();
    // Only stringify values that are safe to inspect: non-cells, or cells
    // verified to be live objects in this VM's heap. Anything else prints
    // as an empty string.
    auto valueAsString = [&] (JSValue v) -> CString {
        if (!v.isCell() || VMInspector::isValidCell(&vm.heap, reinterpret_cast<JSCell*>(JSValue::encode(v))))
            return toCString(v);
        return "";
    };

    dataLogF("Register frame: \n\n");
    dataLogF("-----------------------------------------------------------------------------\n");
    dataLogF(" use | address | value \n");
    dataLogF("-----------------------------------------------------------------------------\n");

    const Register* it;
    const Register* end;

    // Walk the argument slots (this + arguments), from the last argument
    // down to (but not including) the slot just below thisArgument.
    it = callFrame->registers() + CallFrameSlot::thisArgument + callFrame->argumentCount();
    end = callFrame->registers() + CallFrameSlot::thisArgument - 1;
    while (it > end) {
        JSValue v = it->jsValue();
        int registerNumber = it - callFrame->registers();
        String name = codeBlock->nameForRegister(VirtualRegister(registerNumber));
        dataLogF("[r% 3d %14s] | %10p | 0x%-16llx %s\n", registerNumber, name.ascii().data(), it, (long long)JSValue::encode(v), valueAsString(v).data());
        --it;
    }

    dataLogF("-----------------------------------------------------------------------------\n");
    dataLogF("[ArgumentCount] | %10p | %lu \n", it, (unsigned long) callFrame->argumentCount());

    // Find this frame in the stack walk so we can report its bytecode offset
    // and source line for the return-VPC slot.
    callFrame->iterate([&] (StackVisitor& visitor) {
        if (visitor->callFrame() == callFrame) {
            unsigned line = 0;
            unsigned unusedColumn = 0;
            visitor->computeLineAndColumn(line, unusedColumn);
            dataLogF("[ReturnVPC] | %10p | %d (line %d)\n", it, visitor->bytecodeOffset(), line);
            return StackVisitor::Done;
        }
        return StackVisitor::Continue;
    });

    // Frame header slots: callee, CodeBlock, (JIT) return PC, caller frame.
    --it;
    dataLogF("[Callee] | %10p | 0x%-16llx %s\n", it, (long long)callFrame->callee().rawPtr(), valueAsString(it->jsValue()).data());
    --it;
    dataLogF("[CodeBlock] | %10p | 0x%-16llx ", it, (long long)codeBlock);
    dataLogLn(codeBlock);
    --it;
#if ENABLE(JIT)
    AbstractPC pc = callFrame->abstractReturnPC(callFrame->vm());
    if (pc.hasJITReturnAddress())
        dataLogF("[ReturnPC] | %10p | %p \n", it, pc.jitReturnAddress().value());
    --it;
#endif
    dataLogF("[CallerFrame] | %10p | %p \n", it, callFrame->callerFrame());
    --it;
    dataLogF("-----------------------------------------------------------------------------\n");

    // Below the header: callee-save slots, then the codeBlock's named vars.
    size_t numberOfCalleeSaveSlots = codeBlock->calleeSaveSpaceAsVirtualRegisters();
    const Register* endOfCalleeSaves = it - numberOfCalleeSaveSlots;

    end = it - codeBlock->numVars();
    if (it != end) {
        do {
            JSValue v = it->jsValue();
            int registerNumber = it - callFrame->registers();
            String name = (it > endOfCalleeSaves)
                ? "CalleeSaveReg"
                : codeBlock->nameForRegister(VirtualRegister(registerNumber));
            dataLogF("[r% 3d %14s] | %10p | 0x%-16llx %s\n", registerNumber, name.ascii().data(), it, (long long)JSValue::encode(v), valueAsString(v).data());
            --it;
        } while (it != end);
    }
    dataLogF("-----------------------------------------------------------------------------\n");

    // Finally, the remaining (unnamed) callee locals / temporaries.
    end = it - codeBlock->numCalleeLocals() + codeBlock->numVars();
    if (it != end) {
        do {
            JSValue v = (*it).jsValue();
            int registerNumber = it - callFrame->registers();
            dataLogF("[r% 3d] | %10p | 0x%-16llx %s\n", registerNumber, it, (long long)JSValue::encode(v), valueAsString(v).data());
            --it;
        } while (it != end);
    }
    dataLogF("-----------------------------------------------------------------------------\n");
}
460
461void VMInspector::dumpStack(CallFrame* topCallFrame, unsigned framesToSkip)
462{
463 if (!ensureCurrentThreadOwnsJSLock(topCallFrame))
464 return;
465 if (!topCallFrame)
466 return;
467 DumpFrameFunctor functor(DumpFrameFunctor::DumpAll, framesToSkip);
468 topCallFrame->iterate(functor);
469}
470
// Prints a JSValue to the data log. Convenience entry point intended to be
// called from a debugger.
void VMInspector::dumpValue(JSValue value)
{
    dataLog(value);
}
475
// Dumps the raw memory of a JSCell to the data log. Thin wrapper around
// dumpCellMemoryToStream() targeting WTF::dataFile().
void VMInspector::dumpCellMemory(JSCell* cell)
{
    dumpCellMemoryToStream(cell, WTF::dataFile());
}
480
// RAII helper for dumpCellMemoryToStream(): bumps the shared indentation
// level on construction and restores it on destruction, so nested scopes
// produce nested indentation automatically.
class IndentationScope {
public:
    IndentationScope(unsigned& indentation)
        : m_indentation(indentation)
    {
        m_indentation++;
    }

    ~IndentationScope()
    {
        m_indentation--;
    }

private:
    unsigned& m_indentation;
};
497
// Dumps the raw memory of a JSCell slot by slot (one EncodedJSValue per
// slot) to the given stream: the decoded header first, then — for objects —
// the butterfly pointer and the butterfly's sections (preCapacity,
// out-of-line properties, indexing header, array storage, indexed
// properties, unallocated capacity), and finally the cell's remaining
// inline slots.
void VMInspector::dumpCellMemoryToStream(JSCell* cell, PrintStream& out)
{
    VM& vm = *cell->vm();
    StructureID structureID = cell->structureID();
    Structure* structure = cell->structure(vm);
    IndexingType indexingTypeAndMisc = cell->indexingTypeAndMisc();
    IndexingType indexingType = structure->indexingType();
    IndexingType indexingMode = structure->indexingMode();
    JSType type = cell->type();
    TypeInfo::InlineTypeFlags inlineTypeFlags = cell->inlineTypeFlags();
    CellState cellState = cell->cellState();
    size_t cellSize = cell->cellSize();
    size_t slotCount = cellSize / sizeof(EncodedJSValue);

    // View the cell as an array of raw 64-bit slots.
    EncodedJSValue* slots = bitwise_cast<EncodedJSValue*>(cell);
    unsigned indentation = 0;

    auto indent = [&] {
        for (unsigned i = 0 ; i < indentation; ++i)
            out.print(" ");
    };

// INDENT prefixes a dump statement with the current indentation via the
// comma operator; undefined again at the end of this function.
#define INDENT indent(),

    // Prints one slot as "[index] address : hex-value [label]".
    auto dumpSlot = [&] (EncodedJSValue* slots, unsigned index, const char* label = nullptr) {
        out.print("[", index, "] ", format("%p : 0x%016" PRIx64, &slots[index], slots[index]));
        if (label)
            out.print(" ", label);
        out.print("\n");
    };

    out.printf("<%p, %s>\n", cell, cell->className(vm));
    IndentationScope scope(indentation);

    // Slot 0 is the cell header: structureID, indexing type, JSType, type
    // flags, and GC cell state.
    INDENT dumpSlot(slots, 0, "header");
    {
        IndentationScope scope(indentation);
        INDENT out.println("structureID ", format("%d 0x%" PRIx32, structureID, structureID), " structure ", RawPointer(structure));
        INDENT out.println("indexingTypeAndMisc ", format("%d 0x%" PRIx8, indexingTypeAndMisc, indexingTypeAndMisc), " ", IndexingTypeDump(indexingMode));
        INDENT out.println("type ", format("%d 0x%" PRIx8, type, type));
        INDENT out.println("flags ", format("%d 0x%" PRIx8, inlineTypeFlags, inlineTypeFlags));
        INDENT out.println("cellState ", format("%d", cellState));
    }

    unsigned slotIndex = 1;
    if (cell->isObject()) {
        JSObject* obj = static_cast<JSObject*>(const_cast<JSCell*>(cell));
        Butterfly* butterfly = obj->butterfly();
        size_t butterflySize = obj->butterflyTotalSize();

        // For objects, slot 1 holds the butterfly pointer.
        INDENT dumpSlot(slots, slotIndex, "butterfly");
        slotIndex++;

        if (butterfly) {
            IndentationScope scope(indentation);

            bool hasIndexingHeader = structure->hasIndexingHeader(cell);
            bool hasAnyArrayStorage = JSC::hasAnyArrayStorage(indexingType);

            size_t preCapacity = obj->butterflyPreCapacity();
            size_t propertyCapacity = structure->outOfLineCapacity();

            // The butterfly's base depends on whether it carries an
            // indexing header.
            void* base = hasIndexingHeader
                ? butterfly->base(preCapacity, propertyCapacity)
                : butterfly->base(structure);

            unsigned publicLength = butterfly->publicLength();
            unsigned vectorLength = butterfly->vectorLength();
            size_t butterflyCellSize = MarkedSpace::optimalSizeFor(butterflySize);

            size_t endOfIndexedPropertiesIndex = butterflySize / sizeof(EncodedJSValue);
            size_t endOfButterflyIndex = butterflyCellSize / sizeof(EncodedJSValue);

            INDENT out.println("base ", RawPointer(base));
            INDENT out.println("hasIndexingHeader ", (hasIndexingHeader ? "YES" : "NO"), " hasAnyArrayStorage ", (hasAnyArrayStorage ? "YES" : "NO"));
            if (hasIndexingHeader) {
                INDENT out.print("publicLength ", publicLength, " vectorLength ", vectorLength);
                if (hasAnyArrayStorage)
                    out.print(" indexBias ", butterfly->arrayStorage()->m_indexBias);
                out.print("\n");
            }
            INDENT out.println("preCapacity ", preCapacity, " propertyCapacity ", propertyCapacity);

            unsigned index = 0;
            // Shadows the outer `slots`: from here on we dump the
            // butterfly's slots, not the cell's.
            EncodedJSValue* slots = reinterpret_cast<EncodedJSValue*>(base);

            auto asVoidPtr = [] (void* p) {
                return p;
            };

            auto dumpSectionHeader = [&] (const char* name) {
                out.println("<--- ", name);
            };

            // Dumps slots [startIndex, endIndex), printing the section name
            // before the first slot; returns endIndex as the next cursor.
            auto dumpSection = [&] (unsigned startIndex, unsigned endIndex, const char* name) -> unsigned {
                for (unsigned index = startIndex; index < endIndex; ++index) {
                    if (name && index == startIndex)
                        INDENT dumpSectionHeader(name);
                    INDENT dumpSlot(slots, index);
                }
                return endIndex;
            };

            {
                IndentationScope scope(indentation);

                index = dumpSection(index, preCapacity, "preCapacity");
                index = dumpSection(index, preCapacity + propertyCapacity, "propertyCapacity");

                if (hasIndexingHeader)
                    index = dumpSection(index, index + 1, "indexingHeader");

                INDENT dumpSectionHeader("butterfly");
                if (hasAnyArrayStorage) {
                    // Sanity-check that the array storage begins exactly at
                    // the current slot and has the expected 2-slot prefix.
                    RELEASE_ASSERT(asVoidPtr(butterfly->arrayStorage()) == asVoidPtr(&slots[index]));
                    RELEASE_ASSERT(ArrayStorage::vectorOffset() == 2 * sizeof(EncodedJSValue));
                    index = dumpSection(index, index + 2, "arrayStorage");
                }

                index = dumpSection(index, endOfIndexedPropertiesIndex, "indexedProperties");
                index = dumpSection(index, endOfButterflyIndex, "unallocated capacity");
            }
        }
    }

    // Remaining inline slots of the cell itself.
    for (; slotIndex < slotCount; ++slotIndex)
        INDENT dumpSlot(slots, slotIndex);

#undef INDENT
}
628
629} // namespace JSC
630