/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CodeBlockHash.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompilationResult.h"
#include "ConcurrentJSLock.h"
#include "DFGCommon.h"
#include "DirectEvalCodeCache.h"
#include "EvalExecutable.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "FunctionExecutable.h"
#include "HandlerInfo.h"
#include "ICStatusMap.h"
#include "Instruction.h"
#include "InstructionStream.h"
#include "JITCode.h"
#include "JITCodeMap.h"
#include "JITMathICForwards.h"
#include "JSCast.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "MetadataTable.h"
#include "ModuleProgramExecutable.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Printer.h"
#include "ProfilerJettisonReason.h"
#include "ProgramExecutable.h"
#include "PutPropertySlot.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

#if ENABLE(DFG_JIT)
namespace DFG {
struct OSRExitState;
} // namespace DFG
#endif

class BytecodeLivenessAnalysis;
class CodeBlockSet;
class ExecutableToCodeBlockEdge;
class JSModuleEnvironment;
class LLIntOffsetsExtractor;
class LLIntPrototypeLoadAdaptiveStructureWatchpoint;
class MetadataTable;
class PCToCodeOriginMap;
class RegisterAtOffsetList;
class StructureStubInfo;

enum class AccessType : int8_t;

struct ArithProfile;
struct OpCatch;

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

class CodeBlock : public JSCell {
    typedef JSCell Base;
    friend class BytecodeLivenessAnalysis;
    friend class JIT;
    friend class LLIntOffsetsExtractor;

public:

    enum CopyParsedBlockTag { CopyParsedBlock };

    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
    static const bool needsDestruction = true;

    template<typename, SubspaceAccess>
    static void subspaceFor(VM&) { }

    DECLARE_INFO;

protected:
    CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
    CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);

    void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
    bool finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);

    void finishCreationCommon(VM&);

    WriteBarrier<JSGlobalObject> m_globalObject;

public:
    JS_EXPORT_PRIVATE ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITType) const;
    JS_EXPORT_PRIVATE void dump(PrintStream&) const;

    MetadataTable* metadataTable() const { return m_metadata.get(); }

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }

    int numCalleeLocals() const { return m_numCalleeLocals; }

    int numVars() const { return m_numVars; }

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
    void setAlternative(VM&, CodeBlock*);

    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }
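
    // A minimal usage sketch (illustrative only; the lambda below is hypothetical,
    // not an API of this class): visit this block plus its baseline alternative and
    // any special OSR-entry block, for example to log them all.
    //
    //     codeBlock->forEachRelatedCodeBlock([] (CodeBlock* related) {
    //         dataLogLn("related code block: ", related);
    //     });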

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(isConstructor());
    }

    CodeBlock* alternativeForJettison();
    JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    static size_t estimatedSize(JSCell*, VM&);
    static void visitChildren(JSCell*, SlotVisitor&);
    static void destroy(JSCell*);
    void visitChildren(SlotVisitor&);
    void finalizeUnconditionally(VM&);

    void notifyLexicalBindingUpdate();

    void dumpSource();
    void dumpSource(PrintStream&);

    void dumpBytecode();
    void dumpBytecode(PrintStream&);
    void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap());
    void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap());

    void dumpExceptionHandlers(PrintStream&);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    void dumpMathICStats();

    bool isStrictMode() const { return m_unlinkedCode->isStrictMode(); }
    bool isConstructor() const { return m_unlinkedCode->isConstructor(); }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
    CodeType codeType() const { return m_unlinkedCode->codeType(); }

    JSParserScriptMode scriptMode() const { return m_unlinkedCode->scriptMode(); }

    bool hasInstalledVMTrapBreakpoints() const;
    bool installVMTrapBreakpoints();

    inline bool isKnownNotImmediate(int index)
    {
        if (index == thisRegister().offset() && !isStrictMode())
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
    HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
    void removeExceptionHandlerForCallSite(CallSiteIndex);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;

    Optional<unsigned> bytecodeOffsetFromCallSiteIndex(CallSiteIndex);

    void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result);
    void getICStatusMap(ICStatusMap& result);

#if ENABLE(JIT)
    struct JITData {
        WTF_MAKE_STRUCT_FAST_ALLOCATED;

        Bag<StructureStubInfo> m_stubInfos;
        Bag<JITAddIC> m_addICs;
        Bag<JITMulIC> m_mulICs;
        Bag<JITNegIC> m_negICs;
        Bag<JITSubIC> m_subICs;
        Bag<ByValInfo> m_byValInfos;
        Bag<CallLinkInfo> m_callLinkInfos;
        SentinelLinkedList<CallLinkInfo, PackedRawSentinelNode<CallLinkInfo>> m_incomingCalls;
        SentinelLinkedList<PolymorphicCallNode, PackedRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
        SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
        std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
        std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
        JITCodeMap m_jitCodeMap;
    };

    JITData& ensureJITData(const ConcurrentJSLocker& locker)
    {
        if (LIKELY(m_jitData))
            return *m_jitData;
        return ensureJITDataSlow(locker);
    }
    JITData& ensureJITDataSlow(const ConcurrentJSLocker&);

    JITAddIC* addJITAddIC(ArithProfile*);
    JITMulIC* addJITMulIC(ArithProfile*);
    JITNegIC* addJITNegIC(ArithProfile*);
    JITSubIC* addJITSubIC(ArithProfile*);

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
    JITAddIC* addMathIC(ArithProfile* profile) { return addJITAddIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type>
    JITMulIC* addMathIC(ArithProfile* profile) { return addJITMulIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
    JITNegIC* addMathIC(ArithProfile* profile) { return addJITNegIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type>
    JITSubIC* addMathIC(ArithProfile* profile) { return addJITSubIC(profile); }

    StructureStubInfo* addStubInfo(AccessType);

    // O(n) operation. Use getICStatusMap() unless you really only intend to get one
    // stub info.
    StructureStubInfo* findStubInfo(CodeOrigin);

    ByValInfo* addByValInfo();

    CallLinkInfo* addCallLinkInfo();

    // This is a slow function call used primarily for compiling OSR exits in the case
    // that there had been inlining. Chances are if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling this.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);

    void setJITCodeMap(JITCodeMap&& jitCodeMap)
    {
        ConcurrentJSLocker locker(m_lock);
        ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
    }
    const JITCodeMap& jitCodeMap()
    {
        ConcurrentJSLocker locker(m_lock);
        return ensureJITData(locker).m_jitCodeMap;
    }

    void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
    Optional<CodeOrigin> findPC(void* pc);

    void setCalleeSaveRegisters(RegisterSet);
    void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
    RareCaseProfile* rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
    unsigned rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        ConcurrentJSLocker locker(m_lock);
        unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        ConcurrentJSLocker locker(m_lock);
        unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }
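
    // Illustrative sketch only (not an API of this class; the emit* helpers below
    // are hypothetical): an optimizing compiler might consult these profiles to
    // choose between a speculative fast path and a generic one.
    //
    //     if (codeBlock->couldTakeSlowCase(bytecodeOffset))
    //         emitGenericPath(); // hypothetical helper
    //     else
    //         emitSpeculativeFastPath(); // hypothetical helper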

    // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
    // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
    // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
    // would be able to get rid of this silly function.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
    void resetJITData();
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
    void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)

    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);

    const Instruction* outOfLineJumpTarget(const Instruction* pc);
    int outOfLineJumpOffset(const Instruction* pc);
    int outOfLineJumpOffset(const InstructionStream::Ref& instruction)
    {
        return outOfLineJumpOffset(instruction.ptr());
    }

    inline unsigned bytecodeOffset(const Instruction* returnAddress)
    {
        const auto* instructionsBegin = instructions().at(0).ptr();
        const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size());
        RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd);
        return returnAddress - instructionsBegin;
    }

    const InstructionStream& instructions() const { return m_unlinkedCode->instructions(); }

    size_t predictedMachineCodeSize();

    unsigned instructionsSize() const { return instructions().size(); }
    unsigned bytecodeCost() const { return m_bytecodeCost; }

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    CodeBlock* newReplacement();

    void setJITCode(Ref<JITCode>&& code)
    {
        ASSERT(heap()->isDeferred());
        if (!code->isShared())
            heap()->reportExtraMemoryAllocated(code->size());

        ConcurrentJSLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = WTFMove(code);
    }

    RefPtr<JITCode> jitCode() { return m_jitCode; }
    static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
    JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITType::BaselineJIT;
    }

#if ENABLE(JIT)
    CodeBlock* replacement();

    DFG::CapabilityLevel computeCapabilityLevel();
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }

    bool hasOptimizedReplacement(JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
432
433 void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
434
435 ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
436
437 ExecutableToCodeBlockEdge* ownerEdge() const { return m_ownerEdge.get(); }
438
439 VM* vm() const { return m_vm; }
440
441 VirtualRegister thisRegister() const { return m_unlinkedCode->thisRegister(); }
442
443 bool usesEval() const { return m_unlinkedCode->usesEval(); }
444
445 void setScopeRegister(VirtualRegister scopeRegister)
446 {
447 ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
448 m_scopeRegister = scopeRegister;
449 }
450
451 VirtualRegister scopeRegister() const
452 {
453 return m_scopeRegister;
454 }
455
456 PutPropertySlot::Context putByIdContext() const
457 {
458 if (codeType() == EvalCode)
459 return PutPropertySlot::PutByIdEval;
460 return PutPropertySlot::PutById;
461 }
462
463 const SourceCode& source() const { return m_ownerExecutable->source(); }
464 unsigned sourceOffset() const { return m_ownerExecutable->source().startOffset(); }
465 unsigned firstLineColumnOffset() const { return m_ownerExecutable->startColumn(); }
466
467 size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
468 unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
469
470 String nameForRegister(VirtualRegister);
471
472 unsigned numberOfArgumentValueProfiles()
473 {
474 ASSERT(m_numParameters >= 0);
475 ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters) || !vm()->canUseJIT());
476 return m_argumentValueProfiles.size();
477 }
478
479 ValueProfile& valueProfileForArgument(unsigned argumentIndex)
480 {
481 ASSERT(vm()->canUseJIT()); // This is only called from the various JIT compilers or places that first check numberOfArgumentValueProfiles before calling this.
482 ValueProfile& result = m_argumentValueProfiles[argumentIndex];
483 return result;
484 }
485
486 ValueProfile& valueProfileForBytecodeOffset(int bytecodeOffset);
487 SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
488
489 template<typename Functor> void forEachValueProfile(const Functor&);
490 template<typename Functor> void forEachArrayProfile(const Functor&);
491 template<typename Functor> void forEachArrayAllocationProfile(const Functor&);
492 template<typename Functor> void forEachObjectAllocationProfile(const Functor&);
493 template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&);
494
495 ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset);
496 ArithProfile* arithProfileForPC(const Instruction*);
497
498 bool couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset);
499
500 ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins();

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(CallSiteIndex index)
    {
        if (!hasCodeOrigins())
            return false;
        return index.bits() < codeOrigins().size();
    }

    CodeOrigin codeOrigin(CallSiteIndex index)
    {
        return codeOrigins()[index.bits()];
    }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles(const ConcurrentJSLocker&)
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const;
    const Identifier& identifier(int index) const;
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(*m_vm, this, v);
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    const BitVector& bitVector(size_t i) { return m_unlinkedCode->bitVector(i); }

    Heap* heap() const { return &m_vm->heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        return m_unlinkedCode->livenessAnalysis(this);
    }

    void validate();

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
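
    // Illustrative sketch of the two modes (assumed call sites; the real callers
    // live in the bytecode generator and the JITs): shrink early while vectors may
    // still be reallocated, and only shrink late once emitted machine code may
    // point into them.
    //
    //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink); // before machine code exists
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink);  // after machine code was emitted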

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    int32_t thresholdForJIT(int32_t threshold);
    void jitAfterWarmUp();
    void jitSoon();

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }

    typedef HashMap<std::tuple<StructureID, unsigned>, Vector<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
    StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
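
    // A minimal sketch of the trigger protocol described above (hypothetical
    // caller; the real logic lives in the JIT slow paths and operation functions):
    //
    //     if (codeBlock->checkIfOptimizationThresholdReached()) {
    //         // Request a DFG compile, or OSR-enter if an optimized replacement
    //         // already exists.
    //     } else {
    //         // Keep running baseline code; the counter heuristics were adjusted.
    //     }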

#if !ENABLE(C_LOOP)
    const RegisterAtOffsetList* calleeSaveRegisters() const;

    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
    size_t calleeSaveSpaceAsVirtualRegisters();
#else
    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 1; };
    size_t calleeSaveSpaceAsVirtualRegisters() { return 0; }
#endif

#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    enum class OptimizeAction { None, ReoptimizeNow };
#if ENABLE(DFG_JIT)
    OptimizeAction updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState&);
#endif

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();

#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);

    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }

    bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJSLock m_lock;
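
    // A minimal sketch of the locking discipline described above (hypothetical
    // compiler-thread code): take m_lock before reading inline-cache state from
    // off the main thread.
    //
    //     {
    //         ConcurrentJSLocker locker(codeBlock->m_lock);
    //         ICStatusMap statusMap;
    //         codeBlock->getICStatusMap(locker, statusMap);
    //     }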

    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.

#if ENABLE(JIT)
    unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
#endif

    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailJITCompilation : 1;
    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    // Internal methods for use by validation code. They would be private if it weren't
    // for the fact that we use them from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Jump Tables
        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles;

        DirectEvalCodeCache m_directEvalCodeCache;
    };

    void clearExceptionHandlers()
    {
        if (m_rareData)
            m_rareData->m_exceptionHandlers.clear();
    }

    void appendExceptionHandler(const HandlerInfo& handler)
    {
        createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
        m_rareData->m_exceptionHandlers.append(handler);
    }

    CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);

    void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset);

    bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); }

    template<typename Metadata>
    Metadata& metadata(OpcodeID opcodeID, unsigned metadataID)
    {
        ASSERT(m_metadata);
        return bitwise_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID];
    }

    size_t metadataSizeInBytes()
    {
        return m_unlinkedCode->metadataSizeInBytes();
    }

protected:
    void finalizeLLIntInlineCaches();
#if ENABLE(JIT)
    void finalizeBaselineJITInlineCaches();
#endif
#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;
    friend class ExecutableToCodeBlockEdge;

    BytecodeLivenessAnalysis& livenessAnalysisSlow();

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(ExecState* callerFrame);

    double optimizationThresholdScalingFactor();

    void updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantIdentifierSetRegisters(VM&, const Vector<ConstantIdentifierSetEntry>& constants);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable);

    void replaceConstant(int index, JSValue value)
    {
        ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
        m_constantRegisters[index - FirstConstantRegisterIndex].set(*m_vm, this, value);
    }

    bool shouldVisitStrongly(const ConcurrentJSLocker&);
    bool shouldJettisonDueToWeakReference(VM&);
    bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);

    void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
    void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);

    void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
    void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
    void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);

    unsigned numberOfNonArgumentValueProfiles() { return m_numberOfNonArgumentValueProfiles; }
    unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfNonArgumentValueProfiles(); }
    ValueProfile* tryGetValueProfileForBytecodeOffset(int bytecodeOffset);

    Seconds timeSinceCreation()
    {
        return MonotonicTime::now() - m_creationTime;
    }

    void createRareDataIfNecessary()
    {
        if (!m_rareData) {
            auto rareData = std::make_unique<RareData>();
            WTF::storeStoreFence(); // m_catchProfiles can be touched from compiler threads.
            m_rareData = WTFMove(rareData);
        }
    }

    void insertBasicBlockBoundariesForControlFlowProfiler();
    void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch&, InstructionStream::Offset);

    int m_numCalleeLocals;
    int m_numVars;
    int m_numParameters;
    int m_numberOfArgumentsToSkip { 0 };
    unsigned m_numberOfNonArgumentValueProfiles { 0 };
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    unsigned m_bytecodeCost { 0 };
    VirtualRegister m_scopeRegister;
    mutable CodeBlockHash m_hash;

    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    WriteBarrier<ExecutableToCodeBlockEdge> m_ownerEdge;
    VM* m_vm;

    const void* m_instructionsRawPointer { nullptr };
    SentinelLinkedList<LLIntCallLinkInfo, PackedRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    StructureWatchpointMap m_llintGetByIdWatchpointMap;
    RefPtr<JITCode> m_jitCode;
#if ENABLE(JIT)
    std::unique_ptr<JITData> m_jitData;
#endif
#if ENABLE(DFG_JIT)
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
    RefCountedArray<ValueProfile> m_argumentValueProfiles;

    // Constant Pool
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // FIXME: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;

    WriteBarrier<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    uint32_t m_osrExitCounter;

    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    RefPtr<MetadataTable> m_metadata;

    MonotonicTime m_creationTime;
    double m_previousCounter { 0 };

    std::unique_ptr<RareData> m_rareData;
};

inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::r(VirtualRegister reg)
{
    return r(reg.offset());
}

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

inline Register& ExecState::uncheckedR(VirtualRegister reg)
{
    return uncheckedR(reg.offset());
}

template <typename ExecutableType>
Exception* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
{
    if (hasJITCodeFor(kind)) {
        if (std::is_same<ExecutableType, EvalExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock());
        else if (std::is_same<ExecutableType, ProgramExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock());
        else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock());
        else if (std::is_same<ExecutableType, FunctionExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind));
        else
            RELEASE_ASSERT_NOT_REACHED();
        return nullptr;
    }
    return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
}
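
// A minimal usage sketch (hypothetical caller): an execution entry point would use
// prepareForExecution() to obtain, and if necessary compile, the CodeBlock for a
// given specialization before running it.
//
//     CodeBlock* codeBlock = nullptr;
//     if (Exception* error = executable->prepareForExecution<FunctionExecutable>(vm, function, scope, CodeForCall, codeBlock))
//         return error; // hypothetical caller propagates the exception
//     // ... run codeBlock ...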

#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
    (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))


void setPrinter(Printer::PrintRecord&, CodeBlock*);

} // namespace JSC

namespace WTF {

JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::CodeBlock*);

} // namespace WTF