/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CodeBlockHash.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompilationResult.h"
#include "ConcurrentJSLock.h"
#include "DFGCommon.h"
#include "DirectEvalCodeCache.h"
#include "EvalExecutable.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "FunctionExecutable.h"
#include "HandlerInfo.h"
#include "ICStatusMap.h"
#include "Instruction.h"
#include "InstructionStream.h"
#include "JITCode.h"
#include "JITCodeMap.h"
#include "JITMathICForwards.h"
#include "JSCast.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "MetadataTable.h"
#include "ModuleProgramExecutable.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Printer.h"
#include "ProfilerJettisonReason.h"
#include "ProgramExecutable.h"
#include "PutPropertySlot.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

#if ENABLE(DFG_JIT)
namespace DFG {
struct OSRExitState;
} // namespace DFG
#endif

class BytecodeLivenessAnalysis;
class CodeBlockSet;
class ExecutableToCodeBlockEdge;
class JSModuleEnvironment;
class LLIntOffsetsExtractor;
class LLIntPrototypeLoadAdaptiveStructureWatchpoint;
class MetadataTable;
class PCToCodeOriginMap;
class RegisterAtOffsetList;
class StructureStubInfo;

enum class AccessType : int8_t;

struct ArithProfile;
struct OpCatch;

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

class CodeBlock : public JSCell {
    typedef JSCell Base;
    friend class BytecodeLivenessAnalysis;
    friend class JIT;
    friend class LLIntOffsetsExtractor;

public:

    enum CopyParsedBlockTag { CopyParsedBlock };

    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
    static const bool needsDestruction = true;

    template<typename, SubspaceAccess>
    static void subspaceFor(VM&) { }

    DECLARE_INFO;

protected:
    CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
    CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);

    void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
    bool finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);

    void finishCreationCommon(VM&);

    WriteBarrier<JSGlobalObject> m_globalObject;

public:
    JS_EXPORT_PRIVATE ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    JS_EXPORT_PRIVATE void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }

    int numCalleeLocals() const { return m_numCalleeLocals; }

    int numVars() const { return m_numVars; }

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
    void setAlternative(VM&, CodeBlock*);

    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }
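    // Usage sketch (hypothetical caller; only forEachRelatedCodeBlock() itself comes
    // from this header): the functor just needs to accept a CodeBlock*, as implied by
    // the call f(currentCodeBlock) above. For example, counting related blocks:
    //
    //     unsigned relatedBlocks = 0;
    //     codeBlock->forEachRelatedCodeBlock([&] (CodeBlock*) {
    //         relatedBlocks++;
    //     });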

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(isConstructor());
    }

    CodeBlock* alternativeForJettison();
    JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    static size_t estimatedSize(JSCell*, VM&);
    static void visitChildren(JSCell*, SlotVisitor&);
    static void destroy(JSCell*);
    void visitChildren(SlotVisitor&);
    void finalizeUnconditionally(VM&);

    void notifyLexicalBindingUpdate();

    void dumpSource();
    void dumpSource(PrintStream&);

    void dumpBytecode();
    void dumpBytecode(PrintStream&);
    void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap());
    void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap());

    void dumpExceptionHandlers(PrintStream&);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    void dumpMathICStats();

    bool isStrictMode() const { return m_unlinkedCode->isStrictMode(); }
    bool isConstructor() const { return m_unlinkedCode->isConstructor(); }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
    CodeType codeType() const { return m_unlinkedCode->codeType(); }

    JSParserScriptMode scriptMode() const { return m_unlinkedCode->scriptMode(); }

    bool hasInstalledVMTrapBreakpoints() const;
    bool installVMTrapBreakpoints();

    inline bool isKnownNotImmediate(int index)
    {
        if (index == thisRegister().offset() && !isStrictMode())
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
    HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
    void removeExceptionHandlerForCallSite(CallSiteIndex);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;

    Optional<unsigned> bytecodeOffsetFromCallSiteIndex(CallSiteIndex);

    void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result);
    void getICStatusMap(ICStatusMap& result);

#if ENABLE(JIT)
    struct JITData {
        WTF_MAKE_STRUCT_FAST_ALLOCATED;

        Bag<StructureStubInfo> m_stubInfos;
        Bag<JITAddIC> m_addICs;
        Bag<JITMulIC> m_mulICs;
        Bag<JITNegIC> m_negICs;
        Bag<JITSubIC> m_subICs;
        Bag<ByValInfo> m_byValInfos;
        Bag<CallLinkInfo> m_callLinkInfos;
        SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
        SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
        SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
        std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
        std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
        JITCodeMap m_jitCodeMap;
    };

    JITData& ensureJITData(const ConcurrentJSLocker& locker)
    {
        if (LIKELY(m_jitData))
            return *m_jitData;
        return ensureJITDataSlow(locker);
    }
    JITData& ensureJITDataSlow(const ConcurrentJSLocker&);

    JITAddIC* addJITAddIC(ArithProfile*, const Instruction*);
    JITMulIC* addJITMulIC(ArithProfile*, const Instruction*);
    JITNegIC* addJITNegIC(ArithProfile*, const Instruction*);
    JITSubIC* addJITSubIC(ArithProfile*, const Instruction*);

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
    JITAddIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITAddIC(profile, instruction); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type>
    JITMulIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITMulIC(profile, instruction); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
    JITNegIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITNegIC(profile, instruction); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type>
    JITSubIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITSubIC(profile, instruction); }

    StructureStubInfo* addStubInfo(AccessType);

    // O(n) operation. Use getICStatusMap() unless you really only intend to get one
    // stub info.
    StructureStubInfo* findStubInfo(CodeOrigin);

    ByValInfo* addByValInfo();

    CallLinkInfo* addCallLinkInfo();

    // This is a slow function call used primarily for compiling OSR exits in the case
    // that there had been inlining. Chances are if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling this.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);

    void setJITCodeMap(JITCodeMap&& jitCodeMap)
    {
        ConcurrentJSLocker locker(m_lock);
        ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
    }
    const JITCodeMap& jitCodeMap()
    {
        ConcurrentJSLocker locker(m_lock);
        return ensureJITData(locker).m_jitCodeMap;
    }

    void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
    Optional<CodeOrigin> findPC(void* pc);

    void setCalleeSaveRegisters(RegisterSet);
    void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
    RareCaseProfile* rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
    unsigned rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        ConcurrentJSLocker locker(m_lock);
        unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        ConcurrentJSLocker locker(m_lock);
        unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }
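    // A hypothetical consumer of these profiles: an optimizing tier can consult
    // couldTakeSlowCase() before committing to a speculative fast path, e.g.
    //
    //     if (codeBlock->couldTakeSlowCase(bytecodeOffset))
    //         planGenericPath(); // placeholder name
    //     else
    //         planSpeculativeFastPath(); // placeholder name
    //
    // Only likelyToTakeSlowCase() and couldTakeSlowCase() come from this header; the
    // plan*() helpers are illustrative only.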

    // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
    // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
    // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
    // would be able to get rid of this silly function.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
    void resetJITData();
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
    void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)

    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);

    const Instruction* outOfLineJumpTarget(const Instruction* pc);
    int outOfLineJumpOffset(const Instruction* pc);
    int outOfLineJumpOffset(const InstructionStream::Ref& instruction)
    {
        return outOfLineJumpOffset(instruction.ptr());
    }

    inline unsigned bytecodeOffset(const Instruction* returnAddress)
    {
        const auto* instructionsBegin = instructions().at(0).ptr();
        const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size());
        RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd);
        return returnAddress - instructionsBegin;
    }

    const InstructionStream& instructions() const { return m_unlinkedCode->instructions(); }

    size_t predictedMachineCodeSize();

    unsigned instructionCount() const { return m_instructionCount; }

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    CodeBlock* newReplacement();

    void setJITCode(Ref<JITCode>&& code)
    {
        ASSERT(heap()->isDeferred());
        if (!code->isShared())
            heap()->reportExtraMemoryAllocated(code->size());

        ConcurrentJSLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = WTFMove(code);
    }

    RefPtr<JITCode> jitCode() { return m_jitCode; }
    static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }

#if ENABLE(JIT)
    CodeBlock* replacement();

    DFG::CapabilityLevel computeCapabilityLevel();
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    ExecutableToCodeBlockEdge* ownerEdge() const { return m_ownerEdge.get(); }

    VM* vm() const { return m_vm; }

    VirtualRegister thisRegister() const { return m_unlinkedCode->thisRegister(); }

    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setScopeRegister(VirtualRegister scopeRegister)
    {
        ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
        m_scopeRegister = scopeRegister;
    }

    VirtualRegister scopeRegister() const
    {
        return m_scopeRegister;
    }

    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    const SourceCode& source() const { return m_ownerExecutable->source(); }
    unsigned sourceOffset() const { return m_ownerExecutable->source().startOffset(); }
    unsigned firstLineColumnOffset() const { return m_ownerExecutable->startColumn(); }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    String nameForRegister(VirtualRegister);

    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters) || !vm()->canUseJIT());
        return m_argumentValueProfiles.size();
    }

    ValueProfile& valueProfileForArgument(unsigned argumentIndex)
    {
        ASSERT(vm()->canUseJIT()); // This is only called from the various JIT compilers or places that first check numberOfArgumentValueProfiles before calling this.
        ValueProfile& result = m_argumentValueProfiles[argumentIndex];
        ASSERT(result.m_bytecodeOffset == -1);
        return result;
    }

    ValueProfile& valueProfileForBytecodeOffset(int bytecodeOffset);
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);

    template<typename Functor> void forEachValueProfile(const Functor&);
    template<typename Functor> void forEachArrayProfile(const Functor&);
    template<typename Functor> void forEachArrayAllocationProfile(const Functor&);
    template<typename Functor> void forEachObjectAllocationProfile(const Functor&);
    template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&);

    ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset);
    ArithProfile* arithProfileForPC(const Instruction*);

    bool couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset);

    ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins();

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(CallSiteIndex index)
    {
        if (!hasCodeOrigins())
            return false;
        return index.bits() < codeOrigins().size();
    }

    CodeOrigin codeOrigin(CallSiteIndex index)
    {
        return codeOrigins()[index.bits()];
    }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles(const ConcurrentJSLocker&)
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const;
    const Identifier& identifier(int index) const;
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(*m_vm, this, v);
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    const BitVector& bitVector(size_t i) { return m_unlinkedCode->bitVector(i); }

    Heap* heap() const { return &m_vm->heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        return m_unlinkedCode->livenessAnalysis(this);
    }

    void validate();

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    int32_t thresholdForJIT(int32_t threshold);
    void jitAfterWarmUp();
    void jitSoon();

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
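    // Sketch of the intended tier-up check (the surrounding slow-path code is
    // hypothetical; only checkIfJITThresholdReached() and dontJITAnytimeSoon() are
    // declared here): the LLInt bumps m_llintExecuteCounter and asks
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         tierUpToBaseline(codeBlock); // placeholder for the actual compile path
    //
    // while dontJITAnytimeSoon() defers the counter indefinitely.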

    typedef HashMap<std::tuple<Structure*, const Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
    StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();

#if !ENABLE(C_LOOP)
    const RegisterAtOffsetList* calleeSaveRegisters() const;

    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
    size_t calleeSaveSpaceAsVirtualRegisters();
#else
    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 1; }
    size_t calleeSaveSpaceAsVirtualRegisters() { return 0; }
#endif

#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is generated code, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    enum class OptimizeAction { None, ReoptimizeNow };
#if ENABLE(DFG_JIT)
    OptimizeAction updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState&);
#endif

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();

#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);

    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }

    bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJSLock m_lock;
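    // Example of honoring the rule above from a non-main (e.g. compiler) thread; the
    // surrounding code is hypothetical, but ConcurrentJSLocker, m_lock, and
    // getICStatusMap() are all declared in this header:
    //
    //     ICStatusMap statusMap;
    //     {
    //         ConcurrentJSLocker locker(codeBlock->m_lock);
    //         codeBlock->getICStatusMap(locker, statusMap);
    //     }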

    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.

#if ENABLE(JIT)
    unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
#endif

    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailJITCompilation : 1;
    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    // These are internal methods for use by validation code. They would be private if
    // it weren't for the fact that we use them from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Jump Tables
        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles;

        DirectEvalCodeCache m_directEvalCodeCache;
    };

    void clearExceptionHandlers()
    {
        if (m_rareData)
            m_rareData->m_exceptionHandlers.clear();
    }

    void appendExceptionHandler(const HandlerInfo& handler)
    {
        createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
        m_rareData->m_exceptionHandlers.append(handler);
    }

    CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);

    void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset);

    bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); }

    template<typename Metadata>
    Metadata& metadata(OpcodeID opcodeID, unsigned metadataID)
    {
        ASSERT(m_metadata);
        return bitwise_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID];
    }
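    // Usage sketch for metadata(): the opcode's generated Metadata struct and the
    // metadata ID come from the bytecode stream; the names below are illustrative:
    //
    //     auto& entry = codeBlock->metadata<SomeOp::Metadata>(someOpcodeID, someMetadataID);
    //
    // This header only provides the typed accessor over m_metadata.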

    size_t metadataSizeInBytes()
    {
        return m_unlinkedCode->metadataSizeInBytes();
    }

protected:
    void finalizeLLIntInlineCaches();
#if ENABLE(JIT)
    void finalizeBaselineJITInlineCaches();
#endif
#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;
    friend class ExecutableToCodeBlockEdge;

    BytecodeLivenessAnalysis& livenessAnalysisSlow();

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(ExecState* callerFrame);

    double optimizationThresholdScalingFactor();

    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantIdentifierSetRegisters(VM&, const Vector<ConstantIdentifierSetEntry>& constants);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation);

    void replaceConstant(int index, JSValue value)
    {
        ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
        m_constantRegisters[index - FirstConstantRegisterIndex].set(*m_vm, this, value);
    }

    bool shouldVisitStrongly(const ConcurrentJSLocker&);
    bool shouldJettisonDueToWeakReference(VM&);
    bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);

    void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
    void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);

    void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
    void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
    void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);

    unsigned numberOfNonArgumentValueProfiles() { return m_numberOfNonArgumentValueProfiles; }
    unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfNonArgumentValueProfiles(); }
    ValueProfile* tryGetValueProfileForBytecodeOffset(int bytecodeOffset);

    Seconds timeSinceCreation()
    {
        return MonotonicTime::now() - m_creationTime;
    }

    void createRareDataIfNecessary()
    {
        if (!m_rareData) {
            auto rareData = std::make_unique<RareData>();
            WTF::storeStoreFence(); // m_catchProfiles can be touched from compiler threads.
            m_rareData = WTFMove(rareData);
        }
    }

    void insertBasicBlockBoundariesForControlFlowProfiler();
    void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch&, InstructionStream::Offset);

    int m_numCalleeLocals;
    int m_numVars;
    int m_numParameters;
    int m_numberOfArgumentsToSkip { 0 };
    unsigned m_numberOfNonArgumentValueProfiles { 0 };
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    unsigned m_instructionCount { 0 };
    VirtualRegister m_scopeRegister;
    mutable CodeBlockHash m_hash;

    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    WriteBarrier<ExecutableToCodeBlockEdge> m_ownerEdge;
    VM* m_vm;

    const void* m_instructionsRawPointer { nullptr };
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    StructureWatchpointMap m_llintGetByIdWatchpointMap;
    RefPtr<JITCode> m_jitCode;
#if ENABLE(JIT)
    std::unique_ptr<JITData> m_jitData;
#endif
#if ENABLE(DFG_JIT)
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
    RefCountedArray<ValueProfile> m_argumentValueProfiles;

    // Constant Pool
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;

    WriteBarrier<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    uint32_t m_osrExitCounter;

    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    RefPtr<MetadataTable> m_metadata;

    MonotonicTime m_creationTime;

    std::unique_ptr<RareData> m_rareData;
};

inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::r(VirtualRegister reg)
{
    return r(reg.offset());
}

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

inline Register& ExecState::uncheckedR(VirtualRegister reg)
{
    return uncheckedR(reg.offset());
}

template <typename ExecutableType>
Exception* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
{
    if (hasJITCodeFor(kind)) {
        if (std::is_same<ExecutableType, EvalExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock());
        else if (std::is_same<ExecutableType, ProgramExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock());
        else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock());
        else if (std::is_same<ExecutableType, FunctionExecutable>::value)
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind));
        else
            RELEASE_ASSERT_NOT_REACHED();
        return nullptr;
    }
    return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
}
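// Typical call pattern for prepareForExecution() (the local variables here are
// illustrative; the template itself is defined above): callers name the executable's
// concrete type so an existing CodeBlock can be returned without recompiling.
//
//     CodeBlock* codeBlock = nullptr;
//     Exception* error = functionExecutable->prepareForExecution<FunctionExecutable>(
//         vm, function, scope, CodeForCall, codeBlock);
//     // On success, error is nullptr and codeBlock is ready to run.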

#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
    (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))
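// Usage sketch for CODEBLOCK_LOG_EVENT (the summary and details values are
// illustrative): details is a parenthesized argument list forwarded to toCString(),
// and the lambda defers building that string until the VM actually logs the event.
//
//     CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason));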


void setPrinter(Printer::PrintRecord&, CodeBlock*);

} // namespace JSC

namespace WTF {

JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::CodeBlock*);

} // namespace WTF
