1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGByteCodeParser.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "ArithProfile.h"
32#include "ArrayConstructor.h"
33#include "BasicBlockLocation.h"
34#include "BuiltinNames.h"
35#include "BytecodeGenerator.h"
36#include "CallLinkStatus.h"
37#include "CodeBlock.h"
38#include "CodeBlockWithJITType.h"
39#include "CommonSlowPaths.h"
40#include "DFGAbstractHeap.h"
41#include "DFGArrayMode.h"
42#include "DFGCFG.h"
43#include "DFGCapabilities.h"
44#include "DFGClobberize.h"
45#include "DFGClobbersExitState.h"
46#include "DFGGraph.h"
47#include "DFGJITCode.h"
48#include "FunctionCodeBlock.h"
49#include "GetByStatus.h"
50#include "GetterSetter.h"
51#include "Heap.h"
52#include "InByIdStatus.h"
53#include "InstanceOfStatus.h"
54#include "JSCInlines.h"
55#include "JSFixedArray.h"
56#include "JSImmutableButterfly.h"
57#include "JSInternalPromise.h"
58#include "JSInternalPromiseConstructor.h"
59#include "JSModuleEnvironment.h"
60#include "JSModuleNamespaceObject.h"
61#include "JSPromiseConstructor.h"
62#include "NumberConstructor.h"
63#include "ObjectConstructor.h"
64#include "OpcodeInlines.h"
65#include "PreciseJumpTargets.h"
66#include "PutByIdFlags.h"
67#include "PutByIdStatus.h"
68#include "RegExpPrototype.h"
69#include "StackAlignment.h"
70#include "StringConstructor.h"
71#include "StructureStubInfo.h"
72#include "SymbolConstructor.h"
73#include "Watchdog.h"
74#include <wtf/CommaPrinter.h>
75#include <wtf/HashMap.h>
76#include <wtf/MathExtras.h>
77#include <wtf/SetForScope.h>
78#include <wtf/StdLibExtras.h>
79
80namespace JSC { namespace DFG {
81
82namespace DFGByteCodeParserInternal {
83#ifdef NDEBUG
84static constexpr bool verbose = false;
85#else
86static constexpr bool verbose = true;
87#endif
88} // namespace DFGByteCodeParserInternal
89
90#define VERBOSE_LOG(...) do { \
91if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
92dataLog(__VA_ARGS__); \
93} while (false)
94
95// === ByteCodeParser ===
96//
97// This class is used to compile the dataflow graph from a CodeBlock.
98class ByteCodeParser {
99public:
100 ByteCodeParser(Graph& graph)
101 : m_vm(&graph.m_vm)
102 , m_codeBlock(graph.m_codeBlock)
103 , m_profiledBlock(graph.m_profiledBlock)
104 , m_graph(graph)
105 , m_currentBlock(0)
106 , m_currentIndex(0)
107 , m_constantUndefined(graph.freeze(jsUndefined()))
108 , m_constantNull(graph.freeze(jsNull()))
109 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
110 , m_constantOne(graph.freeze(jsNumber(1)))
111 , m_numArguments(m_codeBlock->numParameters())
112 , m_numLocals(m_codeBlock->numCalleeLocals())
113 , m_parameterSlots(0)
114 , m_numPassedVarArgs(0)
115 , m_inlineStackTop(0)
116 , m_currentInstruction(0)
117 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
118 {
119 ASSERT(m_profiledBlock);
120 }
121
122 // Parse a full CodeBlock of bytecode.
123 void parse();
124
125private:
126 struct InlineStackEntry;
127
128 // Just parse from m_currentIndex to the end of the current CodeBlock.
129 void parseCodeBlock();
130
131 void ensureLocals(unsigned newNumLocals)
132 {
133 VERBOSE_LOG(" ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
134 if (newNumLocals <= m_numLocals)
135 return;
136 m_numLocals = newNumLocals;
137 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
138 m_graph.block(i)->ensureLocals(newNumLocals);
139 }
140
141 // Helper for min and max.
142 template<typename ChecksFunctor>
143 bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
144
145 void refineStatically(CallLinkStatus&, Node* callTarget);
146 // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
147 // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
148 // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
149 // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
150 // than to move the right index all the way to the treatment of op_ret.
151 BasicBlock* allocateTargetableBlock(BytecodeIndex);
152 BasicBlock* allocateUntargetableBlock();
153 // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
154 void makeBlockTargetable(BasicBlock*, BytecodeIndex);
155 void addJumpTo(BasicBlock*);
156 void addJumpTo(unsigned bytecodeIndex);
157 // Handle calls. This resolves issues surrounding inlining and intrinsics.
158 enum Terminality { Terminal, NonTerminal };
159 Terminality handleCall(
160 VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
161 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
162 SpeculatedType prediction);
163 template<typename CallOp>
164 Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
165 template<typename CallOp>
166 Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
167 void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt);
168 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
169 Node* getArgumentCount();
170 template<typename ChecksFunctor>
171 bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
172 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
173 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
174 bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
175 unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
176 enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
177 CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
178 CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
179 template<typename ChecksFunctor>
180 void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
181 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
182 template<typename ChecksFunctor>
183 bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
184 template<typename ChecksFunctor>
185 bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
186 template<typename ChecksFunctor>
187 bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
188 template<typename ChecksFunctor>
189 bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
190 template<typename ChecksFunctor>
191 bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
192 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
193 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
194 bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
195 bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByStatus);
196
197 template<typename Bytecode>
198 void handlePutByVal(Bytecode, unsigned instructionSize);
199 template <typename Bytecode>
200 void handlePutAccessorById(NodeType, Bytecode);
201 template <typename Bytecode>
202 void handlePutAccessorByVal(NodeType, Bytecode);
203 template <typename Bytecode>
204 void handleNewFunc(NodeType, Bytecode);
205 template <typename Bytecode>
206 void handleNewFuncExp(NodeType, Bytecode);
207 template <typename Bytecode>
208 void handleCreateInternalFieldObject(const ClassInfo*, NodeType createOp, NodeType newOp, Bytecode);
209
210 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
211 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
212 ObjectPropertyCondition presenceLike(
213 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214
215 // Attempt to watch the presence of a property. It will watch that the property is present in the same
216 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
217 // Returns true if this all works out.
218 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
219 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
220
221 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
222 template<typename VariantType>
223 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
224
225 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
226
227 template<typename Op>
228 void parseGetById(const Instruction*);
229 void handleGetById(
230 VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByStatus, AccessType, unsigned instructionSize);
231 void emitPutById(
232 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
233 void handlePutById(
234 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
235 bool isDirect, unsigned intructionSize);
236
237 // Either register a watchpoint or emit a check for this condition. Returns false if the
238 // condition no longer holds, and therefore no reasonable check can be emitted.
239 bool check(const ObjectPropertyCondition&);
240
241 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
242
243 // Either register a watchpoint or emit a check for this condition. It must be a Presence
244 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
245 // Emits code for the loaded value that the condition guards, and returns a node containing
246 // the loaded value. Returns null if the condition no longer holds.
247 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
248 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
249 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
250
251 // Calls check() for each condition in the set: that is, it either emits checks or registers
252 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
253 // conditions are no longer checkable, returns false.
254 bool check(const ObjectPropertyConditionSet&);
255
256 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
257 // base. Does a combination of watchpoint registration and check emission to guard the
258 // conditions, and emits code to load the value from the slot base. Returns a node containing
259 // the loaded value. Returns null if any of the conditions were no longer checkable.
260 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
261 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
262
263 void prepareToParseBlock();
264 void clearCaches();
265
266 // Parse a single basic block of bytecode instructions.
267 void parseBlock(unsigned limit);
268 // Link block successors.
269 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
270 void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
271
272 VariableAccessData* newVariableAccessData(VirtualRegister operand)
273 {
274 ASSERT(!operand.isConstant());
275
276 m_graph.m_variableAccessData.append(operand);
277 return &m_graph.m_variableAccessData.last();
278 }
279
280 // Get/Set the operands/result of a bytecode instruction.
281 Node* getDirect(VirtualRegister operand)
282 {
283 ASSERT(!operand.isConstant());
284
285 // Is this an argument?
286 if (operand.isArgument())
287 return getArgument(operand);
288
289 // Must be a local.
290 return getLocal(operand);
291 }
292
293 Node* get(VirtualRegister operand)
294 {
295 if (operand.isConstant()) {
296 unsigned constantIndex = operand.toConstantIndex();
297 unsigned oldSize = m_constants.size();
298 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
299 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
300 JSValue value = codeBlock.getConstant(operand.offset());
301 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
302 if (constantIndex >= oldSize) {
303 m_constants.grow(constantIndex + 1);
304 for (unsigned i = oldSize; i < m_constants.size(); ++i)
305 m_constants[i] = nullptr;
306 }
307
308 Node* constantNode = nullptr;
309 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
310 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
311 else
312 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
313 m_constants[constantIndex] = constantNode;
314 }
315 ASSERT(m_constants[constantIndex]);
316 return m_constants[constantIndex];
317 }
318
319 if (inlineCallFrame()) {
320 if (!inlineCallFrame()->isClosureCall) {
321 JSFunction* callee = inlineCallFrame()->calleeConstant();
322 if (operand.offset() == CallFrameSlot::callee)
323 return weakJSConstant(callee);
324 }
325 } else if (operand.offset() == CallFrameSlot::callee) {
326 // We have to do some constant-folding here because this enables CreateThis folding. Note
327 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
328 // case if the function is a singleton then we already know it.
329 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
330 if (JSFunction* function = executable->singleton().inferredValue()) {
331 m_graph.watchpoints().addLazily(executable);
332 return weakJSConstant(function);
333 }
334 }
335 return addToGraph(GetCallee);
336 }
337
338 return getDirect(m_inlineStackTop->remapOperand(operand));
339 }
340
341 enum SetMode {
342 // A normal set which follows a two-phase commit that spans code origins. During
343 // the current code origin it issues a MovHint, and at the start of the next
344 // code origin there will be a SetLocal. If the local needs flushing, the second
345 // SetLocal will be preceded with a Flush.
346 NormalSet,
347
348 // A set where the SetLocal happens immediately and there is still a Flush. This
349 // is relevant when assigning to a local in tricky situations for the delayed
350 // SetLocal logic but where we know that we have not performed any side effects
351 // within this code origin. This is a safe replacement for NormalSet anytime we
352 // know that we have not yet performed side effects in this code origin.
353 ImmediateSetWithFlush,
354
355 // A set where the SetLocal happens immediately and we do not Flush it even if
356 // this is a local that is marked as needing it. This is relevant when
357 // initializing locals at the top of a function.
358 ImmediateNakedSet
359 };
360 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
361 {
362 addToGraph(MovHint, OpInfo(operand.offset()), value);
363
364 // We can't exit anymore because our OSR exit state has changed.
365 m_exitOK = false;
366
367 DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
368
369 if (setMode == NormalSet) {
370 m_setLocalQueue.append(delayed);
371 return nullptr;
372 }
373
374 return delayed.execute(this);
375 }
376
377 void processSetLocalQueue()
378 {
379 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
380 m_setLocalQueue[i].execute(this);
381 m_setLocalQueue.shrink(0);
382 }
383
384 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
385 {
386 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
387 }
388
389 Node* injectLazyOperandSpeculation(Node* node)
390 {
391 ASSERT(node->op() == GetLocal);
392 ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
393 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
394 LazyOperandValueProfileKey key(m_currentIndex, node->local());
395 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
396 node->variableAccessData()->predict(prediction);
397 return node;
398 }
399
400 // Used in implementing get/set, above, where the operand is a local variable.
401 Node* getLocal(VirtualRegister operand)
402 {
403 unsigned local = operand.toLocal();
404
405 Node* node = m_currentBlock->variablesAtTail.local(local);
406
407 // This has two goals: 1) link together variable access datas, and 2)
408 // try to avoid creating redundant GetLocals. (1) is required for
409 // correctness - no other phase will ensure that block-local variable
410 // access data unification is done correctly. (2) is purely opportunistic
411 // and is meant as an compile-time optimization only.
412
413 VariableAccessData* variable;
414
415 if (node) {
416 variable = node->variableAccessData();
417
418 switch (node->op()) {
419 case GetLocal:
420 return node;
421 case SetLocal:
422 return node->child1().node();
423 default:
424 break;
425 }
426 } else
427 variable = newVariableAccessData(operand);
428
429 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
430 m_currentBlock->variablesAtTail.local(local) = node;
431 return node;
432 }
433 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
434 {
435 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
436
437 unsigned local = operand.toLocal();
438
439 if (setMode != ImmediateNakedSet) {
440 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
441 if (argumentPosition)
442 flushDirect(operand, argumentPosition);
443 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
444 flush(operand);
445 }
446
447 VariableAccessData* variableAccessData = newVariableAccessData(operand);
448 variableAccessData->mergeStructureCheckHoistingFailed(
449 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
450 variableAccessData->mergeCheckArrayHoistingFailed(
451 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
452 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
453 m_currentBlock->variablesAtTail.local(local) = node;
454 return node;
455 }
456
457 // Used in implementing get/set, above, where the operand is an argument.
458 Node* getArgument(VirtualRegister operand)
459 {
460 unsigned argument = operand.toArgument();
461 ASSERT(argument < m_numArguments);
462
463 Node* node = m_currentBlock->variablesAtTail.argument(argument);
464
465 VariableAccessData* variable;
466
467 if (node) {
468 variable = node->variableAccessData();
469
470 switch (node->op()) {
471 case GetLocal:
472 return node;
473 case SetLocal:
474 return node->child1().node();
475 default:
476 break;
477 }
478 } else
479 variable = newVariableAccessData(operand);
480
481 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
482 m_currentBlock->variablesAtTail.argument(argument) = node;
483 return node;
484 }
485 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
486 {
487 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
488
489 unsigned argument = operand.toArgument();
490 ASSERT(argument < m_numArguments);
491
492 VariableAccessData* variableAccessData = newVariableAccessData(operand);
493
494 // Always flush arguments, except for 'this'. If 'this' is created by us,
495 // then make sure that it's never unboxed.
496 if (argument || m_graph.needsFlushedThis()) {
497 if (setMode != ImmediateNakedSet)
498 flushDirect(operand);
499 }
500
501 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
502 variableAccessData->mergeShouldNeverUnbox(true);
503
504 variableAccessData->mergeStructureCheckHoistingFailed(
505 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
506 variableAccessData->mergeCheckArrayHoistingFailed(
507 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
508 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
509 m_currentBlock->variablesAtTail.argument(argument) = node;
510 return node;
511 }
512
513 ArgumentPosition* findArgumentPositionForArgument(int argument)
514 {
515 InlineStackEntry* stack = m_inlineStackTop;
516 while (stack->m_inlineCallFrame)
517 stack = stack->m_caller;
518 return stack->m_argumentPositions[argument];
519 }
520
521 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
522 {
523 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
524 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
525 if (!inlineCallFrame)
526 break;
527 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
528 continue;
529 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
530 continue;
531 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
532 return stack->m_argumentPositions[argument];
533 }
534 return 0;
535 }
536
537 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
538 {
539 if (operand.isArgument())
540 return findArgumentPositionForArgument(operand.toArgument());
541 return findArgumentPositionForLocal(operand);
542 }
543
544 template<typename AddFlushDirectFunc>
545 void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
546 {
547 int numArguments;
548 if (inlineCallFrame) {
549 ASSERT(!m_graph.hasDebuggerEnabled());
550 numArguments = inlineCallFrame->argumentsWithFixup.size();
551 if (inlineCallFrame->isClosureCall)
552 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
553 if (inlineCallFrame->isVarargs())
554 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
555 } else
556 numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
557
558 for (unsigned argument = numArguments; argument--;)
559 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
560
561 if (m_graph.needsScopeRegister())
562 addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
563 }
564
565 template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
566 void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
567 {
568 origin.walkUpInlineStack(
569 [&] (CodeOrigin origin) {
570 BytecodeIndex bytecodeIndex = origin.bytecodeIndex();
571 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
572 flushImpl(inlineCallFrame, addFlushDirect);
573
574 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
575 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
576 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
577
578 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
579 if (livenessAtBytecode[local])
580 addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
581 }
582 });
583 }
584
585 void flush(VirtualRegister operand)
586 {
587 flushDirect(m_inlineStackTop->remapOperand(operand));
588 }
589
590 void flushDirect(VirtualRegister operand)
591 {
592 flushDirect(operand, findArgumentPosition(operand));
593 }
594
595 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
596 {
597 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
598 }
599
600 template<NodeType nodeType>
601 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
602 {
603 ASSERT(!operand.isConstant());
604
605 Node* node = m_currentBlock->variablesAtTail.operand(operand);
606
607 VariableAccessData* variable;
608
609 if (node)
610 variable = node->variableAccessData();
611 else
612 variable = newVariableAccessData(operand);
613
614 node = addToGraph(nodeType, OpInfo(variable));
615 m_currentBlock->variablesAtTail.operand(operand) = node;
616 if (argumentPosition)
617 argumentPosition->addVariable(variable);
618 }
619
620 void phantomLocalDirect(VirtualRegister operand)
621 {
622 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
623 }
624
625 void flush(InlineStackEntry* inlineStackEntry)
626 {
627 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
628 flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
629 }
630
631 void flushForTerminal()
632 {
633 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
634 auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
635 flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
636 }
637
638 void flushForReturn()
639 {
640 flush(m_inlineStackTop);
641 }
642
643 void flushIfTerminal(SwitchData& data)
644 {
645 if (data.fallThrough.bytecodeIndex() > m_currentIndex.offset())
646 return;
647
648 for (unsigned i = data.cases.size(); i--;) {
649 if (data.cases[i].target.bytecodeIndex() > m_currentIndex.offset())
650 return;
651 }
652
653 flushForTerminal();
654 }
655
656 // Assumes that the constant should be strongly marked.
657 Node* jsConstant(JSValue constantValue)
658 {
659 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
660 }
661
662 Node* weakJSConstant(JSValue constantValue)
663 {
664 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
665 }
666
667 // Helper functions to get/set the this value.
668 Node* getThis()
669 {
670 return get(m_inlineStackTop->m_codeBlock->thisRegister());
671 }
672
673 void setThis(Node* value)
674 {
675 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
676 }
677
678 InlineCallFrame* inlineCallFrame()
679 {
680 return m_inlineStackTop->m_inlineCallFrame;
681 }
682
683 bool allInlineFramesAreTailCalls()
684 {
685 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
686 }
687
688 CodeOrigin currentCodeOrigin()
689 {
690 return CodeOrigin(m_currentIndex, inlineCallFrame());
691 }
692
693 NodeOrigin currentNodeOrigin()
694 {
695 CodeOrigin semantic;
696 CodeOrigin forExit;
697
698 if (m_currentSemanticOrigin.isSet())
699 semantic = m_currentSemanticOrigin;
700 else
701 semantic = currentCodeOrigin();
702
703 forExit = currentCodeOrigin();
704
705 return NodeOrigin(semantic, forExit, m_exitOK);
706 }
707
708 BranchData* branchData(unsigned taken, unsigned notTaken)
709 {
710 // We assume that branches originating from bytecode always have a fall-through. We
711 // use this assumption to avoid checking for the creation of terminal blocks.
712 ASSERT((taken > m_currentIndex.offset()) || (notTaken > m_currentIndex.offset()));
713 BranchData* data = m_graph.m_branchData.add();
714 *data = BranchData::withBytecodeIndices(taken, notTaken);
715 return data;
716 }
717
718 Node* addToGraph(Node* node)
719 {
720 VERBOSE_LOG(" appended ", node, " ", Graph::opName(node->op()), "\n");
721
722 m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
723
724 m_currentBlock->append(node);
725 if (clobbersExitState(m_graph, node))
726 m_exitOK = false;
727 return node;
728 }
729
730 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
731 {
732 Node* result = m_graph.addNode(
733 op, currentNodeOrigin(), Edge(child1), Edge(child2),
734 Edge(child3));
735 return addToGraph(result);
736 }
737 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
738 {
739 Node* result = m_graph.addNode(
740 op, currentNodeOrigin(), child1, child2, child3);
741 return addToGraph(result);
742 }
743 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
744 {
745 Node* result = m_graph.addNode(
746 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
747 Edge(child3));
748 return addToGraph(result);
749 }
750 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
751 {
752 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
753 return addToGraph(result);
754 }
755 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
756 {
757 Node* result = m_graph.addNode(
758 op, currentNodeOrigin(), info1, info2,
759 Edge(child1), Edge(child2), Edge(child3));
760 return addToGraph(result);
761 }
762 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
763 {
764 Node* result = m_graph.addNode(
765 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
766 return addToGraph(result);
767 }
768
769 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
770 {
771 Node* result = m_graph.addNode(
772 Node::VarArg, op, currentNodeOrigin(), info1, info2,
773 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
774 addToGraph(result);
775
776 m_numPassedVarArgs = 0;
777
778 return result;
779 }
780
781 void addVarArgChild(Node* child)
782 {
783 m_graph.m_varArgChildren.append(Edge(child));
784 m_numPassedVarArgs++;
785 }
786
787 void addVarArgChild(Edge child)
788 {
789 m_graph.m_varArgChildren.append(child);
790 m_numPassedVarArgs++;
791 }
792
793 Node* addCallWithoutSettingResult(
794 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
795 OpInfo prediction)
796 {
797 addVarArgChild(callee);
798 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
799
800 if (parameterSlots > m_parameterSlots)
801 m_parameterSlots = parameterSlots;
802
803 for (int i = 0; i < argCount; ++i)
804 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
805
806 return addToGraph(Node::VarArg, op, opInfo, prediction);
807 }
808
809 Node* addCall(
810 VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
811 SpeculatedType prediction)
812 {
813 if (op == TailCall) {
814 if (allInlineFramesAreTailCalls())
815 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
816 op = TailCallInlinedCaller;
817 }
818
819
820 Node* call = addCallWithoutSettingResult(
821 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
822 if (result.isValid())
823 set(result, call);
824 return call;
825 }
826
827 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
828 {
829 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
830 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
831 // object's structure as soon as we make it a weakJSCosntant.
832 Node* objectNode = weakJSConstant(object);
833 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
834 return objectNode;
835 }
836
837 SpeculatedType getPredictionWithoutOSRExit(BytecodeIndex bytecodeIndex)
838 {
839 auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
840 {
841 SpeculatedType prediction;
842 {
843 ConcurrentJSLocker locker(codeBlock->m_lock);
844 prediction = codeBlock->valueProfilePredictionForBytecodeIndex(locker, codeOrigin.bytecodeIndex());
845 }
846 auto* fuzzerAgent = m_vm->fuzzerAgent();
847 if (UNLIKELY(fuzzerAgent))
848 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
849 return prediction;
850 };
851
852 SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
853 if (prediction != SpecNone)
854 return prediction;
855
856 // If we have no information about the values this
857 // node generates, we check if by any chance it is
858 // a tail call opcode. In that case, we walk up the
859 // inline frames to find a call higher in the call
860 // chain and use its prediction. If we only have
861 // inlined tail call frames, we use SpecFullTop
862 // to avoid a spurious OSR exit.
863 auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex.offset());
864 OpcodeID opcodeID = instruction->opcodeID();
865
866 switch (opcodeID) {
867 case op_tail_call:
868 case op_tail_call_varargs:
869 case op_tail_call_forward_arguments: {
870 // Things should be more permissive to us returning BOTTOM instead of TOP here.
871 // Currently, this will cause us to Force OSR exit. This is bad because returning
872 // TOP will cause anything that transitively touches this speculated type to
873 // also become TOP during prediction propagation.
874 // https://bugs.webkit.org/show_bug.cgi?id=164337
875 if (!inlineCallFrame())
876 return SpecFullTop;
877
878 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
879 if (!codeOrigin)
880 return SpecFullTop;
881
882 InlineStackEntry* stack = m_inlineStackTop;
883 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
884 stack = stack->m_caller;
885
886 return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
887 }
888
889 default:
890 return SpecNone;
891 }
892
893 RELEASE_ASSERT_NOT_REACHED();
894 return SpecNone;
895 }
896
897 SpeculatedType getPrediction(BytecodeIndex bytecodeIndex)
898 {
899 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
900
901 if (prediction == SpecNone) {
902 // We have no information about what values this node generates. Give up
903 // on executing this code, since we're likely to do more damage than good.
904 addToGraph(ForceOSRExit);
905 }
906
907 return prediction;
908 }
909
910 SpeculatedType getPredictionWithoutOSRExit()
911 {
912 return getPredictionWithoutOSRExit(m_currentIndex);
913 }
914
915 SpeculatedType getPrediction()
916 {
917 return getPrediction(m_currentIndex);
918 }
919
920 ArrayMode getArrayMode(Array::Action action)
921 {
922 CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
923 ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeIndex(m_currentInstruction));
924 return getArrayMode(*profile, action);
925 }
926
927 ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
928 {
929 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
930 profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
931 bool makeSafe = profile.outOfBounds(locker);
932 return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
933 }
934
935 Node* makeSafe(Node* node)
936 {
937 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
938 node->mergeFlags(NodeMayOverflowInt32InDFG);
939 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
940 node->mergeFlags(NodeMayNegZeroInDFG);
941
942 if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
943 return node;
944
945 switch (node->op()) {
946 case ArithAdd:
947 case ArithSub:
948 case ValueAdd: {
949 ObservedResults observed;
950 if (BinaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->binaryArithProfileForBytecodeIndex(m_currentIndex))
951 observed = arithProfile->observedResults();
952 else if (UnaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->unaryArithProfileForBytecodeIndex(m_currentIndex)) {
953 // Happens for OpInc/OpDec
954 observed = arithProfile->observedResults();
955 } else
956 break;
957
958 if (observed.didObserveDouble())
959 node->mergeFlags(NodeMayHaveDoubleResult);
960 if (observed.didObserveNonNumeric())
961 node->mergeFlags(NodeMayHaveNonNumericResult);
962 if (observed.didObserveBigInt())
963 node->mergeFlags(NodeMayHaveBigIntResult);
964 break;
965 }
966 case ValueMul:
967 case ArithMul: {
968 BinaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->binaryArithProfileForBytecodeIndex(m_currentIndex);
969 if (!arithProfile)
970 break;
971 if (arithProfile->didObserveInt52Overflow())
972 node->mergeFlags(NodeMayOverflowInt52);
973 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
974 node->mergeFlags(NodeMayOverflowInt32InBaseline);
975 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
976 node->mergeFlags(NodeMayNegZeroInBaseline);
977 if (arithProfile->didObserveDouble())
978 node->mergeFlags(NodeMayHaveDoubleResult);
979 if (arithProfile->didObserveNonNumeric())
980 node->mergeFlags(NodeMayHaveNonNumericResult);
981 if (arithProfile->didObserveBigInt())
982 node->mergeFlags(NodeMayHaveBigIntResult);
983 break;
984 }
985 case ValueNegate:
986 case ArithNegate:
987 case Inc:
988 case Dec: {
989 UnaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->unaryArithProfileForBytecodeIndex(m_currentIndex);
990 if (!arithProfile)
991 break;
992 if (arithProfile->argObservedType().sawNumber() || arithProfile->didObserveDouble())
993 node->mergeFlags(NodeMayHaveDoubleResult);
994 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
995 node->mergeFlags(NodeMayNegZeroInBaseline);
996 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
997 node->mergeFlags(NodeMayOverflowInt32InBaseline);
998 if (arithProfile->didObserveNonNumeric())
999 node->mergeFlags(NodeMayHaveNonNumericResult);
1000 if (arithProfile->didObserveBigInt())
1001 node->mergeFlags(NodeMayHaveBigIntResult);
1002 break;
1003 }
1004
1005 default:
1006 break;
1007 }
1008
1009 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
1010 switch (node->op()) {
1011 case UInt32ToNumber:
1012 case ArithAdd:
1013 case ArithSub:
1014 case ValueAdd:
1015 case ValueMod:
1016 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1017 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1018 break;
1019
1020 default:
1021 break;
1022 }
1023 }
1024
1025 return node;
1026 }
1027
1028 Node* makeDivSafe(Node* node)
1029 {
1030 ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1031
1032 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1033 node->mergeFlags(NodeMayOverflowInt32InDFG);
1034 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1035 node->mergeFlags(NodeMayNegZeroInDFG);
1036
1037 // The main slow case counter for op_div in the old JIT counts only when
1038 // the operands are not numbers. We don't care about that since we already
1039 // have speculations in place that take care of that separately. We only
1040 // care about when the outcome of the division is not an integer, which
1041 // is what the special fast case counter tells us.
1042
1043 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialArithFastCase(m_currentIndex))
1044 return node;
1045
1046 // FIXME: It might be possible to make this more granular.
1047 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1048
1049 BinaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->binaryArithProfileForBytecodeIndex(m_currentIndex);
1050 if (arithProfile->didObserveBigInt())
1051 node->mergeFlags(NodeMayHaveBigIntResult);
1052
1053 return node;
1054 }
1055
1056 void noticeArgumentsUse()
1057 {
1058 // All of the arguments in this function need to be formatted as JSValues because we will
1059 // load from them in a random-access fashion and we don't want to have to switch on
1060 // format.
1061
1062 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1063 argument->mergeShouldNeverUnbox(true);
1064 }
1065
1066 bool needsDynamicLookup(ResolveType, OpcodeID);
1067
1068 VM* m_vm;
1069 CodeBlock* m_codeBlock;
1070 CodeBlock* m_profiledBlock;
1071 Graph& m_graph;
1072
1073 // The current block being generated.
1074 BasicBlock* m_currentBlock;
1075 // The bytecode index of the current instruction being generated.
1076 BytecodeIndex m_currentIndex;
1077 // The semantic origin of the current node if different from the current Index.
1078 CodeOrigin m_currentSemanticOrigin;
1079 // True if it's OK to OSR exit right now.
1080 bool m_exitOK { false };
1081
1082 FrozenValue* m_constantUndefined;
1083 FrozenValue* m_constantNull;
1084 FrozenValue* m_constantNaN;
1085 FrozenValue* m_constantOne;
1086 Vector<Node*, 16> m_constants;
1087
1088 HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1089
1090 // The number of arguments passed to the function.
1091 unsigned m_numArguments;
1092 // The number of locals (vars + temporaries) used in the function.
1093 unsigned m_numLocals;
1094 // The number of slots (in units of sizeof(Register)) that we need to
1095 // preallocate for arguments to outgoing calls from this frame. This
1096 // number includes the CallFrame slots that we initialize for the callee
1097 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1098 // This number is 0 if and only if this function is a leaf.
1099 unsigned m_parameterSlots;
1100 // The number of var args passed to the next var arg node.
1101 unsigned m_numPassedVarArgs;
1102
1103 struct InlineStackEntry {
1104 ByteCodeParser* m_byteCodeParser;
1105
1106 CodeBlock* m_codeBlock;
1107 CodeBlock* m_profiledBlock;
1108 InlineCallFrame* m_inlineCallFrame;
1109
1110 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1111
1112 QueryableExitProfile m_exitProfile;
1113
1114 // Remapping of identifier and constant numbers from the code block being
1115 // inlined (inline callee) to the code block that we're inlining into
1116 // (the machine code block, which is the transitive, though not necessarily
1117 // direct, caller).
1118 Vector<unsigned> m_identifierRemap;
1119 Vector<unsigned> m_switchRemap;
1120
1121 // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1122 // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1123 Vector<BasicBlock*> m_unlinkedBlocks;
1124
1125 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1126 // cannot have two blocks that have the same bytecodeBegin.
1127 Vector<BasicBlock*> m_blockLinkingTargets;
1128
1129 // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
1130 BasicBlock* m_continuationBlock;
1131
1132 VirtualRegister m_returnValue;
1133
1134 // Speculations about variable types collected from the profiled code block,
1135 // which are based on OSR exit profiles that past DFG compilations of this
1136 // code block had gathered.
1137 LazyOperandValueProfileParser m_lazyOperands;
1138
1139 ICStatusMap m_baselineMap;
1140 ICStatusContext m_optimizedContext;
1141
1142 // Pointers to the argument position trackers for this slice of code.
1143 Vector<ArgumentPosition*> m_argumentPositions;
1144
1145 InlineStackEntry* m_caller;
1146
1147 InlineStackEntry(
1148 ByteCodeParser*,
1149 CodeBlock*,
1150 CodeBlock* profiledBlock,
1151 JSFunction* callee, // Null if this is a closure call.
1152 VirtualRegister returnValueVR,
1153 VirtualRegister inlineCallFrameStart,
1154 int argumentCountIncludingThis,
1155 InlineCallFrame::Kind,
1156 BasicBlock* continuationBlock);
1157
1158 ~InlineStackEntry();
1159
1160 VirtualRegister remapOperand(VirtualRegister operand) const
1161 {
1162 if (!m_inlineCallFrame)
1163 return operand;
1164
1165 ASSERT(!operand.isConstant());
1166
1167 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1168 }
1169 };
1170
1171 InlineStackEntry* m_inlineStackTop;
1172
1173 ICStatusContextStack m_icContextStack;
1174
1175 struct DelayedSetLocal {
1176 CodeOrigin m_origin;
1177 VirtualRegister m_operand;
1178 Node* m_value;
1179 SetMode m_setMode;
1180
1181 DelayedSetLocal() { }
1182 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1183 : m_origin(origin)
1184 , m_operand(operand)
1185 , m_value(value)
1186 , m_setMode(setMode)
1187 {
1188 RELEASE_ASSERT(operand.isValid());
1189 }
1190
1191 Node* execute(ByteCodeParser* parser)
1192 {
1193 if (m_operand.isArgument())
1194 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1195 return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1196 }
1197 };
1198
1199 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1200
1201 const Instruction* m_currentInstruction;
1202 bool m_hasDebuggerEnabled;
1203 bool m_hasAnyForceOSRExits { false };
1204};
1205
1206BasicBlock* ByteCodeParser::allocateTargetableBlock(BytecodeIndex bytecodeIndex)
1207{
1208 ASSERT(bytecodeIndex);
1209 Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1210 BasicBlock* blockPtr = block.ptr();
1211 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1212 if (m_inlineStackTop->m_blockLinkingTargets.size())
1213 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin.offset() < bytecodeIndex.offset());
1214 m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1215 m_graph.appendBlock(WTFMove(block));
1216 return blockPtr;
1217}
1218
1219BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1220{
1221 Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_numArguments, m_numLocals, 1));
1222 BasicBlock* blockPtr = block.ptr();
1223 m_graph.appendBlock(WTFMove(block));
1224 return blockPtr;
1225}
1226
1227void ByteCodeParser::makeBlockTargetable(BasicBlock* block, BytecodeIndex bytecodeIndex)
1228{
1229 RELEASE_ASSERT(!block->bytecodeBegin);
1230 block->bytecodeBegin = bytecodeIndex;
1231 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1232 if (m_inlineStackTop->m_blockLinkingTargets.size())
1233 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin.offset() < bytecodeIndex.offset());
1234 m_inlineStackTop->m_blockLinkingTargets.append(block);
1235}
1236
1237void ByteCodeParser::addJumpTo(BasicBlock* block)
1238{
1239 ASSERT(!m_currentBlock->terminal());
1240 Node* jumpNode = addToGraph(Jump);
1241 jumpNode->targetBlock() = block;
1242 m_currentBlock->didLink();
1243}
1244
1245void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1246{
1247 ASSERT(!m_currentBlock->terminal());
1248 addToGraph(Jump, OpInfo(bytecodeIndex));
1249 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1250}
1251
1252template<typename CallOp>
1253ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1254{
1255 auto bytecode = pc->as<CallOp>();
1256 Node* callTarget = get(bytecode.m_callee);
1257 int registerOffset = -static_cast<int>(bytecode.m_argv);
1258
1259 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1260 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1261 m_inlineStackTop->m_baselineMap, m_icContextStack);
1262
1263 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1264
1265 return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1266 bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1267}
1268
1269void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1270{
1271 if (callTarget->isCellConstant())
1272 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1273}
1274
1275ByteCodeParser::Terminality ByteCodeParser::handleCall(
1276 VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1277 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1278 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1279{
1280 ASSERT(registerOffset <= 0);
1281
1282 refineStatically(callLinkStatus, callTarget);
1283
1284 VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1285
1286 // If we have profiling information about this call, and it did not behave too polymorphically,
1287 // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1288 if (callLinkStatus.canOptimize()) {
1289 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1290
1291 VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1292 auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1293 argumentCountIncludingThis, BytecodeIndex(m_currentIndex.offset() + instructionSize), op, kind, prediction);
1294 if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1295 return Terminal;
1296 if (optimizationResult == CallOptimizationResult::Inlined) {
1297 if (UNLIKELY(m_graph.compilation()))
1298 m_graph.compilation()->noticeInlinedCall();
1299 return NonTerminal;
1300 }
1301 }
1302
1303 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1304 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1305 return callNode->op() == TailCall ? Terminal : NonTerminal;
1306}
1307
1308template<typename CallOp>
1309ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1310{
1311 auto bytecode = pc->as<CallOp>();
1312 int firstFreeReg = bytecode.m_firstFree.offset();
1313 int firstVarArgOffset = bytecode.m_firstVarArg;
1314
1315 SpeculatedType prediction = getPrediction();
1316
1317 Node* callTarget = get(bytecode.m_callee);
1318
1319 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1320 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1321 m_inlineStackTop->m_baselineMap, m_icContextStack);
1322 refineStatically(callLinkStatus, callTarget);
1323
1324 VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1325
1326 if (callLinkStatus.canOptimize()) {
1327 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1328
1329 if (handleVarargsInlining(callTarget, bytecode.m_dst,
1330 callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1331 firstVarArgOffset, op,
1332 InlineCallFrame::varargsKindFor(callMode))) {
1333 if (UNLIKELY(m_graph.compilation()))
1334 m_graph.compilation()->noticeInlinedCall();
1335 return NonTerminal;
1336 }
1337 }
1338
1339 CallVarargsData* data = m_graph.m_callVarargsData.add();
1340 data->firstVarArgOffset = firstVarArgOffset;
1341
1342 Node* thisChild = get(bytecode.m_thisValue);
1343 Node* argumentsChild = nullptr;
1344 if (op != TailCallForwardVarargs)
1345 argumentsChild = get(bytecode.m_arguments);
1346
1347 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1348 if (allInlineFramesAreTailCalls()) {
1349 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1350 return Terminal;
1351 }
1352 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1353 }
1354
1355 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1356 if (bytecode.m_dst.isValid())
1357 set(bytecode.m_dst, call);
1358 return NonTerminal;
1359}
1360
1361void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1362{
1363 Node* thisArgument;
1364 if (thisArgumentReg.isValid())
1365 thisArgument = get(thisArgumentReg);
1366 else
1367 thisArgument = nullptr;
1368
1369 JSCell* calleeCell;
1370 Node* callTargetForCheck;
1371 if (callee.isClosureCall()) {
1372 calleeCell = callee.executable();
1373 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1374 } else {
1375 calleeCell = callee.nonExecutableCallee();
1376 callTargetForCheck = callTarget;
1377 }
1378
1379 ASSERT(calleeCell);
1380 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1381 if (thisArgument)
1382 addToGraph(Phantom, thisArgument);
1383}
1384
1385Node* ByteCodeParser::getArgumentCount()
1386{
1387 Node* argumentCount;
1388 if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1389 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1390 else
1391 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1392 return argumentCount;
1393}
1394
1395void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1396{
1397 for (int i = 0; i < argumentCountIncludingThis; ++i)
1398 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1399}
1400
1401template<typename ChecksFunctor>
1402bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1403{
1404 if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1405 return false;
1406
1407 auto targetExecutable = callVariant.executable();
1408 InlineStackEntry* stackEntry = m_inlineStackTop;
1409 do {
1410 if (targetExecutable != stackEntry->executable())
1411 continue;
1412 VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
1413
1414 if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1415            // FIXME: We only accept jumping to a CallFrame that has exactly the same argumentCountIncludingThis. We could lift this restriction by fixing up the arguments.
1416            // We could also allow jumping into a varargs CallFrame when the number of arguments passed is greater than or equal to the CallFrame's mandatoryMinimum.
1417 // https://bugs.webkit.org/show_bug.cgi?id=202317
1418
1419 // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1420 // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1421 if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1422 continue;
1423            // If the target InlineCallFrame is varargs, we do not know how many arguments are actually filled by LoadVarargs. A varargs InlineCallFrame's
1424            // argumentCountIncludingThis is the maximum number of arguments that LoadVarargs could potentially fill. We "continue" to an outer frame, which may be
1425            // a better target to jump into.
1426 if (callFrame->isVarargs())
1427 continue;
1428 } else {
1429 // We are in the machine code entry (i.e. the original caller).
1430 // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1431 if (argumentCountIncludingThis > m_codeBlock->numParameters())
1432 return false;
1433 }
1434
1435        // If an InlineCallFrame is not a closure call, it was optimized for a constant callee.
1436        // Check that it is the same callee that we are trying to inline here.
1437 if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1438 if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1439 continue;
1440 }
1441
1442 // We must add some check that the profiling information was correct and the target of this call is what we thought.
1443 emitFunctionCheckIfNeeded();
1444 // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1445 flushForTerminal();
1446
1447 // We must set the callee to the right value
1448 if (stackEntry->m_inlineCallFrame) {
1449 if (stackEntry->m_inlineCallFrame->isClosureCall)
1450 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1451 } else
1452 addToGraph(SetCallee, callTargetNode);
1453
1454 // We must set the arguments to the right values
1455 if (!stackEntry->m_inlineCallFrame)
1456 addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1457 int argIndex = 0;
1458 for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1459 Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1460 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1461 }
1462 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1463 for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1464 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1465
1466 // We must repeat the work of op_enter here as we will jump right after it.
1467 // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1468 for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1469 setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1470
1471 // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1472 BytecodeIndex oldIndex = m_currentIndex;
1473 auto oldStackTop = m_inlineStackTop;
1474 m_inlineStackTop = stackEntry;
1475 m_currentIndex = BytecodeIndex(opcodeLengths[op_enter]);
1476 m_exitOK = true;
1477 processSetLocalQueue();
1478 m_currentIndex = oldIndex;
1479 m_inlineStackTop = oldStackTop;
1480 m_exitOK = false;
1481
1482 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, BytecodeIndex>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), BytecodeIndex(opcodeLengths[op_enter]), getBytecodeBeginForBlock);
1483 RELEASE_ASSERT(entryBlockPtr);
1484 addJumpTo(*entryBlockPtr);
1485 return true;
1486 // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1487 } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1488
1489 // The tail call was not recursive
1490 return false;
1491}
1492
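// Returns UINT_MAX when the callee cannot be inlined at all; otherwise returns the callee's
// bytecode cost, which callers weigh against their remaining inlining balance.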
1493unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1494{
1495 CallMode callMode = InlineCallFrame::callModeFor(kind);
1496 CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1497 VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1498
1499 if (m_hasDebuggerEnabled) {
1500 VERBOSE_LOG(" Failing because the debugger is in use.\n");
1501 return UINT_MAX;
1502 }
1503
1504 FunctionExecutable* executable = callee.functionExecutable();
1505 if (!executable) {
1506 VERBOSE_LOG(" Failing because there is no function executable.\n");
1507 return UINT_MAX;
1508 }
1509
1510 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1511    // being an inline candidate? We might not have a code block (1) if the code was thrown away,
1512    // (2) if we simply had not actually made this call yet, or (3) if the code is a builtin function and
1513    // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1514    // to inline it if we had static proof of what was being called; this might happen, for example,
1515    // if you call a global function, where watchpointing gives us static information. Overall,
1516    // it's a rare case because we expect that any hot callees would already have been compiled.
1517 CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1518 if (!codeBlock) {
1519 VERBOSE_LOG(" Failing because no code block available.\n");
1520 return UINT_MAX;
1521 }
1522
1523 if (!Options::useArityFixupInlining()) {
1524 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1525 VERBOSE_LOG(" Failing because of arity mismatch.\n");
1526 return UINT_MAX;
1527 }
1528 }
1529
1530 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1531 codeBlock, specializationKind, callee.isClosureCall());
1532 VERBOSE_LOG(" Call mode: ", callMode, "\n");
1533 VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
1534 VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
1535 VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1536 VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1537 VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1538 VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1539 if (!canInline(capabilityLevel)) {
1540 VERBOSE_LOG(" Failing because the function is not inlineable.\n");
1541 return UINT_MAX;
1542 }
1543
1544    // Check if the caller is already too large. We do this check here because this is just
1545    // where we happen to also have the callee's code block, which we need for the
1546    // purpose of unsetting SABI (shouldAlwaysBeInlined).
1547 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1548 codeBlock->m_shouldAlwaysBeInlined = false;
1549 VERBOSE_LOG(" Failing because the caller is too large.\n");
1550 return UINT_MAX;
1551 }
1552
1553 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1554 // this function.
1555 // https://bugs.webkit.org/show_bug.cgi?id=127627
1556
1557 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1558 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1559 // haven't gotten to Baseline yet. Consider not inlining these functions.
1560 // https://bugs.webkit.org/show_bug.cgi?id=145503
1561
1562 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1563    // too many levels? If either of these is detected, then don't inline. We adjust our
1564 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1565
1566 unsigned depth = 0;
1567 unsigned recursion = 0;
1568
1569 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1570 ++depth;
1571 if (depth >= Options::maximumInliningDepth()) {
1572 VERBOSE_LOG(" Failing because depth exceeded.\n");
1573 return UINT_MAX;
1574 }
1575
1576 if (entry->executable() == executable) {
1577 ++recursion;
1578 if (recursion >= Options::maximumInliningRecursion()) {
1579 VERBOSE_LOG(" Failing because recursion detected.\n");
1580 return UINT_MAX;
1581 }
1582 }
1583 }
1584
1585 VERBOSE_LOG(" Inlining should be possible.\n");
1586
1587 // It might be possible to inline.
1588 return codeBlock->bytecodeCost();
1589}
1590
1591template<typename ChecksFunctor>
1592void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1593{
1594 const Instruction* savedCurrentInstruction = m_currentInstruction;
1595 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1596
1597 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1598 insertChecks(codeBlock);
1599
1600 // FIXME: Don't flush constants!
1601
1602    // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment into account,
1603    // numberOfStackPaddingSlots does. Consider the following case,
1604 //
1605 // before: [ ... ][arg0][header]
1606 // after: [ ... ][ext ][arg1][arg0][header]
1607 //
1608 // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1609 // We insert extra slots to align stack.
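    // For example (hypothetical numbers): with codeBlock->numParameters() == 2 and
    // argumentCountIncludingThis == 1, arityFixupCount is 1, while numberOfStackPaddingSlots
    // is 2 once the alignment padding is added, matching the [ext ] slot in the diagram above.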
1610 int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1611 int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1612 ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1613 int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1614
1615 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1616
1617 ensureLocals(
1618 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1619 CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1620
1621 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1622
1623 if (result.isValid())
1624 result = m_inlineStackTop->remapOperand(result);
1625
1626 VariableAccessData* calleeVariable = nullptr;
1627 if (callee.isClosureCall()) {
1628 Node* calleeSet = set(
1629 VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1630
1631 calleeVariable = calleeSet->variableAccessData();
1632 calleeVariable->mergeShouldNeverUnbox(true);
1633 }
1634
1635 InlineStackEntry* callerStackTop = m_inlineStackTop;
1636 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1637 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1638
1639 // This is where the actual inlining really happens.
1640 BytecodeIndex oldIndex = m_currentIndex;
1641 m_currentIndex = BytecodeIndex(0);
1642
1643 switch (kind) {
1644 case InlineCallFrame::GetterCall:
1645 case InlineCallFrame::SetterCall: {
1646        // When inlining getter and setter calls, we set up a stack frame that does not appear in the bytecode.
1647        // Because inlining can switch on the executable, we could have a graph like this.
1648 //
1649 // BB#0
1650 // ...
1651 // 30: GetSetter
1652 // 31: MovHint(loc10)
1653 // 32: SetLocal(loc10)
1654 // 33: MovHint(loc9)
1655 // 34: SetLocal(loc9)
1656 // ...
1657 // 37: GetExecutable(@30)
1658 // ...
1659 // 41: Switch(@37)
1660 //
1661 // BB#2
1662 // 42: GetLocal(loc12, bc#7 of caller)
1663 // ...
1664 // --> callee: loc9 and loc10 are arguments of callee.
1665 // ...
1666 // <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1667 //
1668 // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1669 // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1670 // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
1671 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1672 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1673 Node* value = getDirect(argumentToGet);
1674 addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1675 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1676 }
1677 break;
1678 }
1679 default:
1680 break;
1681 }
1682
1683 if (arityFixupCount) {
1684 // Note: we do arity fixup in two phases:
1685 // 1. We get all the values we need and MovHint them to the expected locals.
1686 // 2. We SetLocal them after that. This way, if we exit, the callee's
1687 // frame is already set up. If any SetLocal exits, we have a valid exit state.
1688        // This is required because if we didn't do this in two phases, we might exit in
1689        // the middle of arity fixup from the callee's CodeOrigin. This is unsound because the
1690        // code we exit to does not perform arity fixup, so the remaining fixups would never be executed.
1691 // For example, consider if we need to pad two args:
1692 // [arg3][arg2][arg1][arg0]
1693 // [fix ][fix ][arg3][arg2][arg1][arg0]
1694 // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1695 // for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1696 // [arg3][arg2][arg1][arg2][arg1][arg0]
1697 // Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1698        // And the callee would then just end up thinking its arguments are:
1699        // [arg3][arg2][arg1][arg2][arg1][arg0]
1700        // which is incorrect.
1701
1702 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1703        // The stack needs to be aligned due to the JS calling convention, so there is a hole whenever the argument count is not aligned.
1704        // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Since this argument
1705        // count does not fulfill the stack alignment requirement, we have already inserted an extra slot.
1706 //
1707 // before: [ ... ][ext ][arg1][arg0][header]
1708 //
1709        // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1710        // At that point, we can simply reuse this extra slot, so the fixed-up stack looks like the following.
1711 //
1712 // before: [ ... ][ext ][arg1][arg0][header]
1713 // after: [ ... ][arg2][arg1][arg0][header]
1714 //
1715 // In such cases, we do not need to move frames.
1716 if (registerOffsetAfterFixup != registerOffset) {
1717 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1718 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1719 Node* value = getDirect(argumentToGet);
1720 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1721 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1722 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1723 }
1724 }
1725 for (int index = 0; index < arityFixupCount; ++index) {
1726 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1727 addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1728 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1729 }
1730
1731 // At this point, it's OK to OSR exit because we finished setting up
1732 // our callee's frame. We emit an ExitOK below.
1733 }
1734
1735 // At this point, it's again OK to OSR exit.
1736 m_exitOK = true;
1737 addToGraph(ExitOK);
1738
1739 processSetLocalQueue();
1740
1741 InlineVariableData inlineVariableData;
1742 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1743 inlineVariableData.argumentPositionStart = argumentPositionStart;
1744 inlineVariableData.calleeVariable = 0;
1745
1746 RELEASE_ASSERT(
1747 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1748 == callee.isClosureCall());
1749 if (callee.isClosureCall()) {
1750 RELEASE_ASSERT(calleeVariable);
1751 inlineVariableData.calleeVariable = calleeVariable;
1752 }
1753
1754 m_graph.m_inlineVariableData.append(inlineVariableData);
1755
1756 parseCodeBlock();
1757 clearCaches(); // Reset our state now that we're back to the outer code.
1758
1759 m_currentIndex = oldIndex;
1760 m_exitOK = false;
1761
1762 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1763
1764 // Most functions have at least one op_ret and thus set up the continuation block.
1765 // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1766 if (inlineStackEntry.m_continuationBlock)
1767 m_currentBlock = inlineStackEntry.m_continuationBlock;
1768 else
1769 m_currentBlock = allocateUntargetableBlock();
1770 ASSERT(!m_currentBlock->terminal());
1771
1772 prepareToParseBlock();
1773 m_currentInstruction = savedCurrentInstruction;
1774}
1775
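// Tries to optimize a call to a single known callee. The possible outcomes are:
//   - OptimizedToJump: the call was a recursive tail call and was turned into a jump (a terminal).
//   - Inlined: the callee was handled as an internal function, intrinsic, or DOMJIT call, or its
//     bytecode was inlined outright.
//   - DidNothing: none of the above applied, so the caller must emit a regular call node.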
1776ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1777{
1778 VERBOSE_LOG(" Considering callee ", callee, "\n");
1779
1780 bool didInsertChecks = false;
1781 auto insertChecksWithAccounting = [&] () {
1782 if (needsToCheckCallee)
1783 emitFunctionChecks(callee, callTargetNode, thisArgument);
1784 didInsertChecks = true;
1785 };
1786
1787 if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1788 RELEASE_ASSERT(didInsertChecks);
1789 return CallOptimizationResult::OptimizedToJump;
1790 }
1791 RELEASE_ASSERT(!didInsertChecks);
1792
1793 if (!inliningBalance)
1794 return CallOptimizationResult::DidNothing;
1795
1796 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1797
1798 auto endSpecialCase = [&] () {
1799 RELEASE_ASSERT(didInsertChecks);
1800 addToGraph(Phantom, callTargetNode);
1801 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1802 inliningBalance--;
1803 if (continuationBlock) {
1804 m_currentIndex = nextIndex;
1805 m_exitOK = true;
1806 processSetLocalQueue();
1807 addJumpTo(continuationBlock);
1808 }
1809 };
1810
1811 if (InternalFunction* function = callee.internalFunction()) {
1812 if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1813 endSpecialCase();
1814 return CallOptimizationResult::Inlined;
1815 }
1816 RELEASE_ASSERT(!didInsertChecks);
1817 return CallOptimizationResult::DidNothing;
1818 }
1819
1820 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1821 if (intrinsic != NoIntrinsic) {
1822 if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1823 endSpecialCase();
1824 return CallOptimizationResult::Inlined;
1825 }
1826 RELEASE_ASSERT(!didInsertChecks);
1827 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1828 }
1829
1830 if (Options::useDOMJIT()) {
1831 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1832 if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1833 endSpecialCase();
1834 return CallOptimizationResult::Inlined;
1835 }
1836 RELEASE_ASSERT(!didInsertChecks);
1837 }
1838 }
1839
1840 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1841 if (myInliningCost > inliningBalance)
1842 return CallOptimizationResult::DidNothing;
1843
1844 auto insertCheck = [&] (CodeBlock*) {
1845 if (needsToCheckCallee)
1846 emitFunctionChecks(callee, callTargetNode, thisArgument);
1847 };
1848 inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1849 inliningBalance -= myInliningCost;
1850 return CallOptimizationResult::Inlined;
1851}
1852
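// Handles inlining of varargs call shapes, e.g. (illustrative JS)
//
//     f.apply(thisArg, args);    // or a spread call such as f(...args)
//
// We only attempt this for a monomorphic call site that cannot take the slow path and whose
// observed maximum argument count is small enough; anything else falls back to the generic
// varargs call node emitted by handleVarargsCall.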
1853bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1854 const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1855 VirtualRegister argumentsArgument, unsigned argumentsOffset,
1856 NodeType callOp, InlineCallFrame::Kind kind)
1857{
1858 VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1859 if (callLinkStatus.maxArgumentCountIncludingThis() > Options::maximumVarargsForInlining()) {
1860 VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1861 return false;
1862 }
1863 if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1864 VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1865 return false;
1866 }
1867
1868 CallVariant callVariant = callLinkStatus[0];
1869
1870 unsigned mandatoryMinimum;
1871 if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1872 mandatoryMinimum = functionExecutable->parameterCount();
1873 else
1874 mandatoryMinimum = 0;
1875
1876 // includes "this"
1877 unsigned maxArgumentCountIncludingThis = std::max(callLinkStatus.maxArgumentCountIncludingThis(), mandatoryMinimum + 1);
1878
1879 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1880 if (inliningCost(callVariant, maxArgumentCountIncludingThis, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1881 VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1882 return false;
1883 }
1884
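    // Carve out a fresh frame below firstFreeReg: space for the arguments, then the call frame
    // header. registerOffset is negative here, so we negate it, round up to a multiple of
    // stackAlignmentRegisters(), and negate again to keep the inlined frame stack-aligned.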
1885 int registerOffset = firstFreeReg;
1886 registerOffset -= maxArgumentCountIncludingThis;
1887 registerOffset -= CallFrame::headerSizeInRegisters;
1888 registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
1889
1890 auto insertChecks = [&] (CodeBlock* codeBlock) {
1891 emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1892
1893 int remappedRegisterOffset =
1894 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1895
1896 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1897
1898 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1899 int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1900
1901 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1902 data->start = VirtualRegister(remappedArgumentStart + 1);
1903 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1904 data->offset = argumentsOffset;
1905 data->limit = maxArgumentCountIncludingThis;
1906 data->mandatoryMinimum = mandatoryMinimum;
1907
1908 if (callOp == TailCallForwardVarargs)
1909 addToGraph(ForwardVarargs, OpInfo(data));
1910 else
1911 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1912
1913 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1914 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1915 // callTargetNode because the other 2 are still in use and alive at this point.
1916 addToGraph(Phantom, callTargetNode);
1917
1918        // In DFG IR before SSA, we cannot insert control flow between the
1919 // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1920 // SSA. Fortunately, we also have other reasons for not inserting control flow
1921 // before SSA.
1922
1923 VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1924 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1925 // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1926 // mostly just a formality.
1927 countVariable->predict(SpecInt32Only);
1928 countVariable->mergeIsProfitableToUnbox(true);
1929 Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1930 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1931
1932 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1933 unsigned numSetArguments = 0;
1934 for (unsigned argument = 1; argument < maxArgumentCountIncludingThis; ++argument) {
1935 VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1936 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1937
1938 // For a while it had been my intention to do things like this inside the
1939 // prediction injection phase. But in this case it's really best to do it here,
1940 // because it's here that we have access to the variable access datas for the
1941 // inlining we're about to do.
1942 //
1943 // Something else that's interesting here is that we'd really love to get
1944 // predictions from the arguments loaded at the callsite, rather than the
1945 // arguments received inside the callee. But that probably won't matter for most
1946 // calls.
1947 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1948 ConcurrentJSLocker locker(codeBlock->m_lock);
1949 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1950 variable->predict(profile.computeUpdatedPrediction(locker));
1951 }
1952
1953 Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1954 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1955 ++numSetArguments;
1956 }
1957 };
1958
1959 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1960 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1961 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1962    // and there are no callsite value profiles, and native functions won't have callee value profiles for
1963 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1964 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1965 // calling LoadVarargs twice.
1966 inlineCall(callTargetNode, result, callVariant, registerOffset, maxArgumentCountIncludingThis, kind, nullptr, insertChecks);
1967
1968
1969 VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1970 return true;
1971}
1972
1973unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1974{
1975 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1976 if (specializationKind == CodeForConstruct)
1977 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1978 if (callLinkStatus.isClosureCall())
1979 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1980 return inliningBalance;
1981}
1982
1983ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1984 Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1985 int registerOffset, VirtualRegister thisArgument,
1986 int argumentCountIncludingThis,
1987 BytecodeIndex nextIndex, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1988{
1989 VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1990
1991 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1992 unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1993
1994 // First check if we can avoid creating control flow. Our inliner does some CFG
1995 // simplification on the fly and this helps reduce compile times, but we can only leverage
1996 // this in cases where we don't need control flow diamonds to check the callee.
1997 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1998 return handleCallVariant(
1999 callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
2000 argumentCountIncludingThis, nextIndex, kind, prediction, inliningBalance, nullptr, true);
2001 }
2002
2003    // We need to create some kind of switch over the callee. For now we only do this if we believe that
2004    // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
2005    // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
2006    // the DFG. By polyvariant profiling we mean polyvariant profiling of *this* call. Note that
2007    // we could improve on that aspect by doing polymorphic inlining while still keeping the
2008    // profiling.
2009 if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
2010 VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2011 return CallOptimizationResult::DidNothing;
2012 }
2013
2014 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
2015 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
2016 // it has no idea.
2017 if (!Options::usePolymorphicCallInliningForNonStubStatus()
2018 && !callLinkStatus.isBasedOnStub()) {
2019 VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
2020 return CallOptimizationResult::DidNothing;
2021 }
2022
2023 bool allAreClosureCalls = true;
2024 bool allAreDirectCalls = true;
2025 for (unsigned i = callLinkStatus.size(); i--;) {
2026 if (callLinkStatus[i].isClosureCall())
2027 allAreDirectCalls = false;
2028 else
2029 allAreClosureCalls = false;
2030 }
2031
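    // If every variant is a non-closure call we can switch directly on the callee cell. If every
    // variant is a closure call we instead switch on the callee's executable, since distinct
    // JSFunctions can share the same executable.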
2032 Node* thingToSwitchOn;
2033 if (allAreDirectCalls)
2034 thingToSwitchOn = callTargetNode;
2035 else if (allAreClosureCalls)
2036 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2037 else {
2038 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2039 // where it would be beneficial. It might be best to handle these cases as if all calls were
2040 // closure calls.
2041 // https://bugs.webkit.org/show_bug.cgi?id=136020
2042 VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2043 return CallOptimizationResult::DidNothing;
2044 }
2045
2046 VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2047
2048 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2049 // store the callee so that it will be accessible to all of the blocks we're about to create. We
2050 // get away with doing an immediate-set here because we wouldn't have performed any side effects
2051 // yet.
2052    VERBOSE_LOG("Register offset: ", registerOffset, "\n");
2053 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2054 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2055 VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2056 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2057
2058 // It's OK to exit right now, even though we set some locals. That's because those locals are not
2059 // user-visible.
2060 m_exitOK = true;
2061 addToGraph(ExitOK);
2062
2063 SwitchData& data = *m_graph.m_switchData.add();
2064 data.kind = SwitchCell;
2065 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2066 m_currentBlock->didLink();
2067
2068 BasicBlock* continuationBlock = allocateUntargetableBlock();
2069 VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2070
2071 // We may force this true if we give up on inlining any of the edges.
2072 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2073
2074 VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2075
2076 BytecodeIndex oldIndex = m_currentIndex;
2077 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2078 m_currentIndex = oldIndex;
2079 BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2080 m_currentBlock = calleeEntryBlock;
2081 prepareToParseBlock();
2082
2083 // At the top of each switch case, we can exit.
2084 m_exitOK = true;
2085
2086 Node* myCallTargetNode = getDirect(calleeReg);
2087
2088 auto inliningResult = handleCallVariant(
2089 myCallTargetNode, result, callLinkStatus[i], registerOffset,
2090 thisArgument, argumentCountIncludingThis, nextIndex, kind, prediction,
2091 inliningBalance, continuationBlock, false);
2092
2093 if (inliningResult == CallOptimizationResult::DidNothing) {
2094 // That failed so we let the block die. Nothing interesting should have been added to
2095 // the block. We also give up on inlining any of the (less frequent) callees.
2096 ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2097 m_graph.killBlockAndItsContents(m_currentBlock);
2098 m_graph.m_blocks.removeLast();
2099 VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2100
2101 // The fact that inlining failed means we need a slow path.
2102 couldTakeSlowPath = true;
2103 break;
2104 }
2105
2106 JSCell* thingToCaseOn;
2107 if (allAreDirectCalls)
2108 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2109 else {
2110 ASSERT(allAreClosureCalls);
2111 thingToCaseOn = callLinkStatus[i].executable();
2112 }
2113 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2114 VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2115 }
2116
2117 // Slow path block
2118 m_currentBlock = allocateUntargetableBlock();
2119 m_currentIndex = oldIndex;
2120 m_exitOK = true;
2121 data.fallThrough = BranchTarget(m_currentBlock);
2122 prepareToParseBlock();
2123 Node* myCallTargetNode = getDirect(calleeReg);
2124 if (couldTakeSlowPath) {
2125 addCall(
2126 result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2127 registerOffset, prediction);
2128 VERBOSE_LOG("We added a call in the slow path\n");
2129 } else {
2130 addToGraph(CheckBadCell);
2131 addToGraph(Phantom, myCallTargetNode);
2132 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2133
2134 if (result.isValid())
2135 set(result, addToGraph(BottomValue));
2136 VERBOSE_LOG("couldTakeSlowPath was false\n");
2137 }
2138
2139 m_currentIndex = nextIndex;
2140 m_exitOK = true; // Origin changed, so it's fine to exit again.
2141 processSetLocalQueue();
2142
2143 if (Node* terminal = m_currentBlock->terminal())
2144 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2145 else {
2146 addJumpTo(continuationBlock);
2147 }
2148
2149 prepareToParseBlock();
2150
2151 m_currentIndex = oldIndex;
2152 m_currentBlock = continuationBlock;
2153 m_exitOK = true;
2154
2155 VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2156 return CallOptimizationResult::Inlined;
2157}
2158
2159template<typename ChecksFunctor>
2160bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2161{
2162 ASSERT(op == ArithMin || op == ArithMax);
2163
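    // The cases below mirror the JS semantics: Math.max() evaluates to -Infinity and Math.min()
    // to +Infinity; with a single argument the result is the argument itself, checked to be a
    // number; with two arguments we can use the ArithMin/ArithMax nodes directly.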
2164 if (argumentCountIncludingThis == 1) {
2165 insertChecks();
2166 double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2167 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2168 return true;
2169 }
2170
2171 if (argumentCountIncludingThis == 2) {
2172 insertChecks();
2173 Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2174 addToGraph(Phantom, Edge(resultNode, NumberUse));
2175 set(result, resultNode);
2176 return true;
2177 }
2178
2179 if (argumentCountIncludingThis == 3) {
2180 insertChecks();
2181 set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2182 return true;
2183 }
2184
2185    // Don't handle more than two (non-this) arguments for now.
2186 return false;
2187}
2188
2189template<typename ChecksFunctor>
2190bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2191{
2192 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
2193
2194 if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2195 return false;
2196
2197 // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2198 // it would only benefit intrinsics called as setters, like if you do:
2199 //
2200 // o.__defineSetter__("foo", Math.pow)
2201 //
2202 // Which is extremely amusing, but probably not worth optimizing.
2203 if (!result.isValid())
2204 return false;
2205
2206 bool didSetResult = false;
2207 auto setResult = [&] (Node* node) {
2208 RELEASE_ASSERT(!didSetResult);
2209 set(result, node);
2210 didSetResult = true;
2211 };
2212
2213 auto inlineIntrinsic = [&] {
2214 switch (intrinsic) {
2215
2216 // Intrinsic Functions:
2217
2218 case AbsIntrinsic: {
2219 if (argumentCountIncludingThis == 1) { // Math.abs()
2220 insertChecks();
2221 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2222 return true;
2223 }
2224
2225 if (!MacroAssembler::supportsFloatingPointAbs())
2226 return false;
2227
2228 insertChecks();
2229 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2230 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2231 node->mergeFlags(NodeMayOverflowInt32InDFG);
2232 setResult(node);
2233 return true;
2234 }
2235
2236 case MinIntrinsic:
2237 case MaxIntrinsic:
2238 if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2239 didSetResult = true;
2240 return true;
2241 }
2242 return false;
2243
2244#define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2245 case capitalizedName##Intrinsic:
2246 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2247#undef DFG_ARITH_UNARY
2248 {
2249 if (argumentCountIncludingThis == 1) {
2250 insertChecks();
2251 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2252 return true;
2253 }
2254 Arith::UnaryType type = Arith::UnaryType::Sin;
2255 switch (intrinsic) {
2256#define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2257 case capitalizedName##Intrinsic: \
2258 type = Arith::UnaryType::capitalizedName; \
2259 break;
2260 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2261#undef DFG_ARITH_UNARY
2262 default:
2263 RELEASE_ASSERT_NOT_REACHED();
2264 }
2265 insertChecks();
2266 setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2267 return true;
2268 }
2269
2270 case FRoundIntrinsic:
2271 case SqrtIntrinsic: {
2272 if (argumentCountIncludingThis == 1) {
2273 insertChecks();
2274 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2275 return true;
2276 }
2277
2278 NodeType nodeType = Unreachable;
2279 switch (intrinsic) {
2280 case FRoundIntrinsic:
2281 nodeType = ArithFRound;
2282 break;
2283 case SqrtIntrinsic:
2284 nodeType = ArithSqrt;
2285 break;
2286 default:
2287 RELEASE_ASSERT_NOT_REACHED();
2288 }
2289 insertChecks();
2290 setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2291 return true;
2292 }
2293
2294 case PowIntrinsic: {
2295 if (argumentCountIncludingThis < 3) {
2296 // Math.pow() and Math.pow(x) return NaN.
2297 insertChecks();
2298 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2299 return true;
2300 }
2301 insertChecks();
2302 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2303 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2304 setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2305 return true;
2306 }
2307
2308 case ArrayPushIntrinsic: {
2309 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2310 return false;
2311
2312 ArrayMode arrayMode = getArrayMode(Array::Write);
2313 if (!arrayMode.isJSArray())
2314 return false;
2315 switch (arrayMode.type()) {
2316 case Array::Int32:
2317 case Array::Double:
2318 case Array::Contiguous:
2319 case Array::ArrayStorage: {
2320 insertChecks();
2321
2322 addVarArgChild(nullptr); // For storage.
2323 for (int i = 0; i < argumentCountIncludingThis; ++i)
2324 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2325 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2326 setResult(arrayPush);
2327 return true;
2328 }
2329
2330 default:
2331 return false;
2332 }
2333 }
2334
2335 case ArraySliceIntrinsic: {
2336 if (argumentCountIncludingThis < 1)
2337 return false;
2338
2339 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2340 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2341 return false;
2342
2343 ArrayMode arrayMode = getArrayMode(Array::Read);
2344 if (!arrayMode.isJSArray())
2345 return false;
2346
2347 if (!arrayMode.isJSArrayWithOriginalStructure())
2348 return false;
2349
2350 switch (arrayMode.type()) {
2351 case Array::Double:
2352 case Array::Int32:
2353 case Array::Contiguous: {
2354 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2355
2356 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2357 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2358
2359                // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2360 // https://bugs.webkit.org/show_bug.cgi?id=173171
2361 if (globalObject->arraySpeciesWatchpointSet().state() == IsWatched
2362 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2363 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2364 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2365 && globalObject->arrayPrototypeChainIsSane()) {
2366
2367 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpointSet());
2368 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2369 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2370 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2371
2372 insertChecks();
2373
2374 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2375 // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
2376 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2377 // effects of slice require that we perform a Get(array, "constructor") and we can skip
2378 // that if we're an original array structure. (We can relax this in the future by using
2379 // TryGetById and CheckCell).
2380 //
2381 // 2. We check that the array we're calling slice on has the same global object as the lexical
2382 // global object that this code is running in. This requirement is necessary because we setup the
2383 // watchpoints above on the lexical global object. This means that code that calls slice on
2384 // arrays produced by other global objects won't get this optimization. We could relax this
2385 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2386 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2387 //
2388 // 3. By proving we're an original array structure, we guarantee that the incoming array
2389 // isn't a subclass of Array.
2390
2391 StructureSet structureSet;
2392 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2393 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2394 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2395 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2396 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2397 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2398 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2399
2400 addVarArgChild(array);
2401 if (argumentCountIncludingThis >= 2)
2402 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2403 if (argumentCountIncludingThis >= 3)
2404 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2405 addVarArgChild(addToGraph(GetButterfly, array));
2406
2407 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2408 setResult(arraySlice);
2409 return true;
2410 }
2411
2412 return false;
2413 }
2414 default:
2415 return false;
2416 }
2417
2418 RELEASE_ASSERT_NOT_REACHED();
2419 return false;
2420 }
2421
2422 case ArrayIndexOfIntrinsic: {
2423 if (argumentCountIncludingThis < 2)
2424 return false;
2425
2426 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2427 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2428 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2429 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2430 return false;
2431
2432 ArrayMode arrayMode = getArrayMode(Array::Read);
2433 if (!arrayMode.isJSArray())
2434 return false;
2435
2436 if (!arrayMode.isJSArrayWithOriginalStructure())
2437 return false;
2438
2439 // We do not want to convert arrays into one type just to perform indexOf.
2440 if (arrayMode.doesConversion())
2441 return false;
2442
2443 switch (arrayMode.type()) {
2444 case Array::Double:
2445 case Array::Int32:
2446 case Array::Contiguous: {
2447 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2448
2449 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2450 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2451
2452                // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2453 // https://bugs.webkit.org/show_bug.cgi?id=173171
2454 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2455 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2456 && globalObject->arrayPrototypeChainIsSane()) {
2457
2458 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2459 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2460
2461 insertChecks();
2462
2463 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2464 addVarArgChild(array);
2465 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2466 if (argumentCountIncludingThis >= 3)
2467 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2468 addVarArgChild(nullptr);
2469
2470 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2471 setResult(node);
2472 return true;
2473 }
2474
2475 return false;
2476 }
2477 default:
2478 return false;
2479 }
2480
2481 RELEASE_ASSERT_NOT_REACHED();
2482 return false;
2483
2484 }
2485
2486 case ArrayPopIntrinsic: {
2487 ArrayMode arrayMode = getArrayMode(Array::Write);
2488 if (!arrayMode.isJSArray())
2489 return false;
2490 switch (arrayMode.type()) {
2491 case Array::Int32:
2492 case Array::Double:
2493 case Array::Contiguous:
2494 case Array::ArrayStorage: {
2495 insertChecks();
2496 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2497 setResult(arrayPop);
2498 return true;
2499 }
2500
2501 default:
2502 return false;
2503 }
2504 }
2505
2506 case AtomicsAddIntrinsic:
2507 case AtomicsAndIntrinsic:
2508 case AtomicsCompareExchangeIntrinsic:
2509 case AtomicsExchangeIntrinsic:
2510 case AtomicsIsLockFreeIntrinsic:
2511 case AtomicsLoadIntrinsic:
2512 case AtomicsOrIntrinsic:
2513 case AtomicsStoreIntrinsic:
2514 case AtomicsSubIntrinsic:
2515 case AtomicsXorIntrinsic: {
2516 if (!is64Bit())
2517 return false;
2518
2519 NodeType op = LastNodeType;
2520 Array::Action action = Array::Write;
2521 unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
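            // For illustration: Atomics.add(typedArray, index, value) passes 3 arguments,
            // Atomics.compareExchange(typedArray, index, expected, replacement) passes 4, and
            // Atomics.isLockFree(size) passes just 1 and needs no backing-store child.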
2522 switch (intrinsic) {
2523 case AtomicsAddIntrinsic:
2524 op = AtomicsAdd;
2525 numArgs = 3;
2526 break;
2527 case AtomicsAndIntrinsic:
2528 op = AtomicsAnd;
2529 numArgs = 3;
2530 break;
2531 case AtomicsCompareExchangeIntrinsic:
2532 op = AtomicsCompareExchange;
2533 numArgs = 4;
2534 break;
2535 case AtomicsExchangeIntrinsic:
2536 op = AtomicsExchange;
2537 numArgs = 3;
2538 break;
2539 case AtomicsIsLockFreeIntrinsic:
2540 // This gets no backing store, but we need no special logic for this since this also does
2541 // not need varargs.
2542 op = AtomicsIsLockFree;
2543 numArgs = 1;
2544 break;
2545 case AtomicsLoadIntrinsic:
2546 op = AtomicsLoad;
2547 numArgs = 2;
2548 action = Array::Read;
2549 break;
2550 case AtomicsOrIntrinsic:
2551 op = AtomicsOr;
2552 numArgs = 3;
2553 break;
2554 case AtomicsStoreIntrinsic:
2555 op = AtomicsStore;
2556 numArgs = 3;
2557 break;
2558 case AtomicsSubIntrinsic:
2559 op = AtomicsSub;
2560 numArgs = 3;
2561 break;
2562 case AtomicsXorIntrinsic:
2563 op = AtomicsXor;
2564 numArgs = 3;
2565 break;
2566 default:
2567 RELEASE_ASSERT_NOT_REACHED();
2568 break;
2569 }
2570
2571 if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2572 return false;
2573
2574 insertChecks();
2575
2576 Vector<Node*, 3> args;
2577 for (unsigned i = 0; i < numArgs; ++i)
2578 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2579
2580 Node* resultNode;
2581 if (numArgs + 1 <= 3) {
2582 while (args.size() < 3)
2583 args.append(nullptr);
2584 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2585 } else {
2586 for (Node* node : args)
2587 addVarArgChild(node);
2588 addVarArgChild(nullptr);
2589 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2590 }
2591
2592 setResult(resultNode);
2593 return true;
2594 }
2595
2596 case ParseIntIntrinsic: {
2597 if (argumentCountIncludingThis < 2)
2598 return false;
2599
2600 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2601 return false;
2602
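            // Both parseInt(value) and parseInt(value, radix) funnel into a ParseInt node; the
            // radix edge is simply omitted in the one-argument form below.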
2603 insertChecks();
2604 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2605 Node* parseInt;
2606 if (argumentCountIncludingThis == 2)
2607 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2608 else {
2609 ASSERT(argumentCountIncludingThis > 2);
2610 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2611 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2612 }
2613 setResult(parseInt);
2614 return true;
2615 }
2616
2617 case CharCodeAtIntrinsic: {
2618 if (argumentCountIncludingThis < 2)
2619 return false;
2620
2621 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Uncountable) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2622 return false;
2623
2624 insertChecks();
2625 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2626 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2627 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2628
2629 setResult(charCode);
2630 return true;
2631 }
2632
2633 case StringPrototypeCodePointAtIntrinsic: {
2634 if (!is64Bit())
2635 return false;
2636
2637 if (argumentCountIncludingThis < 2)
2638 return false;
2639
2640 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Uncountable) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2641 return false;
2642
2643 insertChecks();
2644 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2645 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2646            Node* codePointAt = addToGraph(StringCodePointAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2647
2648            setResult(codePointAt);
2649 return true;
2650 }
2651
2652 case CharAtIntrinsic: {
2653 if (argumentCountIncludingThis < 2)
2654 return false;
2655
2656 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2657 return false;
2658
2659            // FIXME: String#charAt returns an empty string when the index is out of bounds, and this does not break the AI's claim.
2660            // Only the FTL supports the out-of-bounds version for now. We should support the out-of-bounds version in the DFG as well.
2661 // https://bugs.webkit.org/show_bug.cgi?id=201678
2662
2663 insertChecks();
2664 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2665 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2666 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2667
2668 setResult(charCode);
2669 return true;
2670 }
2671 case Clz32Intrinsic: {
2672 insertChecks();
2673 if (argumentCountIncludingThis == 1)
2674 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2675 else {
2676 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2677 setResult(addToGraph(ArithClz32, operand));
2678 }
2679 return true;
2680 }
2681 case FromCharCodeIntrinsic: {
2682 if (argumentCountIncludingThis != 2)
2683 return false;
2684
2685 insertChecks();
2686 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2687 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2688
2689 setResult(charCode);
2690
2691 return true;
2692 }
2693
2694 case RegExpExecIntrinsic: {
2695 if (argumentCountIncludingThis < 2)
2696 return false;
2697
2698 insertChecks();
2699 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2700 setResult(regExpExec);
2701
2702 return true;
2703 }
2704
2705 case RegExpTestIntrinsic:
2706 case RegExpTestFastIntrinsic: {
2707 if (argumentCountIncludingThis < 2)
2708 return false;
2709
2710 if (intrinsic == RegExpTestIntrinsic) {
2711 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2712 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2713 return false;
2714
2715 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2716 Structure* regExpStructure = globalObject->regExpStructure();
2717 m_graph.registerStructure(regExpStructure);
2718 ASSERT(regExpStructure->storedPrototype().isObject());
2719 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2720
2721 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2722 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2723
2724 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2725 JSValue currentProperty;
2726 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2727 return false;
2728
2729 return currentProperty == primordialProperty;
2730 };
2731
2732 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2733 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2734 return false;
2735
2736 // Check that regExpObject is actually a RegExp object.
2737 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2738 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2739
2740                 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2741 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2742 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2743 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2744 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2745 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2746 }
2747
2748 insertChecks();
2749 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2750 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2751 setResult(regExpExec);
2752
2753 return true;
2754 }
2755
2756 case RegExpMatchFastIntrinsic: {
2757 RELEASE_ASSERT(argumentCountIncludingThis == 2);
2758
2759 insertChecks();
2760 Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2761 setResult(regExpMatch);
2762 return true;
2763 }
2764
2765 case ObjectCreateIntrinsic: {
2766 if (argumentCountIncludingThis != 2)
2767 return false;
2768
2769 insertChecks();
2770 setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2771 return true;
2772 }
2773
2774 case ObjectGetPrototypeOfIntrinsic: {
2775 if (argumentCountIncludingThis < 2)
2776 return false;
2777
2778 insertChecks();
2779 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2780 return true;
2781 }
2782
2783 case ObjectIsIntrinsic: {
2784 if (argumentCountIncludingThis < 3)
2785 return false;
2786
2787 insertChecks();
2788 setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2789 return true;
2790 }
2791
2792 case ObjectKeysIntrinsic: {
2793 if (argumentCountIncludingThis < 2)
2794 return false;
2795
2796 insertChecks();
2797 setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2798 return true;
2799 }
2800
2801 case ReflectGetPrototypeOfIntrinsic: {
2802 if (argumentCountIncludingThis < 2)
2803 return false;
2804
2805 insertChecks();
2806 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2807 return true;
2808 }
2809
2810 case IsTypedArrayViewIntrinsic: {
2811 ASSERT(argumentCountIncludingThis == 2);
2812
2813 insertChecks();
2814 setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2815 return true;
2816 }
2817
2818 case StringPrototypeValueOfIntrinsic: {
2819 insertChecks();
2820 Node* value = get(virtualRegisterForArgument(0, registerOffset));
2821 setResult(addToGraph(StringValueOf, value));
2822 return true;
2823 }
2824
2825 case StringPrototypeReplaceIntrinsic: {
2826 if (argumentCountIncludingThis < 3)
2827 return false;
2828
2829 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2830 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2831 return false;
2832
2833 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2834 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2835 return false;
2836
2837 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2838 Structure* regExpStructure = globalObject->regExpStructure();
2839 m_graph.registerStructure(regExpStructure);
2840 ASSERT(regExpStructure->storedPrototype().isObject());
2841 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2842
2843 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2844 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2845
2846 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2847 JSValue currentProperty;
2848 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2849 return false;
2850
2851 return currentProperty == primordialProperty;
2852 };
2853
2854 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2855 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2856 return false;
2857
2858 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2859 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2860 return false;
2861
2862 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2863 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2864 return false;
2865
2866         // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2867 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2868 return false;
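            // For example, user code that overrides any of these properties, e.g.
            //     RegExp.prototype[Symbol.replace] = function (string, replacement) { /* ... */ };
            // fails the isRegExpPropertySame() checks above, so we do not inline and instead emit a
            // normal call rather than a StringReplace node.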
2869
2870 insertChecks();
2871
2872 Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2873 setResult(resultNode);
2874 return true;
2875 }
2876
2877 case StringPrototypeReplaceRegExpIntrinsic: {
2878 if (argumentCountIncludingThis < 3)
2879 return false;
2880
2881 insertChecks();
2882 Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2883 setResult(resultNode);
2884 return true;
2885 }
2886
2887 case RoundIntrinsic:
2888 case FloorIntrinsic:
2889 case CeilIntrinsic:
2890 case TruncIntrinsic: {
2891 if (argumentCountIncludingThis == 1) {
2892 insertChecks();
2893 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2894 return true;
2895 }
2896 insertChecks();
2897 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2898 NodeType op;
2899 if (intrinsic == RoundIntrinsic)
2900 op = ArithRound;
2901 else if (intrinsic == FloorIntrinsic)
2902 op = ArithFloor;
2903 else if (intrinsic == CeilIntrinsic)
2904 op = ArithCeil;
2905 else {
2906 ASSERT(intrinsic == TruncIntrinsic);
2907 op = ArithTrunc;
2908 }
2909 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2910 setResult(roundNode);
2911 return true;
2912 }
2913 case IMulIntrinsic: {
2914 if (argumentCountIncludingThis < 3)
2915 return false;
2916 insertChecks();
2917 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2918 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2919 Node* left = get(leftOperand);
2920 Node* right = get(rightOperand);
2921 setResult(addToGraph(ArithIMul, left, right));
2922 return true;
2923 }
2924
2925 case RandomIntrinsic: {
2926 insertChecks();
2927 setResult(addToGraph(ArithRandom));
2928 return true;
2929 }
2930
2931 case DFGTrueIntrinsic: {
2932 insertChecks();
2933 setResult(jsConstant(jsBoolean(true)));
2934 return true;
2935 }
2936
2937 case FTLTrueIntrinsic: {
2938 insertChecks();
2939 setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2940 return true;
2941 }
2942
2943 case OSRExitIntrinsic: {
2944 insertChecks();
2945 addToGraph(ForceOSRExit);
2946 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2947 return true;
2948 }
2949
2950 case IsFinalTierIntrinsic: {
2951 insertChecks();
2952 setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2953 return true;
2954 }
2955
2956 case SetInt32HeapPredictionIntrinsic: {
2957 insertChecks();
2958 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2959 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2960 if (node->hasHeapPrediction())
2961 node->setHeapPrediction(SpecInt32Only);
2962 }
2963 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2964 return true;
2965 }
2966
2967 case CheckInt32Intrinsic: {
2968 insertChecks();
2969 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2970 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2971 addToGraph(Phantom, Edge(node, Int32Use));
2972 }
2973 setResult(jsConstant(jsBoolean(true)));
2974 return true;
2975 }
2976
2977 case FiatInt52Intrinsic: {
2978 if (argumentCountIncludingThis < 2)
2979 return false;
2980 insertChecks();
2981 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2982 if (enableInt52())
2983 setResult(addToGraph(FiatInt52, get(operand)));
2984 else
2985 setResult(get(operand));
2986 return true;
2987 }
2988
2989 case JSMapGetIntrinsic: {
2990 if (argumentCountIncludingThis < 2)
2991 return false;
2992
2993 insertChecks();
2994 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2995 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2996 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2997 Node* hash = addToGraph(MapHash, normalizedKey);
2998 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2999 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3000 setResult(resultNode);
3001 return true;
3002 }
3003
3004 case JSSetHasIntrinsic:
3005 case JSMapHasIntrinsic: {
3006 if (argumentCountIncludingThis < 2)
3007 return false;
3008
3009 insertChecks();
3010 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
3011 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3012 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3013 Node* hash = addToGraph(MapHash, normalizedKey);
3014 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
3015 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
3016 JSCell* sentinel = nullptr;
3017 if (intrinsic == JSMapHasIntrinsic)
3018 sentinel = m_vm->sentinelMapBucket();
3019 else
3020 sentinel = m_vm->sentinelSetBucket();
3021
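            // GetMapBucket yields the VM's shared sentinel bucket when the key is not present,
            // so has() reduces to negating a pointer comparison against that sentinel.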
3022 FrozenValue* frozenPointer = m_graph.freeze(sentinel);
3023 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
3024 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3025 setResult(resultNode);
3026 return true;
3027 }
3028
3029 case JSSetAddIntrinsic: {
3030 if (argumentCountIncludingThis < 2)
3031 return false;
3032
3033 insertChecks();
3034 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3035 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3036 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3037 Node* hash = addToGraph(MapHash, normalizedKey);
3038 addToGraph(SetAdd, base, normalizedKey, hash);
3039 setResult(base);
3040 return true;
3041 }
3042
3043 case JSMapSetIntrinsic: {
3044 if (argumentCountIncludingThis < 3)
3045 return false;
3046
3047 insertChecks();
3048 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3049 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3050 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3051
3052 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3053 Node* hash = addToGraph(MapHash, normalizedKey);
3054
3055 addVarArgChild(base);
3056 addVarArgChild(normalizedKey);
3057 addVarArgChild(value);
3058 addVarArgChild(hash);
3059 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
3060 setResult(base);
3061 return true;
3062 }
3063
3064 case JSSetBucketHeadIntrinsic:
3065 case JSMapBucketHeadIntrinsic: {
3066 ASSERT(argumentCountIncludingThis == 2);
3067
3068 insertChecks();
3069 Node* map = get(virtualRegisterForArgument(1, registerOffset));
3070 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3071 Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3072 setResult(resultNode);
3073 return true;
3074 }
3075
3076 case JSSetBucketNextIntrinsic:
3077 case JSMapBucketNextIntrinsic: {
3078 ASSERT(argumentCountIncludingThis == 2);
3079
3080 insertChecks();
3081 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3082 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3083 Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3084 setResult(resultNode);
3085 return true;
3086 }
3087
3088 case JSSetBucketKeyIntrinsic:
3089 case JSMapBucketKeyIntrinsic: {
3090 ASSERT(argumentCountIncludingThis == 2);
3091
3092 insertChecks();
3093 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3094 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3095 Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3096 setResult(resultNode);
3097 return true;
3098 }
3099
3100 case JSMapBucketValueIntrinsic: {
3101 ASSERT(argumentCountIncludingThis == 2);
3102
3103 insertChecks();
3104 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3105 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3106 setResult(resultNode);
3107 return true;
3108 }
3109
3110 case JSWeakMapGetIntrinsic: {
3111 if (argumentCountIncludingThis < 2)
3112 return false;
3113
3114 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3115 return false;
3116
3117 insertChecks();
3118 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3119 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3120 addToGraph(Check, Edge(key, ObjectUse));
3121 Node* hash = addToGraph(MapHash, key);
3122 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3123 Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3124
3125 setResult(resultNode);
3126 return true;
3127 }
3128
3129 case JSWeakMapHasIntrinsic: {
3130 if (argumentCountIncludingThis < 2)
3131 return false;
3132
3133 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3134 return false;
3135
3136 insertChecks();
3137 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3138 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3139 addToGraph(Check, Edge(key, ObjectUse));
3140 Node* hash = addToGraph(MapHash, key);
3141 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3142 Node* invertedResult = addToGraph(IsEmpty, holder);
3143 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3144
3145 setResult(resultNode);
3146 return true;
3147 }
3148
3149 case JSWeakSetHasIntrinsic: {
3150 if (argumentCountIncludingThis < 2)
3151 return false;
3152
3153 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3154 return false;
3155
3156 insertChecks();
3157 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3158 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3159 addToGraph(Check, Edge(key, ObjectUse));
3160 Node* hash = addToGraph(MapHash, key);
3161 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3162 Node* invertedResult = addToGraph(IsEmpty, holder);
3163 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3164
3165 setResult(resultNode);
3166 return true;
3167 }
3168
3169 case JSWeakSetAddIntrinsic: {
3170 if (argumentCountIncludingThis < 2)
3171 return false;
3172
3173 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3174 return false;
3175
3176 insertChecks();
3177 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3178 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3179 addToGraph(Check, Edge(key, ObjectUse));
3180 Node* hash = addToGraph(MapHash, key);
3181 addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3182 setResult(base);
3183 return true;
3184 }
3185
3186 case JSWeakMapSetIntrinsic: {
3187 if (argumentCountIncludingThis < 3)
3188 return false;
3189
3190 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3191 return false;
3192
3193 insertChecks();
3194 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3195 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3196 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3197
3198 addToGraph(Check, Edge(key, ObjectUse));
3199 Node* hash = addToGraph(MapHash, key);
3200
3201 addVarArgChild(Edge(base, WeakMapObjectUse));
3202 addVarArgChild(Edge(key, ObjectUse));
3203 addVarArgChild(Edge(value));
3204 addVarArgChild(Edge(hash, Int32Use));
3205 addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
3206 setResult(base);
3207 return true;
3208 }
3209
3210 case DatePrototypeGetTimeIntrinsic: {
3211 if (!is64Bit())
3212 return false;
3213 insertChecks();
3214 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3215 setResult(addToGraph(DateGetTime, OpInfo(intrinsic), OpInfo(), base));
3216 return true;
3217 }
3218
3219 case DatePrototypeGetFullYearIntrinsic:
3220 case DatePrototypeGetUTCFullYearIntrinsic:
3221 case DatePrototypeGetMonthIntrinsic:
3222 case DatePrototypeGetUTCMonthIntrinsic:
3223 case DatePrototypeGetDateIntrinsic:
3224 case DatePrototypeGetUTCDateIntrinsic:
3225 case DatePrototypeGetDayIntrinsic:
3226 case DatePrototypeGetUTCDayIntrinsic:
3227 case DatePrototypeGetHoursIntrinsic:
3228 case DatePrototypeGetUTCHoursIntrinsic:
3229 case DatePrototypeGetMinutesIntrinsic:
3230 case DatePrototypeGetUTCMinutesIntrinsic:
3231 case DatePrototypeGetSecondsIntrinsic:
3232 case DatePrototypeGetUTCSecondsIntrinsic:
3233 case DatePrototypeGetMillisecondsIntrinsic:
3234 case DatePrototypeGetUTCMillisecondsIntrinsic:
3235 case DatePrototypeGetTimezoneOffsetIntrinsic:
3236 case DatePrototypeGetYearIntrinsic: {
3237 if (!is64Bit())
3238 return false;
3239 insertChecks();
3240 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3241 setResult(addToGraph(DateGetInt32OrNaN, OpInfo(intrinsic), OpInfo(prediction), base));
3242 return true;
3243 }
3244
3245 case DataViewGetInt8:
3246 case DataViewGetUint8:
3247 case DataViewGetInt16:
3248 case DataViewGetUint16:
3249 case DataViewGetInt32:
3250 case DataViewGetUint32:
3251 case DataViewGetFloat32:
3252 case DataViewGetFloat64: {
3253 if (!is64Bit())
3254 return false;
3255
3256 // To inline data view accesses, we assume the architecture we're running on:
3257 // - Is little endian.
3258 // - Allows unaligned loads/stores without crashing.
3259
3260 if (argumentCountIncludingThis < 2)
3261 return false;
3262 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3263 return false;
3264
3265 insertChecks();
3266
3267 uint8_t byteSize;
3268 NodeType op = DataViewGetInt;
3269 bool isSigned = false;
3270 switch (intrinsic) {
3271 case DataViewGetInt8:
3272 isSigned = true;
3273 FALLTHROUGH;
3274 case DataViewGetUint8:
3275 byteSize = 1;
3276 break;
3277
3278 case DataViewGetInt16:
3279 isSigned = true;
3280 FALLTHROUGH;
3281 case DataViewGetUint16:
3282 byteSize = 2;
3283 break;
3284
3285 case DataViewGetInt32:
3286 isSigned = true;
3287 FALLTHROUGH;
3288 case DataViewGetUint32:
3289 byteSize = 4;
3290 break;
3291
3292 case DataViewGetFloat32:
3293 byteSize = 4;
3294 op = DataViewGetFloat;
3295 break;
3296 case DataViewGetFloat64:
3297 byteSize = 8;
3298 op = DataViewGetFloat;
3299 break;
3300 default:
3301 RELEASE_ASSERT_NOT_REACHED();
3302 }
3303
3304 TriState isLittleEndian = MixedTriState;
3305 Node* littleEndianChild = nullptr;
3306 if (byteSize > 1) {
3307 if (argumentCountIncludingThis < 3)
3308 isLittleEndian = FalseTriState;
3309 else {
3310 littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3311 if (littleEndianChild->hasConstant()) {
3312 JSValue constant = littleEndianChild->constant()->value();
3313 if (constant) {
3314 isLittleEndian = constant.pureToBoolean();
3315 if (isLittleEndian != MixedTriState)
3316 littleEndianChild = nullptr;
3317 }
3318 } else
3319 isLittleEndian = MixedTriState;
3320 }
3321 }
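        // When byteSize > 1, either the endianness is statically known at this point (isLittleEndian
        // is TrueTriState or FalseTriState and littleEndianChild has been cleared), or it is
        // MixedTriState and littleEndianChild carries the endianness argument for a runtime check.
        // For single-byte accesses the endianness is irrelevant.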
3322
3323 DataViewData data { };
3324 data.isLittleEndian = isLittleEndian;
3325 data.isSigned = isSigned;
3326 data.byteSize = byteSize;
3327
3328 setResult(
3329 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
3330 return true;
3331 }
3332
3333 case DataViewSetInt8:
3334 case DataViewSetUint8:
3335 case DataViewSetInt16:
3336 case DataViewSetUint16:
3337 case DataViewSetInt32:
3338 case DataViewSetUint32:
3339 case DataViewSetFloat32:
3340 case DataViewSetFloat64: {
3341 if (!is64Bit())
3342 return false;
3343
3344 if (argumentCountIncludingThis < 3)
3345 return false;
3346
3347 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3348 return false;
3349
3350 insertChecks();
3351
3352 uint8_t byteSize;
3353 bool isFloatingPoint = false;
3354 bool isSigned = false;
3355 switch (intrinsic) {
3356 case DataViewSetInt8:
3357 isSigned = true;
3358 FALLTHROUGH;
3359 case DataViewSetUint8:
3360 byteSize = 1;
3361 break;
3362
3363 case DataViewSetInt16:
3364 isSigned = true;
3365 FALLTHROUGH;
3366 case DataViewSetUint16:
3367 byteSize = 2;
3368 break;
3369
3370 case DataViewSetInt32:
3371 isSigned = true;
3372 FALLTHROUGH;
3373 case DataViewSetUint32:
3374 byteSize = 4;
3375 break;
3376
3377 case DataViewSetFloat32:
3378 isFloatingPoint = true;
3379 byteSize = 4;
3380 break;
3381 case DataViewSetFloat64:
3382 isFloatingPoint = true;
3383 byteSize = 8;
3384 break;
3385 default:
3386 RELEASE_ASSERT_NOT_REACHED();
3387 }
3388
3389 TriState isLittleEndian = MixedTriState;
3390 Node* littleEndianChild = nullptr;
3391 if (byteSize > 1) {
3392 if (argumentCountIncludingThis < 4)
3393 isLittleEndian = FalseTriState;
3394 else {
3395 littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3396 if (littleEndianChild->hasConstant()) {
3397 JSValue constant = littleEndianChild->constant()->value();
3398 if (constant) {
3399 isLittleEndian = constant.pureToBoolean();
3400 if (isLittleEndian != MixedTriState)
3401 littleEndianChild = nullptr;
3402 }
3403 } else
3404 isLittleEndian = MixedTriState;
3405 }
3406 }
3407
3408 DataViewData data { };
3409 data.isLittleEndian = isLittleEndian;
3410 data.isSigned = isSigned;
3411 data.byteSize = byteSize;
3412 data.isFloatingPoint = isFloatingPoint;
3413
3414 addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3415 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3416 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3417 addVarArgChild(littleEndianChild);
3418
3419 addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3420 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
3421 return true;
3422 }
3423
3424 case HasOwnPropertyIntrinsic: {
3425 if (argumentCountIncludingThis < 2)
3426 return false;
3427
3428         // This can be racy; that's fine. We know that once we observe that the cache has been
3429         // created, it will never be destroyed until the VM is destroyed. It's unlikely that
3430         // we'd ever get to the point where we inline this as an intrinsic without the cache
3431         // having been created; however, it is possible if we always throw exceptions inside
3432         // hasOwnProperty.
3433 if (!m_vm->hasOwnPropertyCache())
3434 return false;
3435
3436 insertChecks();
3437 Node* object = get(virtualRegisterForArgument(0, registerOffset));
3438 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3439 Node* resultNode = addToGraph(HasOwnProperty, object, key);
3440 setResult(resultNode);
3441 return true;
3442 }
3443
3444 case StringPrototypeSliceIntrinsic: {
3445 if (argumentCountIncludingThis < 2)
3446 return false;
3447
3448 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3449 return false;
3450
3451 insertChecks();
3452 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3453 Node* start = get(virtualRegisterForArgument(1, registerOffset));
3454 Node* end = nullptr;
3455 if (argumentCountIncludingThis > 2)
3456 end = get(virtualRegisterForArgument(2, registerOffset));
3457 Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3458 setResult(resultNode);
3459 return true;
3460 }
3461
3462 case StringPrototypeToLowerCaseIntrinsic: {
3463 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3464 return false;
3465
3466 insertChecks();
3467 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3468 Node* resultNode = addToGraph(ToLowerCase, thisString);
3469 setResult(resultNode);
3470 return true;
3471 }
3472
3473 case NumberPrototypeToStringIntrinsic: {
3474 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3475 return false;
3476
3477 insertChecks();
3478 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3479 if (argumentCountIncludingThis == 1) {
3480 Node* resultNode = addToGraph(ToString, thisNumber);
3481 setResult(resultNode);
3482 } else {
3483 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3484 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3485 setResult(resultNode);
3486 }
3487 return true;
3488 }
3489
3490 case NumberIsIntegerIntrinsic: {
3491 if (argumentCountIncludingThis < 2)
3492 return false;
3493
3494 insertChecks();
3495 Node* input = get(virtualRegisterForArgument(1, registerOffset));
3496 Node* resultNode = addToGraph(NumberIsInteger, input);
3497 setResult(resultNode);
3498 return true;
3499 }
3500
3501 case CPUMfenceIntrinsic:
3502 case CPURdtscIntrinsic:
3503 case CPUCpuidIntrinsic:
3504 case CPUPauseIntrinsic: {
3505#if CPU(X86_64)
3506 if (!m_graph.m_plan.isFTL())
3507 return false;
3508 insertChecks();
3509 setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3510 return true;
3511#else
3512 return false;
3513#endif
3514 }
3515
3516 default:
3517 return false;
3518 }
3519 };
3520
3521 if (inlineIntrinsic()) {
3522 RELEASE_ASSERT(didSetResult);
3523 return true;
3524 }
3525
3526 return false;
3527}
3528
3529template<typename ChecksFunctor>
3530bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3531{
3532 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3533 return false;
3534 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3535 return false;
3536
3537     // FIXME: Currently, we only support functions whose argument count is up to 2.
3538     // Eventually, we should extend this, but 2 or 3 arguments may well cover the typical use cases.
3539 // https://bugs.webkit.org/show_bug.cgi?id=164346
3540     ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support arbitrary-length argument lists.");
3541
3542 insertChecks();
3543 addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3544 return true;
3545}
3546
3547
3548template<typename ChecksFunctor>
3549bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3550{
3551 switch (variant.intrinsic()) {
3552 case TypedArrayByteLengthIntrinsic: {
3553 insertChecks();
3554
3555 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3556 Array::Type arrayType = toArrayType(type);
3557 size_t logSize = logElementSize(type);
3558
3559 variant.structureSet().forEach([&] (Structure* structure) {
3560 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3561 ASSERT(logSize == logElementSize(curType));
3562 arrayType = refineTypedArrayType(arrayType, curType);
3563 ASSERT(arrayType != Array::Generic);
3564 });
3565
3566 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3567
3568 if (!logSize) {
3569 set(result, lengthNode);
3570 return true;
3571 }
3572
3573 // We can use a BitLShift here because typed arrays will never have a byteLength
3574 // that overflows int32.
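        // For example, a Float64Array (logSize == 3) of length 10 has byteLength 10 << 3 == 80.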
3575 Node* shiftNode = jsConstant(jsNumber(logSize));
3576 set(result, addToGraph(ArithBitLShift, lengthNode, shiftNode));
3577
3578 return true;
3579 }
3580
3581 case TypedArrayLengthIntrinsic: {
3582 insertChecks();
3583
3584 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3585 Array::Type arrayType = toArrayType(type);
3586
3587 variant.structureSet().forEach([&] (Structure* structure) {
3588 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3589 arrayType = refineTypedArrayType(arrayType, curType);
3590 ASSERT(arrayType != Array::Generic);
3591 });
3592
3593 set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3594
3595 return true;
3596
3597 }
3598
3599 case TypedArrayByteOffsetIntrinsic: {
3600 insertChecks();
3601
3602 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3603 Array::Type arrayType = toArrayType(type);
3604
3605 variant.structureSet().forEach([&] (Structure* structure) {
3606 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3607 arrayType = refineTypedArrayType(arrayType, curType);
3608 ASSERT(arrayType != Array::Generic);
3609 });
3610
3611 set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3612
3613 return true;
3614 }
3615
3616 case UnderscoreProtoIntrinsic: {
3617 insertChecks();
3618
3619 bool canFold = !variant.structureSet().isEmpty();
3620 JSValue prototype;
3621 variant.structureSet().forEach([&] (Structure* structure) {
3622 auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3623 MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3624 if (getPrototypeMethod != defaultGetPrototype) {
3625 canFold = false;
3626 return;
3627 }
3628
3629 if (structure->hasPolyProto()) {
3630 canFold = false;
3631 return;
3632 }
3633 if (!prototype)
3634 prototype = structure->storedPrototype();
3635 else if (prototype != structure->storedPrototype())
3636 canFold = false;
3637 });
3638
3639     // OK, only one prototype was found, so we perform constant folding here.
3640     // This information is important for a super constructor call to get new.target as a constant.
3641 if (prototype && canFold) {
3642 set(result, weakJSConstant(prototype));
3643 return true;
3644 }
3645
3646 set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));
3647 return true;
3648 }
3649
3650 default:
3651 return false;
3652 }
3653 RELEASE_ASSERT_NOT_REACHED();
3654}
3655
3656static void blessCallDOMGetter(Node* node)
3657{
3658 DOMJIT::CallDOMGetterSnippet* snippet = node->callDOMGetterData()->snippet;
3659 if (snippet && !snippet->effect.mustGenerate())
3660 node->clearFlags(NodeMustGenerate);
3661}
3662
3663bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
3664{
3665 if (!variant.domAttribute())
3666 return false;
3667
3668 auto* domAttribute = variant.domAttribute();
3669
3670     // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
3671     // since replacing a CustomGetterSetter always incurs a Structure transition.
3672 if (!check(variant.conditionSet()))
3673 return false;
3674 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode);
3675
3676     // We do not need to emit a CheckCell here. When the custom accessor is replaced with a different one, a Structure transition occurs.
3677 addToGraph(CheckSubClass, OpInfo(domAttribute->classInfo), thisNode);
3678
3679 bool wasSeenInJIT = true;
3680 GetByStatus* status = m_graph.m_plan.recordedStatuses().addGetByStatus(currentCodeOrigin(), GetByStatus(GetByStatus::Custom, wasSeenInJIT));
3681 bool success = status->appendVariant(variant);
3682 RELEASE_ASSERT(success);
3683 addToGraph(FilterGetByStatus, OpInfo(status), thisNode);
3684
3685 CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
3686 callDOMGetterData->customAccessorGetter = variant.customAccessorGetter();
3687 ASSERT(callDOMGetterData->customAccessorGetter);
3688
3689 if (const auto* domJIT = domAttribute->domJIT) {
3690 callDOMGetterData->domJIT = domJIT;
3691 Ref<DOMJIT::CallDOMGetterSnippet> snippet = domJIT->compiler()();
3692 callDOMGetterData->snippet = snippet.ptr();
3693 m_graph.m_domJITSnippets.append(WTFMove(snippet));
3694 }
3695 DOMJIT::CallDOMGetterSnippet* callDOMGetterSnippet = callDOMGetterData->snippet;
3696 callDOMGetterData->identifierNumber = identifierNumber;
3697
3698 Node* callDOMGetterNode = nullptr;
3699 // GlobalObject of thisNode is always used to create a DOMWrapper.
3700 if (callDOMGetterSnippet && callDOMGetterSnippet->requireGlobalObject) {
3701 Node* globalObject = addToGraph(GetGlobalObject, thisNode);
3702 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode, globalObject);
3703 } else
3704 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode);
3705 blessCallDOMGetter(callDOMGetterNode);
3706 set(result, callDOMGetterNode);
3707 return true;
3708}
3709
3710bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByStatus getById)
3711{
3712 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
3713 return false;
3714 addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
3715
3716 addToGraph(FilterGetByStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByStatus(currentCodeOrigin(), getById)), base);
3717
3718 // Ideally we wouldn't have to do this Phantom. But:
3719 //
3720 // For the constant case: we must do it because otherwise we would have no way of knowing
3721 // that the scope is live at OSR here.
3722 //
3723 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
3724 // won't be able to handle an Undefined scope.
3725 addToGraph(Phantom, base);
3726
3727 // Constant folding in the bytecode parser is important for performance. This may not
3728 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
3729 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
3730 // would recompile. But if we can fold it here, we avoid the exit.
3731 m_graph.freeze(getById.moduleEnvironment());
3732 if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) {
3733 set(result, weakJSConstant(value));
3734 return true;
3735 }
3736 set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
3737 return true;
3738}
3739
3740template<typename ChecksFunctor>
3741bool ByteCodeParser::handleTypedArrayConstructor(
3742 VirtualRegister result, InternalFunction* function, int registerOffset,
3743 int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
3744{
3745 if (!isTypedView(type))
3746 return false;
3747
3748 if (function->classInfo() != constructorClassInfoForType(type))
3749 return false;
3750
3751 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
3752 return false;
3753
3754 // We only have an intrinsic for the case where you say:
3755 //
3756 // new FooArray(blah);
3757 //
3758 // Of course, 'blah' could be any of the following:
3759 //
3760 // - Integer, indicating that you want to allocate an array of that length.
3761 // This is the thing we're hoping for, and what we can actually do meaningful
3762 // optimizations for.
3763 //
3764 // - Array buffer, indicating that you want to create a view onto that _entire_
3765 // buffer.
3766 //
3767 // - Non-buffer object, indicating that you want to create a copy of that
3768 // object by pretending that it quacks like an array.
3769 //
3770 // - Anything else, indicating that you want to have an exception thrown at
3771 // you.
3772 //
3773 // The intrinsic, NewTypedArray, will behave as if it could do any of these
3774 // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
3775 // predicted Int32, then we lock it in as a normal typed array allocation.
3776 // Otherwise, NewTypedArray turns into a totally opaque function call that
3777 // may clobber the world - by virtue of it accessing properties on what could
3778 // be an object.
3779 //
3780 // Note that although the generic form of NewTypedArray sounds sort of awful,
3781 // it is actually quite likely to be more efficient than a fully generic
3782 // Construct. So, we might want to think about making NewTypedArray variadic,
3783 // or else making Construct not super slow.
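    //
    // Illustratively, with FooArray standing in for any concrete typed array constructor:
    //
    //     new FooArray(100); // integer length: the case we can optimize
    //     new FooArray(arrayBuffer); // view onto the entire buffer
    //     new FooArray([1, 2, 3]); // copy of an array-like object
    //     new FooArray(-1); // throws a RangeError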
3784
3785 if (argumentCountIncludingThis != 2)
3786 return false;
3787
3788 if (!function->globalObject()->typedArrayStructureConcurrently(type))
3789 return false;
3790
3791 insertChecks();
3792 set(result,
3793 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
3794 return true;
3795}
3796
3797template<typename ChecksFunctor>
3798bool ByteCodeParser::handleConstantInternalFunction(
3799 Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset,
3800 int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3801{
3802 VERBOSE_LOG(" Handling constant internal function ", JSValue(function), "\n");
3803
3804 // It so happens that the code below assumes that the result operand is valid. It's extremely
3805 // unlikely that the result operand would be invalid - you'd have to call this via a setter call.
3806 if (!result.isValid())
3807 return false;
3808
3809 if (kind == CodeForConstruct) {
3810 Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset));
3811 // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we
3812 // don't know what the prototype of the constructed object will be.
3813 // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700
3814 if (newTargetNode != callTargetNode)
3815 return false;
3816 }
3817
3818 if (function->classInfo() == ArrayConstructor::info()) {
3819 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
3820 return false;
3821
3822 insertChecks();
3823 if (argumentCountIncludingThis == 2) {
3824 set(result,
3825 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
3826 return true;
3827 }
3828
3829 for (int i = 1; i < argumentCountIncludingThis; ++i)
3830 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
3831 set(result,
3832 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1)));
3833 return true;
3834 }
3835
3836 if (function->classInfo() == NumberConstructor::info()) {
3837 if (kind == CodeForConstruct)
3838 return false;
3839
3840 insertChecks();
3841 if (argumentCountIncludingThis <= 1)
3842 set(result, jsConstant(jsNumber(0)));
3843 else
3844 set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
3845
3846 return true;
3847 }
3848
3849 if (function->classInfo() == StringConstructor::info()) {
3850 insertChecks();
3851
3852 Node* resultNode;
3853
3854 if (argumentCountIncludingThis <= 1)
3855 resultNode = jsConstant(m_vm->smallStrings.emptyString());
3856 else
3857 resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
3858
3859 if (kind == CodeForConstruct)
3860 resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject()->stringObjectStructure())), resultNode);
3861
3862 set(result, resultNode);
3863 return true;
3864 }
3865
3866 if (function->classInfo() == SymbolConstructor::info() && kind == CodeForCall) {
3867 insertChecks();
3868
3869 Node* resultNode;
3870
3871 if (argumentCountIncludingThis <= 1)
3872 resultNode = addToGraph(NewSymbol);
3873 else
3874 resultNode = addToGraph(NewSymbol, addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))));
3875
3876 set(result, resultNode);
3877 return true;
3878 }
3879
3880 // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
3881 if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
3882 insertChecks();
3883
3884 Node* resultNode;
3885 if (argumentCountIncludingThis <= 1)
3886 resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject()->objectStructureForObjectConstructor())));
3887 else
3888 resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject())), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
3889 set(result, resultNode);
3890 return true;
3891 }
3892
3893 for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) {
3894 bool handled = handleTypedArrayConstructor(
3895 result, function, registerOffset, argumentCountIncludingThis,
3896 indexToTypedArrayType(typeIndex), insertChecks);
3897 if (handled)
3898 return true;
3899 }
3900
3901 return false;
3902}
3903
3904Node* ByteCodeParser::handleGetByOffset(
3905 SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
3906{
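    // Inline offsets live directly in the object cell, so the base object itself acts as the
    // property storage; out-of-line offsets require loading the butterfly first.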
3907 Node* propertyStorage;
3908 if (isInlineOffset(offset))
3909 propertyStorage = base;
3910 else
3911 propertyStorage = addToGraph(GetButterfly, base);
3912
3913 StorageAccessData* data = m_graph.m_storageAccessData.add();
3914 data->offset = offset;
3915 data->identifierNumber = identifierNumber;
3916
3917 Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
3918
3919 return getByOffset;
3920}
3921
3922Node* ByteCodeParser::handlePutByOffset(
3923 Node* base, unsigned identifier, PropertyOffset offset,
3924 Node* value)
3925{
3926 Node* propertyStorage;
3927 if (isInlineOffset(offset))
3928 propertyStorage = base;
3929 else
3930 propertyStorage = addToGraph(GetButterfly, base);
3931
3932 StorageAccessData* data = m_graph.m_storageAccessData.add();
3933 data->offset = offset;
3934 data->identifierNumber = identifier;
3935
3936 Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
3937
3938 return result;
3939}
3940
3941bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
3942{
3943 if (!condition)
3944 return false;
3945
3946 if (m_graph.watchCondition(condition))
3947 return true;
3948
3949 Structure* structure = condition.object()->structure(*m_vm);
3950 if (!condition.structureEnsuresValidity(structure))
3951 return false;
3952
3953 addToGraph(
3954 CheckStructure,
3955 OpInfo(m_graph.addStructureSet(structure)),
3956 weakJSConstant(condition.object()));
3957 return true;
3958}
3959
3960GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
3961{
3962 if (method.kind() == GetByOffsetMethod::LoadFromPrototype
3963 && method.prototype()->structure()->dfgShouldWatch()) {
3964 if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
3965 return GetByOffsetMethod::constant(m_graph.freeze(constant));
3966 }
3967
3968 return method;
3969}
3970
3971bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode)
3972{
3973 ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope);
3974
3975 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3976 if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated())
3977 return true;
3978
3979 switch (type) {
3980 case GlobalProperty:
3981 case GlobalVar:
3982 case GlobalLexicalVar:
3983 case ClosureVar:
3984 case LocalClosureVar:
3985 case ModuleVar:
3986 return false;
3987
3988 case UnresolvedProperty:
3989 case UnresolvedPropertyWithVarInjectionChecks: {
3990         // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
3991         // haven't exited from this access before, to let the baseline JIT try to better
3992 // cache the access. If we've already exited from this operation, it's unlikely that
3993 // the baseline will come up with a better ResolveType and instead we will compile
3994 // this as a dynamic scope access.
3995
3996 // We only track our heuristic through resolve_scope since resolve_scope will
3997 // dominate unresolved gets/puts on that scope.
3998 if (opcode != op_resolve_scope)
3999 return true;
4000
4001 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, InadequateCoverage)) {
4002 // We've already exited so give up on getting better ResolveType information.
4003 return true;
4004 }
4005
4006 // We have not exited yet, so let's have the baseline get better ResolveType information for us.
4007 // This type of code is often seen when we tier up in a loop but haven't executed the part
4008 // of a function that comes after the loop.
4009 return false;
4010 }
4011
4012 case Dynamic:
4013 return true;
4014
4015 case GlobalPropertyWithVarInjectionChecks:
4016 case GlobalVarWithVarInjectionChecks:
4017 case GlobalLexicalVarWithVarInjectionChecks:
4018 case ClosureVarWithVarInjectionChecks:
4019 return false;
4020 }
4021
4022 ASSERT_NOT_REACHED();
4023 return false;
4024}
4025
4026GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition)
4027{
4028 VERBOSE_LOG("Planning a load: ", condition, "\n");
4029
4030 // We might promote this to Equivalence, and a later DFG pass might also do such promotion
4031 // even if we fail, but for simplicity this cannot be asked to load an equivalence condition.
4032 // None of the clients of this method will request a load of an Equivalence condition anyway,
4033 // and supporting it would complicate the heuristics below.
4034 RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence);
4035
4036 // Here's the ranking of how to handle this, from most preferred to least preferred:
4037 //
4038 // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value.
4039 // No other code is emitted, and the structure of the base object is never registered.
4040 // Hence this results in zero code and we won't jettison this compilation if the object
4041 // transitions, even if the structure is watchable right now.
4042 //
4043 // 2) Need to emit a load, and the current structure of the base is going to be watched by the
4044 // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the
4045 // condition, since the act of turning the base into a constant in IR will cause the DFG to
4046 // watch the structure anyway and doing so would subsume watching the condition.
4047 //
4048 // 3) Need to emit a load, and the current structure of the base is watchable but not by the
4049 // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch
4050 // the condition, and emit a load.
4051 //
4052 // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a
4053 // structure check, and emit a load.
4054 //
4055 // 5) The condition does not hold. Give up and return null.
4056
4057 // First, try to promote Presence to Equivalence. We do this before doing anything else
4058 // because it's the most profitable. Also, there are cases where the presence is watchable but
4059 // we don't want to watch it unless it became an equivalence (see the relationship between
4060 // (1), (2), and (3) above).
4061 ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
4062 if (m_graph.watchCondition(equivalenceCondition))
4063 return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue()));
4064
4065 // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once
4066 // we do this, the frozen value will have its own idea of what the structure is. Use that from
4067 // now on just because it's less confusing.
4068 FrozenValue* base = m_graph.freeze(condition.object());
4069 Structure* structure = base->structure();
4070
4071 // Check if the structure that we've registered makes the condition hold. If not, just give
4072 // up. This is case (5) above.
4073 if (!condition.structureEnsuresValidity(structure))
4074 return GetByOffsetMethod();
4075
4076 // If the structure is watched by the DFG already, then just use this fact to emit the load.
4077 // This is case (2) above.
4078 if (structure->dfgShouldWatch())
4079 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4080
4081 // If we can watch the condition right now, then we can emit the load after watching it. This
4082 // is case (3) above.
4083 if (m_graph.watchCondition(condition))
4084 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4085
4086 // We can't watch anything but we know that the current structure satisfies the condition. So,
4087 // check for that structure and then emit the load.
4088 addToGraph(
4089 CheckStructure,
4090 OpInfo(m_graph.addStructureSet(structure)),
4091 addToGraph(JSConstant, OpInfo(base)));
4092 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4093}
4094
4095Node* ByteCodeParser::load(
4096 SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method,
4097 NodeType op)
4098{
4099 switch (method.kind()) {
4100 case GetByOffsetMethod::Invalid:
4101 return nullptr;
4102 case GetByOffsetMethod::Constant:
4103 return addToGraph(JSConstant, OpInfo(method.constant()));
4104 case GetByOffsetMethod::LoadFromPrototype: {
4105 Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
4106 return handleGetByOffset(
4107 prediction, baseNode, identifierNumber, method.offset(), op);
4108 }
4109 case GetByOffsetMethod::Load:
4110 // Will never see this from planLoad().
4111 RELEASE_ASSERT_NOT_REACHED();
4112 return nullptr;
4113 }
4114
4115 RELEASE_ASSERT_NOT_REACHED();
4116 return nullptr;
4117}
4118
4119Node* ByteCodeParser::load(
4120 SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op)
4121{
4122 GetByOffsetMethod method = planLoad(condition);
4123 return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op);
4124}
4125
4126bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet)
4127{
4128 for (const ObjectPropertyCondition& condition : conditionSet) {
4129 if (!check(condition))
4130 return false;
4131 }
4132 return true;
4133}
4134
4135GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet)
4136{
4137 VERBOSE_LOG("conditionSet = ", conditionSet, "\n");
4138
4139 GetByOffsetMethod result;
4140 for (const ObjectPropertyCondition& condition : conditionSet) {
4141 switch (condition.kind()) {
4142 case PropertyCondition::Presence:
4143 RELEASE_ASSERT(!result); // Should only see exactly one of these.
4144 result = planLoad(condition);
4145 if (!result)
4146 return GetByOffsetMethod();
4147 break;
4148 default:
4149 if (!check(condition))
4150 return GetByOffsetMethod();
4151 break;
4152 }
4153 }
4154 if (!result) {
4155         // We have an unset property.
4156 ASSERT(!conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence));
4157 return GetByOffsetMethod::constant(m_constantUndefined);
4158 }
4159 return result;
4160}
4161
4162Node* ByteCodeParser::load(
4163 SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op)
4164{
4165 GetByOffsetMethod method = planLoad(conditionSet);
4166 return load(
4167 prediction,
4168 m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()),
4169 method, op);
4170}
4171
4172ObjectPropertyCondition ByteCodeParser::presenceLike(
4173 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4174{
4175 if (set.isEmpty())
4176 return ObjectPropertyCondition();
4177 unsigned attributes;
4178 PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes);
4179 if (firstOffset != offset)
4180 return ObjectPropertyCondition();
4181 for (unsigned i = 1; i < set.size(); ++i) {
4182 unsigned otherAttributes;
4183 PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes);
4184 if (otherOffset != offset || otherAttributes != attributes)
4185 return ObjectPropertyCondition();
4186 }
4187 return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes);
4188}
4189
4190bool ByteCodeParser::checkPresenceLike(
4191 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4192{
4193 return check(presenceLike(knownBase, uid, offset, set));
4194}
4195
4196void ByteCodeParser::checkPresenceLike(
4197 Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4198{
4199 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
4200 if (checkPresenceLike(knownBase, uid, offset, set))
4201 return;
4202 }
4203
4204 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base);
4205}
4206
4207template<typename VariantType>
4208Node* ByteCodeParser::load(
4209 SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant)
4210{
4211 // Make sure backwards propagation knows that we've used base.
4212 addToGraph(Phantom, base);
4213
4214 bool needStructureCheck = true;
4215
4216 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
4217
4218 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
4219 // Try to optimize away the structure check. Note that it's not worth doing anything about this
4220 // if the base's structure is watched.
4221 Structure* structure = base->constant()->structure();
4222 if (!structure->dfgShouldWatch()) {
4223 if (!variant.conditionSet().isEmpty()) {
4224 // This means that we're loading from a prototype or we have a property miss. We expect
4225 // the base not to have the property. We can only use ObjectPropertyCondition if all of
4226 // the structures in the variant.structureSet() agree on the prototype (it would be
4227 // hilariously rare if they didn't). Note that we are relying on structureSet() having
4228 // at least one element. That will always be true here because of how GetByStatus/PutByIdStatus work.
4229
4230 // FIXME: right now, if we have an OPCS, we have mono proto. However, this will
4231 // need to be changed in the future once we have a hybrid data structure for
4232 // poly proto:
4233 // https://bugs.webkit.org/show_bug.cgi?id=177339
4234 JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject();
4235 bool allAgree = true;
4236 for (unsigned i = 1; i < variant.structureSet().size(); ++i) {
4237 if (variant.structureSet()[i]->storedPrototypeObject() != prototype) {
4238 allAgree = false;
4239 break;
4240 }
4241 }
4242 if (allAgree) {
4243 ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier(
4244 knownBase, uid, prototype);
4245 if (check(condition))
4246 needStructureCheck = false;
4247 }
4248 } else {
4249 // This means we're loading directly from base. We can avoid all of the code that follows
4250 // if we can prove that the property is a constant. Otherwise, we try to prove that the
4251 // property is watchably present, in which case we get rid of the structure check.
4252
4253 ObjectPropertyCondition presenceCondition =
4254 presenceLike(knownBase, uid, variant.offset(), variant.structureSet());
4255 if (presenceCondition) {
4256 ObjectPropertyCondition equivalenceCondition =
4257 presenceCondition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
4258 if (m_graph.watchCondition(equivalenceCondition))
4259 return weakJSConstant(equivalenceCondition.requiredValue());
4260
4261 if (check(presenceCondition))
4262 needStructureCheck = false;
4263 }
4264 }
4265 }
4266 }
4267
4268 if (needStructureCheck)
4269 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
4270
4271 if (variant.isPropertyUnset()) {
4272 if (m_graph.watchConditions(variant.conditionSet()))
4273 return jsConstant(jsUndefined());
4274 return nullptr;
4275 }
4276
4277 SpeculatedType loadPrediction;
4278 NodeType loadOp;
4279 if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) {
4280 loadPrediction = SpecCellOther;
4281 loadOp = GetGetterSetterByOffset;
4282 } else {
4283 loadPrediction = prediction;
4284 loadOp = GetByOffset;
4285 }
4286
4287 Node* loadedValue;
4288 if (!variant.conditionSet().isEmpty())
4289 loadedValue = load(loadPrediction, variant.conditionSet(), loadOp);
4290 else {
4291 if (needStructureCheck && base->hasConstant()) {
4292 // We did emit a structure check. That means that we have an opportunity to do constant folding
4293 // here, since we didn't do it above.
4294 JSValue constant = m_graph.tryGetConstantProperty(
4295 base->asJSValue(), *m_graph.addStructureSet(variant.structureSet()), variant.offset());
4296 if (constant)
4297 return weakJSConstant(constant);
4298 }
4299
4300 loadedValue = handleGetByOffset(
4301 loadPrediction, base, identifierNumber, variant.offset(), loadOp);
4302 }
4303
4304 return loadedValue;
4305}
4306
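// Stores a value for a Replace variant: prove (or check) that the property is present at the
// expected offset, then emit the PutByOffset.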
4307Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value)
4308{
4309 RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
4310
4311 checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
4312 return handlePutByOffset(base, identifier, variant.offset(), value);
4313}
4314
4315void ByteCodeParser::handleGetById(
4316 VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber,
4317 GetByStatus getByStatus, AccessType type, unsigned instructionSize)
4318{
4319 // Attempt to reduce the set of things in the GetByStatus.
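    // Note: when the base is a NewObject and no node between its allocation and this access can
    // write to the structure ID, the object must still have its allocation-time structure, so the
    // status can be filtered down to that single structure.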
4320 if (base->op() == NewObject) {
4321 bool ok = true;
4322 for (unsigned i = m_currentBlock->size(); i--;) {
4323 Node* node = m_currentBlock->at(i);
4324 if (node == base)
4325 break;
4326 if (writesOverlap(m_graph, node, JSCell_structureID)) {
4327 ok = false;
4328 break;
4329 }
4330 }
4331 if (ok)
4332 getByStatus.filter(base->structure().get());
4333 }
4334
4335 NodeType getById;
4336 if (type == AccessType::GetById)
4337 getById = getByStatus.makesCalls() ? GetByIdFlush : GetById;
4338 else if (type == AccessType::TryGetById)
4339 getById = TryGetById;
4340 else
4341 getById = getByStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect;
4342
4343 if (getById != TryGetById && getByStatus.isModuleNamespace()) {
4344 if (handleModuleNamespaceLoad(destination, prediction, base, getByStatus)) {
4345 if (UNLIKELY(m_graph.compilation()))
4346 m_graph.compilation()->noticeInlinedGetById();
4347 return;
4348 }
4349 }
4350
4351    // Special path for custom accessors, since a custom accessor's offset does not have any meaning.
4352    // So this is completely different from the Simple case. But we have a chance to optimize it when we use DOMJIT.
4353 if (Options::useDOMJIT() && getByStatus.isCustom()) {
4354 ASSERT(getByStatus.numVariants() == 1);
4355 ASSERT(!getByStatus.makesCalls());
4356 GetByIdVariant variant = getByStatus[0];
4357 ASSERT(variant.domAttribute());
4358 if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) {
4359 if (UNLIKELY(m_graph.compilation()))
4360 m_graph.compilation()->noticeInlinedGetById();
4361 return;
4362 }
4363 }
4364
4365 ASSERT(type == AccessType::GetById || type == AccessType::GetByIdDirect || !getByStatus.makesCalls());
4366 if (!getByStatus.isSimple() || !getByStatus.numVariants() || !Options::useAccessInlining()) {
4367 set(destination,
4368 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4369 return;
4370 }
4371
4372 // FIXME: If we use the GetByStatus for anything then we should record it and insert a node
4373 // after everything else (like the GetByOffset or whatever) that will filter the recorded
4374 // GetByStatus. That means that the constant folder also needs to do the same!
4375
4376 if (getByStatus.numVariants() > 1) {
4377 if (getByStatus.makesCalls() || !m_graph.m_plan.isFTL()
4378 || !Options::usePolymorphicAccessInlining()
4379 || getByStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
4380 set(destination,
4381 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4382 return;
4383 }
4384
4385 addToGraph(FilterGetByStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByStatus(currentCodeOrigin(), getByStatus)), base);
4386
4387 Vector<MultiGetByOffsetCase, 2> cases;
4388
4389        // 1) Emit prototype structure checks for all chains. This may not be optimal if there
4390        //    is some rarely executed case in the chain that requires a lot of checks and those
4391        //    checks are not watchpointable.
4392 for (const GetByIdVariant& variant : getByStatus.variants()) {
4393 if (variant.intrinsic() != NoIntrinsic) {
4394 set(destination,
4395 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4396 return;
4397 }
4398
4399 if (variant.conditionSet().isEmpty()) {
4400 cases.append(
4401 MultiGetByOffsetCase(
4402 *m_graph.addStructureSet(variant.structureSet()),
4403 GetByOffsetMethod::load(variant.offset())));
4404 continue;
4405 }
4406
4407 GetByOffsetMethod method = planLoad(variant.conditionSet());
4408 if (!method) {
4409 set(destination,
4410 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4411 return;
4412 }
4413
4414 cases.append(MultiGetByOffsetCase(*m_graph.addStructureSet(variant.structureSet()), method));
4415 }
4416
4417 if (UNLIKELY(m_graph.compilation()))
4418 m_graph.compilation()->noticeInlinedGetById();
4419
4420 // 2) Emit a MultiGetByOffset
4421 MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
4422 data->cases = cases;
4423 data->identifierNumber = identifierNumber;
4424 set(destination,
4425 addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
4426 return;
4427 }
4428
4429 addToGraph(FilterGetByStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByStatus(currentCodeOrigin(), getByStatus)), base);
4430
4431 ASSERT(getByStatus.numVariants() == 1);
4432 GetByIdVariant variant = getByStatus[0];
4433
4434 Node* loadedValue = load(prediction, base, identifierNumber, variant);
4435 if (!loadedValue) {
4436 set(destination,
4437 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4438 return;
4439 }
4440
4441 if (UNLIKELY(m_graph.compilation()))
4442 m_graph.compilation()->noticeInlinedGetById();
4443
4444 ASSERT(type == AccessType::GetById || type == AccessType::GetByIdDirect || !variant.callLinkStatus());
4445 if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) {
4446 set(destination, loadedValue);
4447 return;
4448 }
4449
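    // The variant requires calling an accessor. The loaded value is the GetterSetter cell; pull
    // out the getter and either inline a known intrinsic or emit a real getter call below.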
4450 Node* getter = addToGraph(GetGetter, loadedValue);
4451
4452 if (handleIntrinsicGetter(destination, prediction, variant, base,
4453 [&] () {
4454 addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter);
4455 })) {
4456 addToGraph(Phantom, base);
4457 return;
4458 }
4459
4460 ASSERT(variant.intrinsic() == NoIntrinsic);
4461
4462 // Make a call. We don't try to get fancy with using the smallest operand number because
4463 // the stack layout phase should compress the stack anyway.
4464
4465 unsigned numberOfParameters = 0;
4466 numberOfParameters++; // The 'this' argument.
4467 numberOfParameters++; // True return PC.
4468
4469 // Start with a register offset that corresponds to the last in-use register.
4470 int registerOffset = virtualRegisterForLocal(
4471 m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset();
4472 registerOffset -= numberOfParameters;
4473 registerOffset -= CallFrame::headerSizeInRegisters;
4474
4475 // Get the alignment right.
4476 registerOffset = -WTF::roundUpToMultipleOf(
4477 stackAlignmentRegisters(),
4478 -registerOffset);
4479
4480 ensureLocals(
4481 m_inlineStackTop->remapOperand(
4482 VirtualRegister(registerOffset)).toLocal());
4483
4484 // Issue SetLocals. This has two effects:
4485 // 1) That's how handleCall() sees the arguments.
4486 // 2) If we inline then this ensures that the arguments are flushed so that if you use
4487 // the dreaded arguments object on the getter, the right things happen. Well, sort of -
4488 // since we only really care about 'this' in this case. But we're not going to take that
4489 // shortcut.
4490 set(virtualRegisterForArgument(0, registerOffset), base, ImmediateNakedSet);
4491
4492 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
4493 m_exitOK = true;
4494 addToGraph(ExitOK);
4495
4496 handleCall(
4497 destination, Call, InlineCallFrame::GetterCall, instructionSize,
4498 getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
4499}
4500
4501void ByteCodeParser::emitPutById(
4502 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
4503{
4504 if (isDirect)
4505 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
4506 else
4507 addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
4508}
4509
4510void ByteCodeParser::handlePutById(
4511 Node* base, unsigned identifierNumber, Node* value,
4512 const PutByIdStatus& putByIdStatus, bool isDirect, unsigned instructionSize)
4513{
4514 if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) {
4515 if (!putByIdStatus.isSet())
4516 addToGraph(ForceOSRExit);
4517 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4518 return;
4519 }
4520
4521 if (putByIdStatus.numVariants() > 1) {
4522 if (!m_graph.m_plan.isFTL() || putByIdStatus.makesCalls()
4523 || !Options::usePolymorphicAccessInlining()
4524 || putByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
4525 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4526 return;
4527 }
4528
4529 if (!isDirect) {
4530 for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
4531 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
4532 continue;
4533 if (!check(putByIdStatus[variantIndex].conditionSet())) {
4534 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4535 return;
4536 }
4537 }
4538 }
4539
4540 if (UNLIKELY(m_graph.compilation()))
4541 m_graph.compilation()->noticeInlinedPutById();
4542
4543 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4544
4545 for (const PutByIdVariant& variant : putByIdStatus.variants()) {
4546 for (Structure* structure : variant.oldStructure())
4547 m_graph.registerStructure(structure);
4548 if (variant.kind() == PutByIdVariant::Transition)
4549 m_graph.registerStructure(variant.newStructure());
4550 }
4551
4552 MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
4553 data->variants = putByIdStatus.variants();
4554 data->identifierNumber = identifierNumber;
4555 addToGraph(MultiPutByOffset, OpInfo(data), base, value);
4556 return;
4557 }
4558
4559 ASSERT(putByIdStatus.numVariants() == 1);
4560 const PutByIdVariant& variant = putByIdStatus[0];
4561
4562 switch (variant.kind()) {
4563 case PutByIdVariant::Replace: {
4564 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4565
4566 store(base, identifierNumber, variant, value);
4567 if (UNLIKELY(m_graph.compilation()))
4568 m_graph.compilation()->noticeInlinedPutById();
4569 return;
4570 }
4571
4572 case PutByIdVariant::Transition: {
4573 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4574
4575 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
4576 if (!check(variant.conditionSet())) {
4577 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4578 return;
4579 }
4580
4581 ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
4582
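        // Pick where the property will be written: inline storage reuses the base itself, an
        // existing butterfly is loaded with GetButterfly, and a growing transition allocates or
        // reallocates out-of-line storage first.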
4583 Node* propertyStorage;
4584 Transition* transition = m_graph.m_transitions.add(
4585 m_graph.registerStructure(variant.oldStructureForTransition()), m_graph.registerStructure(variant.newStructure()));
4586
4587 if (variant.reallocatesStorage()) {
4588
4589 // If we're growing the property storage then it must be because we're
4590 // storing into the out-of-line storage.
4591 ASSERT(!isInlineOffset(variant.offset()));
4592
4593 if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
4594 propertyStorage = addToGraph(
4595 AllocatePropertyStorage, OpInfo(transition), base);
4596 } else {
4597 propertyStorage = addToGraph(
4598 ReallocatePropertyStorage, OpInfo(transition),
4599 base, addToGraph(GetButterfly, base));
4600 }
4601 } else {
4602 if (isInlineOffset(variant.offset()))
4603 propertyStorage = base;
4604 else
4605 propertyStorage = addToGraph(GetButterfly, base);
4606 }
4607
4608 StorageAccessData* data = m_graph.m_storageAccessData.add();
4609 data->offset = variant.offset();
4610 data->identifierNumber = identifierNumber;
4611
4612 // NOTE: We could GC at this point because someone could insert an operation that GCs.
4613 // That's fine because:
4614 // - Things already in the structure will get scanned because we haven't messed with
4615 // the object yet.
4616        // - The value we are about to put is going to be kept live by OSR exit handling. So
4617 // if the GC does a conservative scan here it will see the new value.
4618
4619 addToGraph(
4620 PutByOffset,
4621 OpInfo(data),
4622 propertyStorage,
4623 base,
4624 value);
4625
4626 if (variant.reallocatesStorage())
4627 addToGraph(NukeStructureAndSetButterfly, base, propertyStorage);
4628
4629 // FIXME: PutStructure goes last until we fix either
4630 // https://bugs.webkit.org/show_bug.cgi?id=142921 or
4631 // https://bugs.webkit.org/show_bug.cgi?id=142924.
4632 addToGraph(PutStructure, OpInfo(transition), base);
4633
4634 if (UNLIKELY(m_graph.compilation()))
4635 m_graph.compilation()->noticeInlinedPutById();
4636 return;
4637 }
4638
4639 case PutByIdVariant::Setter: {
4640 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4641
4642 Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
4643 if (!loadedValue) {
4644 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4645 return;
4646 }
4647
4648 Node* setter = addToGraph(GetSetter, loadedValue);
4649
4650 // Make a call. We don't try to get fancy with using the smallest operand number because
4651 // the stack layout phase should compress the stack anyway.
4652
4653 unsigned numberOfParameters = 0;
4654 numberOfParameters++; // The 'this' argument.
4655 numberOfParameters++; // The new value.
4656 numberOfParameters++; // True return PC.
4657
4658 // Start with a register offset that corresponds to the last in-use register.
4659 int registerOffset = virtualRegisterForLocal(
4660 m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset();
4661 registerOffset -= numberOfParameters;
4662 registerOffset -= CallFrame::headerSizeInRegisters;
4663
4664 // Get the alignment right.
4665 registerOffset = -WTF::roundUpToMultipleOf(
4666 stackAlignmentRegisters(),
4667 -registerOffset);
4668
4669 ensureLocals(
4670 m_inlineStackTop->remapOperand(
4671 VirtualRegister(registerOffset)).toLocal());
4672
4673 set(virtualRegisterForArgument(0, registerOffset), base, ImmediateNakedSet);
4674 set(virtualRegisterForArgument(1, registerOffset), value, ImmediateNakedSet);
4675
4676 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
4677 m_exitOK = true;
4678 addToGraph(ExitOK);
4679
4680 handleCall(
4681 VirtualRegister(), Call, InlineCallFrame::SetterCall,
4682 instructionSize, setter, numberOfParameters - 1, registerOffset,
4683 *variant.callLinkStatus(), SpecOther);
4684 return;
4685 }
4686
4687 default: {
4688 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4689 return;
4690 } }
4691}
4692
4693void ByteCodeParser::prepareToParseBlock()
4694{
4695 clearCaches();
4696 ASSERT(m_setLocalQueue.isEmpty());
4697}
4698
4699void ByteCodeParser::clearCaches()
4700{
4701 m_constants.shrink(0);
4702}
4703
4704template<typename Op>
4705void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
4706{
4707 auto bytecode = currentInstruction->as<Op>();
4708 SpeculatedType prediction = getPrediction();
4709
4710 Node* base = get(bytecode.m_base);
4711 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
4712
4713 GetByStatus getByStatus = GetByStatus::computeFor(
4714 m_inlineStackTop->m_profiledBlock,
4715 m_inlineStackTop->m_baselineMap, m_icContextStack,
4716 currentCodeOrigin(), GetByStatus::TrackIdentifiers::No);
4717
4718 AccessType type = AccessType::GetById;
4719 unsigned opcodeLength = currentInstruction->size();
4720 if (Op::opcodeID == op_try_get_by_id)
4721 type = AccessType::TryGetById;
4722 else if (Op::opcodeID == op_get_by_id_direct)
4723 type = AccessType::GetByIdDirect;
4724
4725 handleGetById(
4726 bytecode.m_dst, prediction, base, identifierNumber, getByStatus, type, opcodeLength);
4727
4728}
4729
4730static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutInfo)
4731{
4732 static_assert(sizeof(identifierNumber) == 4,
4733 "We cannot fit identifierNumber into the high bits of m_opInfo");
4734 return static_cast<uint64_t>(identifierNumber) | (static_cast<uint64_t>(getPutInfo) << 32);
4735}
4736
4737// The idiom:
4738// if (true) { ...; goto label; } else label: continue
4739// Allows using NEXT_OPCODE as a statement, even in unbraced if+else, while containing a `continue`.
4740// The more common idiom:
4741// do { ...; } while (false)
4742// Doesn't allow using `continue`.
4743#define NEXT_OPCODE(name) \
4744 if (true) { \
4745 m_currentIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size()); \
4746 goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
4747 } else \
4748 WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
4749 continue
4750
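// Roughly: LAST_OPCODE_LINKED ends parsing of the current block when the terminal's successors are
// already linked; LAST_OPCODE additionally records the block as unlinked when its terminal is a
// Jump/Branch/Switch, so the jump targets can be resolved later.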
4751#define LAST_OPCODE_LINKED(name) do { \
4752 m_currentIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size()); \
4753 m_exitOK = false; \
4754 return; \
4755 } while (false)
4756
4757#define LAST_OPCODE(name) \
4758 do { \
4759 if (m_currentBlock->terminal()) { \
4760 switch (m_currentBlock->terminal()->op()) { \
4761 case Jump: \
4762 case Branch: \
4763 case Switch: \
4764 ASSERT(!m_currentBlock->isLinked); \
4765 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); \
4766 break;\
4767 default: break; \
4768 } \
4769 } \
4770 LAST_OPCODE_LINKED(name); \
4771 } while (false)
4772
4773void ByteCodeParser::parseBlock(unsigned limit)
4774{
4775 auto& instructions = m_inlineStackTop->m_codeBlock->instructions();
4776 BytecodeIndex blockBegin = m_currentIndex;
4777
4778 // If we are the first basic block, introduce markers for arguments. This allows
4779 // us to track if a use of an argument may use the actual argument passed, as
4780 // opposed to using a value we set explicitly.
4781 if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
4782 auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector());
4783 RELEASE_ASSERT(addResult.isNewEntry);
4784 ArgumentsVector& entrypointArguments = addResult.iterator->value;
4785 entrypointArguments.resize(m_numArguments);
4786
4787 // We will emit SetArgumentDefinitely nodes. They don't exit, but we're at the top of an op_enter so
4788 // exitOK = true.
4789 m_exitOK = true;
4790 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
4791 VariableAccessData* variable = newVariableAccessData(
4792 virtualRegisterForArgument(argument));
4793 variable->mergeStructureCheckHoistingFailed(
4794 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
4795 variable->mergeCheckArrayHoistingFailed(
4796 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
4797
4798 Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
4799 entrypointArguments[argument] = setArgument;
4800 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
4801 }
4802 }
4803
4804 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
4805
4806 auto jumpTarget = [&](int target) {
4807 if (target)
4808 return target;
4809 return codeBlock->outOfLineJumpOffset(m_currentInstruction);
4810 };
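    // Note: a jump operand of 0 means the real target did not fit in the instruction stream and is
    // stored in the code block's out-of-line jump table, which is what jumpTarget() consults.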
4811
4812 while (true) {
4813 // We're staring at a new bytecode instruction. So we once again have a place that we can exit
4814 // to.
4815 m_exitOK = true;
4816
4817 processSetLocalQueue();
4818
4819 // Don't extend over jump destinations.
4820 if (m_currentIndex.offset() == limit) {
4821 // Ordinarily we want to plant a jump. But refuse to do this if the block is
4822 // empty. This is a special case for inlining, which might otherwise create
4823 // some empty blocks in some cases. When parseBlock() returns with an empty
4824 // block, it will get repurposed instead of creating a new one. Note that this
4825 // logic relies on every bytecode resulting in one or more nodes, which would
4826 // be true anyway except for op_loop_hint, which emits a Phantom to force this
4827 // to be true.
4828
4829 if (!m_currentBlock->isEmpty())
4830 addJumpTo(m_currentIndex.offset());
4831 return;
4832 }
4833
4834 // Switch on the current bytecode opcode.
4835 const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr();
4836 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
4837 OpcodeID opcodeID = currentInstruction->opcodeID();
4838
4839 VERBOSE_LOG(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n");
4840
4841 if (UNLIKELY(m_graph.compilation())) {
4842 addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
4843 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
4844 }
4845
4846 switch (opcodeID) {
4847
4848 // === Function entry opcodes ===
4849
4850 case op_enter: {
4851 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
4852 // Initialize all locals to undefined.
4853 for (int i = 0; i < m_inlineStackTop->m_codeBlock->numVars(); ++i)
4854 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
4855
4856 NEXT_OPCODE(op_enter);
4857 }
4858
4859 case op_to_this: {
4860 Node* op1 = getThis();
4861 auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);
4862 StructureID cachedStructureID = metadata.m_cachedStructureID;
4863 Structure* cachedStructure = nullptr;
4864 if (cachedStructureID)
4865 cachedStructure = m_vm->heap.structureIDTable().get(cachedStructureID);
4866 if (metadata.m_toThisStatus != ToThisOK
4867 || !cachedStructure
4868 || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
4869 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
4870 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
4871 || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
4872 setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
4873 } else {
4874 addToGraph(
4875 CheckStructure,
4876 OpInfo(m_graph.addStructureSet(cachedStructure)),
4877 op1);
4878 }
4879 NEXT_OPCODE(op_to_this);
4880 }
4881
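        // op_create_this: if the callee is known (either a constant or pinned via CheckCell from
        // the callee cache in the bytecode metadata) and its allocation profile is still watchable,
        // allocate the object directly with NewObject (plus a poly-proto store if needed), e.g. for
        // `new C()` where C has only ever allocated with one structure. Otherwise fall back to the
        // generic CreateThis node.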
4882 case op_create_this: {
4883 auto bytecode = currentInstruction->as<OpCreateThis>();
4884 Node* callee = get(VirtualRegister(bytecode.m_callee));
4885
4886 JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
4887 if (!function) {
4888 JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
4889 if (cachedFunction
4890 && cachedFunction != JSCell::seenMultipleCalleeObjects()
4891 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
4892 ASSERT(cachedFunction->inherits<JSFunction>(*m_vm));
4893
4894 FrozenValue* frozen = m_graph.freeze(cachedFunction);
4895 addToGraph(CheckCell, OpInfo(frozen), callee);
4896
4897 function = static_cast<JSFunction*>(cachedFunction);
4898 }
4899 }
4900
4901 bool alreadyEmitted = false;
4902 if (function) {
4903 if (FunctionRareData* rareData = function->rareData()) {
4904 if (rareData->allocationProfileWatchpointSet().isStillValid()) {
4905 Structure* structure = rareData->objectAllocationStructure();
4906 JSObject* prototype = rareData->objectAllocationPrototype();
4907 if (structure
4908 && (structure->hasMonoProto() || prototype)
4909 && rareData->allocationProfileWatchpointSet().isStillValid()) {
4910
4911 m_graph.freeze(rareData);
4912 m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
4913
4914 Node* object = addToGraph(NewObject, OpInfo(m_graph.registerStructure(structure)));
4915 if (structure->hasPolyProto()) {
4916 StorageAccessData* data = m_graph.m_storageAccessData.add();
4917 data->offset = knownPolyProtoOffset;
4918 data->identifierNumber = m_graph.identifiers().ensure(m_graph.m_vm.propertyNames->builtinNames().polyProtoName().impl());
4919 ASSERT(isInlineOffset(knownPolyProtoOffset));
4920 addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype));
4921 }
4922 set(VirtualRegister(bytecode.m_dst), object);
4923 // The callee is still live up to this point.
4924 addToGraph(Phantom, callee);
4925 alreadyEmitted = true;
4926 }
4927 }
4928 }
4929 }
4930 if (!alreadyEmitted) {
4931 set(VirtualRegister(bytecode.m_dst),
4932 addToGraph(CreateThis, OpInfo(bytecode.m_inlineCapacity), callee));
4933 }
4934 NEXT_OPCODE(op_create_this);
4935 }
4936
4937 case op_create_promise: {
4938 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
4939 auto bytecode = currentInstruction->as<OpCreatePromise>();
4940 Node* callee = get(VirtualRegister(bytecode.m_callee));
4941
4942 bool alreadyEmitted = false;
4943
4944 {
4945                // Attempt to convert to NewPromise first in the easy case.
4946 JSPromiseConstructor* promiseConstructor = callee->dynamicCastConstant<JSPromiseConstructor*>(*m_vm);
4947 if (promiseConstructor == (bytecode.m_isInternalPromise ? globalObject->internalPromiseConstructor() : globalObject->promiseConstructor())) {
4948 JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
4949 if (cachedFunction
4950 && cachedFunction != JSCell::seenMultipleCalleeObjects()
4951 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)
4952 && cachedFunction == (bytecode.m_isInternalPromise ? globalObject->internalPromiseConstructor() : globalObject->promiseConstructor())) {
4953 FrozenValue* frozen = m_graph.freeze(cachedFunction);
4954 addToGraph(CheckCell, OpInfo(frozen), callee);
4955
4956 promiseConstructor = jsCast<JSPromiseConstructor*>(cachedFunction);
4957 }
4958 }
4959 if (promiseConstructor) {
4960 addToGraph(Phantom, callee);
4961 set(VirtualRegister(bytecode.m_dst), addToGraph(NewPromise, OpInfo(m_graph.registerStructure(bytecode.m_isInternalPromise ? globalObject->internalPromiseStructure() : globalObject->promiseStructure())), OpInfo(bytecode.m_isInternalPromise)));
4962 alreadyEmitted = true;
4963 }
4964 }
4965
4966 // Derived function case.
4967 if (!alreadyEmitted) {
4968 JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
4969 if (!function) {
4970 JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
4971 if (cachedFunction
4972 && cachedFunction != JSCell::seenMultipleCalleeObjects()
4973 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
4974 ASSERT(cachedFunction->inherits<JSFunction>(*m_vm));
4975
4976 FrozenValue* frozen = m_graph.freeze(cachedFunction);
4977 addToGraph(CheckCell, OpInfo(frozen), callee);
4978
4979 function = static_cast<JSFunction*>(cachedFunction);
4980 }
4981 }
4982
4983 if (function) {
4984 if (FunctionRareData* rareData = function->rareData()) {
4985 if (rareData->allocationProfileWatchpointSet().isStillValid()) {
4986 Structure* structure = rareData->internalFunctionAllocationStructure();
4987 if (structure
4988 && structure->classInfo() == (bytecode.m_isInternalPromise ? JSInternalPromise::info() : JSPromise::info())
4989 && structure->globalObject() == globalObject
4990 && rareData->allocationProfileWatchpointSet().isStillValid()) {
4991 m_graph.freeze(rareData);
4992 m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
4993
4994 set(VirtualRegister(bytecode.m_dst), addToGraph(NewPromise, OpInfo(m_graph.registerStructure(structure)), OpInfo(bytecode.m_isInternalPromise)));
4995 // The callee is still live up to this point.
4996 addToGraph(Phantom, callee);
4997 alreadyEmitted = true;
4998 }
4999 }
5000 }
5001 }
5002 if (!alreadyEmitted)
5003 set(VirtualRegister(bytecode.m_dst), addToGraph(CreatePromise, OpInfo(), OpInfo(bytecode.m_isInternalPromise), callee));
5004 }
5005 NEXT_OPCODE(op_create_promise);
5006 }
5007
5008 case op_create_generator: {
5009 handleCreateInternalFieldObject(JSGenerator::info(), CreateGenerator, NewGenerator, currentInstruction->as<OpCreateGenerator>());
5010 NEXT_OPCODE(op_create_generator);
5011 }
5012
5013 case op_create_async_generator: {
5014 handleCreateInternalFieldObject(JSAsyncGenerator::info(), CreateAsyncGenerator, NewAsyncGenerator, currentInstruction->as<OpCreateAsyncGenerator>());
5015 NEXT_OPCODE(op_create_async_generator);
5016 }
5017
5018 case op_new_object: {
5019 auto bytecode = currentInstruction->as<OpNewObject>();
5020 set(bytecode.m_dst,
5021 addToGraph(NewObject,
5022 OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).m_objectAllocationProfile.structure()))));
5023 NEXT_OPCODE(op_new_object);
5024 }
5025
5026 case op_new_promise: {
5027 auto bytecode = currentInstruction->as<OpNewPromise>();
5028 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
5029 set(bytecode.m_dst, addToGraph(NewPromise, OpInfo(m_graph.registerStructure(bytecode.m_isInternalPromise ? globalObject->internalPromiseStructure() : globalObject->promiseStructure())), OpInfo(bytecode.m_isInternalPromise)));
5030 NEXT_OPCODE(op_new_promise);
5031 }
5032
5033 case op_new_generator: {
5034 auto bytecode = currentInstruction->as<OpNewGenerator>();
5035 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
5036 set(bytecode.m_dst, addToGraph(NewGenerator, OpInfo(m_graph.registerStructure(globalObject->generatorStructure()))));
5037 NEXT_OPCODE(op_new_generator);
5038 }
5039
5040 case op_new_array: {
5041 auto bytecode = currentInstruction->as<OpNewArray>();
5042 int startOperand = bytecode.m_argv.offset();
5043 int numOperands = bytecode.m_argc;
5044 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
5045 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
5046 addVarArgChild(get(VirtualRegister(operandIdx)));
5047 unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHintConcurrently(), numOperands);
5048 set(bytecode.m_dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingTypeConcurrently()), OpInfo(vectorLengthHint)));
5049 NEXT_OPCODE(op_new_array);
5050 }
5051
5052 case op_new_array_with_spread: {
5053 auto bytecode = currentInstruction->as<OpNewArrayWithSpread>();
5054 int startOperand = bytecode.m_argv.offset();
5055 int numOperands = bytecode.m_argc;
5056 const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.m_bitVector);
5057 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
5058 addVarArgChild(get(VirtualRegister(operandIdx)));
5059
5060 BitVector* copy = m_graph.m_bitVectors.add(bitVector);
5061 ASSERT(*copy == bitVector);
5062
5063 set(bytecode.m_dst,
5064 addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy)));
5065 NEXT_OPCODE(op_new_array_with_spread);
5066 }
5067
5068 case op_spread: {
5069 auto bytecode = currentInstruction->as<OpSpread>();
5070 set(bytecode.m_dst,
5071 addToGraph(Spread, get(bytecode.m_argument)));
5072 NEXT_OPCODE(op_spread);
5073 }
5074
5075 case op_new_array_with_size: {
5076 auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
5077 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
5078 set(bytecode.m_dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingTypeConcurrently()), get(bytecode.m_length)));
5079 NEXT_OPCODE(op_new_array_with_size);
5080 }
5081
5082 case op_new_array_buffer: {
5083 auto bytecode = currentInstruction->as<OpNewArrayBuffer>();
5084 // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new information because we
5085 // cannot allocate from compilation threads.
5086 WTF::loadLoadFence();
5087 FrozenValue* frozen = get(VirtualRegister(bytecode.m_immutableButterfly))->constant();
5088 WTF::loadLoadFence();
5089 JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>();
5090 NewArrayBufferData data { };
5091 data.indexingMode = immutableButterfly->indexingMode();
5092 data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength();
5093
5094 set(VirtualRegister(bytecode.m_dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
5095 NEXT_OPCODE(op_new_array_buffer);
5096 }
5097
5098 case op_new_regexp: {
5099 auto bytecode = currentInstruction->as<OpNewRegexp>();
5100 ASSERT(bytecode.m_regexp.isConstant());
5101 FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset()));
5102 set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
5103 NEXT_OPCODE(op_new_regexp);
5104 }
5105
5106 case op_get_rest_length: {
5107 auto bytecode = currentInstruction->as<OpGetRestLength>();
5108 InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
5109 Node* length;
5110 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
5111 unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1;
5112 JSValue restLength;
5113 if (argumentsLength <= bytecode.m_numParametersToSkip)
5114 restLength = jsNumber(0);
5115 else
5116 restLength = jsNumber(argumentsLength - bytecode.m_numParametersToSkip);
5117
5118 length = jsConstant(restLength);
5119 } else
5120 length = addToGraph(GetRestLength, OpInfo(bytecode.m_numParametersToSkip));
5121 set(bytecode.m_dst, length);
5122 NEXT_OPCODE(op_get_rest_length);
5123 }
5124
5125 case op_create_rest: {
5126 auto bytecode = currentInstruction->as<OpCreateRest>();
5127 noticeArgumentsUse();
5128 Node* arrayLength = get(bytecode.m_arraySize);
5129 set(bytecode.m_dst,
5130 addToGraph(CreateRest, OpInfo(bytecode.m_numParametersToSkip), arrayLength));
5131 NEXT_OPCODE(op_create_rest);
5132 }
5133
5134 // === Bitwise operations ===
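        // The pattern for the binary bitwise ops below: if profiling proves both operands produce
        // numbers (or any-int), emit the numeric ArithBit* node; otherwise emit the generic
        // ValueBit* node, which also covers BigInt and object operands, guided by the result
        // prediction.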
5135
5136 case op_bitnot: {
5137 auto bytecode = currentInstruction->as<OpBitnot>();
5138 SpeculatedType prediction = getPrediction();
5139 Node* op1 = get(bytecode.m_operand);
5140 if (op1->hasNumberOrAnyIntResult())
5141 set(bytecode.m_dst, addToGraph(ArithBitNot, op1));
5142 else
5143 set(bytecode.m_dst, addToGraph(ValueBitNot, OpInfo(), OpInfo(prediction), op1));
5144 NEXT_OPCODE(op_bitnot);
5145 }
5146
5147 case op_bitand: {
5148 auto bytecode = currentInstruction->as<OpBitand>();
5149 SpeculatedType prediction = getPrediction();
5150 Node* op1 = get(bytecode.m_lhs);
5151 Node* op2 = get(bytecode.m_rhs);
5152 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5153 set(bytecode.m_dst, addToGraph(ArithBitAnd, op1, op2));
5154 else
5155 set(bytecode.m_dst, addToGraph(ValueBitAnd, OpInfo(), OpInfo(prediction), op1, op2));
5156 NEXT_OPCODE(op_bitand);
5157 }
5158
5159 case op_bitor: {
5160 auto bytecode = currentInstruction->as<OpBitor>();
5161 SpeculatedType prediction = getPrediction();
5162 Node* op1 = get(bytecode.m_lhs);
5163 Node* op2 = get(bytecode.m_rhs);
5164 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5165 set(bytecode.m_dst, addToGraph(ArithBitOr, op1, op2));
5166 else
5167 set(bytecode.m_dst, addToGraph(ValueBitOr, OpInfo(), OpInfo(prediction), op1, op2));
5168 NEXT_OPCODE(op_bitor);
5169 }
5170
5171 case op_bitxor: {
5172 auto bytecode = currentInstruction->as<OpBitxor>();
5173 SpeculatedType prediction = getPrediction();
5174 Node* op1 = get(bytecode.m_lhs);
5175 Node* op2 = get(bytecode.m_rhs);
5176 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5177 set(bytecode.m_dst, addToGraph(ArithBitXor, op1, op2));
5178 else
5179 set(bytecode.m_dst, addToGraph(ValueBitXor, OpInfo(), OpInfo(prediction), op1, op2));
5180 NEXT_OPCODE(op_bitxor);
5181 }
5182
5183 case op_rshift: {
5184 auto bytecode = currentInstruction->as<OpRshift>();
5185 Node* op1 = get(bytecode.m_lhs);
5186 Node* op2 = get(bytecode.m_rhs);
5187 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5188 set(bytecode.m_dst, addToGraph(ArithBitRShift, op1, op2));
5189 else {
5190 SpeculatedType prediction = getPredictionWithoutOSRExit();
5191 set(bytecode.m_dst, addToGraph(ValueBitRShift, OpInfo(), OpInfo(prediction), op1, op2));
5192 }
5193 NEXT_OPCODE(op_rshift);
5194 }
5195
5196 case op_lshift: {
5197 auto bytecode = currentInstruction->as<OpLshift>();
5198 Node* op1 = get(bytecode.m_lhs);
5199 Node* op2 = get(bytecode.m_rhs);
5200 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5201 set(bytecode.m_dst, addToGraph(ArithBitLShift, op1, op2));
5202 else {
5203 SpeculatedType prediction = getPredictionWithoutOSRExit();
5204 set(bytecode.m_dst, addToGraph(ValueBitLShift, OpInfo(), OpInfo(prediction), op1, op2));
5205 }
5206 NEXT_OPCODE(op_lshift);
5207 }
5208
5209 case op_urshift: {
5210 auto bytecode = currentInstruction->as<OpUrshift>();
5211 Node* op1 = get(bytecode.m_lhs);
5212 Node* op2 = get(bytecode.m_rhs);
5213 set(bytecode.m_dst, addToGraph(BitURShift, op1, op2));
5214 NEXT_OPCODE(op_urshift);
5215 }
5216
5217 case op_unsigned: {
5218 auto bytecode = currentInstruction->as<OpUnsigned>();
5219 set(bytecode.m_dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.m_operand))));
5220 NEXT_OPCODE(op_unsigned);
5221 }
5222
5223 // === Increment/Decrement opcodes ===
5224
5225 case op_inc: {
5226 auto bytecode = currentInstruction->as<OpInc>();
5227 Node* op = get(bytecode.m_srcDst);
5228 // FIXME: we can replace the Inc by either ArithAdd with m_constantOne or ArithAdd with the equivalent BigInt in many cases.
5229 // For now we only do so in DFGFixupPhase.
5230 // We could probably do it earlier in some cases, but it is not clearly worth the trouble.
5231 set(bytecode.m_srcDst, makeSafe(addToGraph(Inc, op)));
5232 NEXT_OPCODE(op_inc);
5233 }
5234
5235 case op_dec: {
5236 auto bytecode = currentInstruction->as<OpDec>();
5237 Node* op = get(bytecode.m_srcDst);
5238            // FIXME: we can replace the Dec by either ArithSub with m_constantOne or ArithSub with the equivalent BigInt in many cases.
5239 // For now we only do so in DFGFixupPhase.
5240 // We could probably do it earlier in some cases, but it is not clearly worth the trouble.
5241 set(bytecode.m_srcDst, makeSafe(addToGraph(Dec, op)));
5242 NEXT_OPCODE(op_dec);
5243 }
5244
5245 // === Arithmetic operations ===
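        // Same idea for arithmetic: known-numeric operands take the Arith* nodes, everything else
        // the Value* nodes. makeSafe()/makeDivSafe() consult profiling to decide whether the node
        // must be treated as possibly overflowing or producing negative zero.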
5246
5247 case op_add: {
5248 auto bytecode = currentInstruction->as<OpAdd>();
5249 Node* op1 = get(bytecode.m_lhs);
5250 Node* op2 = get(bytecode.m_rhs);
5251 if (op1->hasNumberResult() && op2->hasNumberResult())
5252 set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2)));
5253 else
5254 set(bytecode.m_dst, makeSafe(addToGraph(ValueAdd, op1, op2)));
5255 NEXT_OPCODE(op_add);
5256 }
5257
5258 case op_sub: {
5259 auto bytecode = currentInstruction->as<OpSub>();
5260 Node* op1 = get(bytecode.m_lhs);
5261 Node* op2 = get(bytecode.m_rhs);
5262 if (op1->hasNumberResult() && op2->hasNumberResult())
5263 set(bytecode.m_dst, makeSafe(addToGraph(ArithSub, op1, op2)));
5264 else
5265 set(bytecode.m_dst, makeSafe(addToGraph(ValueSub, op1, op2)));
5266 NEXT_OPCODE(op_sub);
5267 }
5268
5269 case op_negate: {
5270 auto bytecode = currentInstruction->as<OpNegate>();
5271 Node* op1 = get(bytecode.m_operand);
5272 if (op1->hasNumberResult())
5273 set(bytecode.m_dst, makeSafe(addToGraph(ArithNegate, op1)));
5274 else
5275 set(bytecode.m_dst, makeSafe(addToGraph(ValueNegate, op1)));
5276 NEXT_OPCODE(op_negate);
5277 }
5278
5279 case op_mul: {
5280 // Multiply requires that the inputs are not truncated, unfortunately.
5281 auto bytecode = currentInstruction->as<OpMul>();
5282 Node* op1 = get(bytecode.m_lhs);
5283 Node* op2 = get(bytecode.m_rhs);
5284 if (op1->hasNumberResult() && op2->hasNumberResult())
5285 set(bytecode.m_dst, makeSafe(addToGraph(ArithMul, op1, op2)));
5286 else
5287 set(bytecode.m_dst, makeSafe(addToGraph(ValueMul, op1, op2)));
5288 NEXT_OPCODE(op_mul);
5289 }
5290
5291 case op_mod: {
5292 auto bytecode = currentInstruction->as<OpMod>();
5293 Node* op1 = get(bytecode.m_lhs);
5294 Node* op2 = get(bytecode.m_rhs);
5295 if (op1->hasNumberResult() && op2->hasNumberResult())
5296 set(bytecode.m_dst, makeSafe(addToGraph(ArithMod, op1, op2)));
5297 else
5298 set(bytecode.m_dst, makeSafe(addToGraph(ValueMod, op1, op2)));
5299 NEXT_OPCODE(op_mod);
5300 }
5301
5302 case op_pow: {
5303 auto bytecode = currentInstruction->as<OpPow>();
5304 Node* op1 = get(bytecode.m_lhs);
5305 Node* op2 = get(bytecode.m_rhs);
5306 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5307 set(bytecode.m_dst, addToGraph(ArithPow, op1, op2));
5308 else
5309 set(bytecode.m_dst, addToGraph(ValuePow, op1, op2));
5310 NEXT_OPCODE(op_pow);
5311 }
5312
5313 case op_div: {
5314 auto bytecode = currentInstruction->as<OpDiv>();
5315 Node* op1 = get(bytecode.m_lhs);
5316 Node* op2 = get(bytecode.m_rhs);
5317 if (op1->hasNumberResult() && op2->hasNumberResult())
5318 set(bytecode.m_dst, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
5319 else
5320 set(bytecode.m_dst, makeDivSafe(addToGraph(ValueDiv, op1, op2)));
5321 NEXT_OPCODE(op_div);
5322 }
5323
5324 // === Misc operations ===
5325
5326 case op_debug: {
5327 // This is a nop in the DFG/FTL because when we set a breakpoint in the debugger,
5328            // we will jettison all optimized CodeBlocks that contain the breakpoint.
5329 addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
5330 NEXT_OPCODE(op_debug);
5331 }
5332
5333 case op_mov: {
5334 auto bytecode = currentInstruction->as<OpMov>();
5335 Node* op = get(bytecode.m_src);
5336 set(bytecode.m_dst, op);
5337 NEXT_OPCODE(op_mov);
5338 }
5339
5340 case op_check_tdz: {
5341 auto bytecode = currentInstruction->as<OpCheckTdz>();
5342 addToGraph(CheckNotEmpty, get(bytecode.m_targetVirtualRegister));
5343 NEXT_OPCODE(op_check_tdz);
5344 }
5345
5346 case op_overrides_has_instance: {
5347 auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
5348 JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction();
5349
5350 Node* constructor = get(VirtualRegister(bytecode.m_constructor));
5351 Node* hasInstanceValue = get(VirtualRegister(bytecode.m_hasInstanceValue));
5352
5353 set(VirtualRegister(bytecode.m_dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
5354 NEXT_OPCODE(op_overrides_has_instance);
5355 }
5356
5357 case op_identity_with_profile: {
5358 auto bytecode = currentInstruction->as<OpIdentityWithProfile>();
5359 Node* srcDst = get(bytecode.m_srcDst);
5360 SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.m_topProfile) << 32 | static_cast<SpeculatedType>(bytecode.m_bottomProfile);
5361 set(bytecode.m_srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst));
5362 NEXT_OPCODE(op_identity_with_profile);
5363 }
5364
5365 case op_instanceof: {
5366 auto bytecode = currentInstruction->as<OpInstanceof>();
5367
5368 InstanceOfStatus status = InstanceOfStatus::computeFor(
5369 m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap,
5370 m_currentIndex);
5371
5372 Node* value = get(bytecode.m_value);
5373 Node* prototype = get(bytecode.m_prototype);
5374
5375 // Only inline it if it's Simple with a commonPrototype; bottom/top or variable
5376 // prototypes both get handled by the IC. This makes sense for bottom (unprofiled)
5377 // instanceof ICs because the profit of this optimization is fairly low. So, in the
5378 // absence of any information, it's better to avoid making this be the cause of a
5379 // recompilation.
5380 if (JSObject* commonPrototype = status.commonPrototype()) {
5381 addToGraph(CheckCell, OpInfo(m_graph.freeze(commonPrototype)), prototype);
5382
5383 bool allOK = true;
5384 MatchStructureData* data = m_graph.m_matchStructureData.add();
5385 for (const InstanceOfVariant& variant : status.variants()) {
5386 if (!check(variant.conditionSet())) {
5387 allOK = false;
5388 break;
5389 }
5390 for (Structure* structure : variant.structureSet()) {
5391 MatchStructureVariant matchVariant;
5392 matchVariant.structure = m_graph.registerStructure(structure);
5393 matchVariant.result = variant.isHit();
5394
5395 data->variants.append(WTFMove(matchVariant));
5396 }
5397 }
5398
5399 if (allOK) {
5400 Node* match = addToGraph(MatchStructure, OpInfo(data), value);
5401 set(bytecode.m_dst, match);
5402 NEXT_OPCODE(op_instanceof);
5403 }
5404 }
5405
5406 set(bytecode.m_dst, addToGraph(InstanceOf, value, prototype));
5407 NEXT_OPCODE(op_instanceof);
5408 }
5409
5410 case op_instanceof_custom: {
5411 auto bytecode = currentInstruction->as<OpInstanceofCustom>();
5412 Node* value = get(bytecode.m_value);
5413 Node* constructor = get(bytecode.m_constructor);
5414 Node* hasInstanceValue = get(bytecode.m_hasInstanceValue);
5415 set(bytecode.m_dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
5416 NEXT_OPCODE(op_instanceof_custom);
5417 }
5418 case op_is_empty: {
5419 auto bytecode = currentInstruction->as<OpIsEmpty>();
5420 Node* value = get(bytecode.m_operand);
5421 set(bytecode.m_dst, addToGraph(IsEmpty, value));
5422 NEXT_OPCODE(op_is_empty);
5423 }
5424 case op_is_undefined: {
5425 auto bytecode = currentInstruction->as<OpIsUndefined>();
5426 Node* value = get(bytecode.m_operand);
5427 set(bytecode.m_dst, addToGraph(IsUndefined, value));
5428 NEXT_OPCODE(op_is_undefined);
5429 }
5430 case op_is_undefined_or_null: {
5431 auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
5432 Node* value = get(bytecode.m_operand);
5433 set(bytecode.m_dst, addToGraph(IsUndefinedOrNull, value));
5434 NEXT_OPCODE(op_is_undefined_or_null);
5435 }
5436
5437 case op_is_boolean: {
5438 auto bytecode = currentInstruction->as<OpIsBoolean>();
5439 Node* value = get(bytecode.m_operand);
5440 set(bytecode.m_dst, addToGraph(IsBoolean, value));
5441 NEXT_OPCODE(op_is_boolean);
5442 }
5443
5444 case op_is_number: {
5445 auto bytecode = currentInstruction->as<OpIsNumber>();
5446 Node* value = get(bytecode.m_operand);
5447 set(bytecode.m_dst, addToGraph(IsNumber, value));
5448 NEXT_OPCODE(op_is_number);
5449 }
5450
5451 case op_is_cell_with_type: {
5452 auto bytecode = currentInstruction->as<OpIsCellWithType>();
5453 Node* value = get(bytecode.m_operand);
5454 set(bytecode.m_dst, addToGraph(IsCellWithType, OpInfo(bytecode.m_type), value));
5455 NEXT_OPCODE(op_is_cell_with_type);
5456 }
5457
5458 case op_is_object: {
5459 auto bytecode = currentInstruction->as<OpIsObject>();
5460 Node* value = get(bytecode.m_operand);
5461 set(bytecode.m_dst, addToGraph(IsObject, value));
5462 NEXT_OPCODE(op_is_object);
5463 }
5464
5465 case op_is_object_or_null: {
5466 auto bytecode = currentInstruction->as<OpIsObjectOrNull>();
5467 Node* value = get(bytecode.m_operand);
5468 set(bytecode.m_dst, addToGraph(IsObjectOrNull, value));
5469 NEXT_OPCODE(op_is_object_or_null);
5470 }
5471
5472 case op_is_function: {
5473 auto bytecode = currentInstruction->as<OpIsFunction>();
5474 Node* value = get(bytecode.m_operand);
5475 set(bytecode.m_dst, addToGraph(IsFunction, value));
5476 NEXT_OPCODE(op_is_function);
5477 }
5478
5479 case op_not: {
5480 auto bytecode = currentInstruction->as<OpNot>();
5481 Node* value = get(bytecode.m_operand);
5482 set(bytecode.m_dst, addToGraph(LogicalNot, value));
5483 NEXT_OPCODE(op_not);
5484 }
5485
5486 case op_to_primitive: {
5487 auto bytecode = currentInstruction->as<OpToPrimitive>();
5488 Node* value = get(bytecode.m_src);
5489 set(bytecode.m_dst, addToGraph(ToPrimitive, value));
5490 NEXT_OPCODE(op_to_primitive);
5491 }
5492
5493 case op_strcat: {
5494 auto bytecode = currentInstruction->as<OpStrcat>();
5495 int startOperand = bytecode.m_src.offset();
5496 int numOperands = bytecode.m_count;
5497 const unsigned maxArguments = 3;
5498 Node* operands[AdjacencyList::Size];
5499 unsigned indexInOperands = 0;
5500 for (unsigned i = 0; i < AdjacencyList::Size; ++i)
5501 operands[i] = 0;
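            // Fold the operands into StrCat nodes at most three at a time (the non-vararg child
            // limit), carrying the partial result forward in operands[0].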
5502 for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
5503 if (indexInOperands == maxArguments) {
5504 operands[0] = addToGraph(StrCat, operands[0], operands[1], operands[2]);
5505 for (unsigned i = 1; i < AdjacencyList::Size; ++i)
5506 operands[i] = 0;
5507 indexInOperands = 1;
5508 }
5509
5510 ASSERT(indexInOperands < AdjacencyList::Size);
5511 ASSERT(indexInOperands < maxArguments);
5512 operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx));
5513 }
5514 set(bytecode.m_dst, addToGraph(StrCat, operands[0], operands[1], operands[2]));
5515 NEXT_OPCODE(op_strcat);
5516 }
5517
5518 case op_less: {
5519 auto bytecode = currentInstruction->as<OpLess>();
5520 Node* op1 = get(bytecode.m_lhs);
5521 Node* op2 = get(bytecode.m_rhs);
5522 set(bytecode.m_dst, addToGraph(CompareLess, op1, op2));
5523 NEXT_OPCODE(op_less);
5524 }
5525
5526 case op_lesseq: {
5527 auto bytecode = currentInstruction->as<OpLesseq>();
5528 Node* op1 = get(bytecode.m_lhs);
5529 Node* op2 = get(bytecode.m_rhs);
5530 set(bytecode.m_dst, addToGraph(CompareLessEq, op1, op2));
5531 NEXT_OPCODE(op_lesseq);
5532 }
5533
5534 case op_greater: {
5535 auto bytecode = currentInstruction->as<OpGreater>();
5536 Node* op1 = get(bytecode.m_lhs);
5537 Node* op2 = get(bytecode.m_rhs);
5538 set(bytecode.m_dst, addToGraph(CompareGreater, op1, op2));
5539 NEXT_OPCODE(op_greater);
5540 }
5541
5542 case op_greatereq: {
5543 auto bytecode = currentInstruction->as<OpGreatereq>();
5544 Node* op1 = get(bytecode.m_lhs);
5545 Node* op2 = get(bytecode.m_rhs);
5546 set(bytecode.m_dst, addToGraph(CompareGreaterEq, op1, op2));
5547 NEXT_OPCODE(op_greatereq);
5548 }
5549
5550 case op_below: {
5551 auto bytecode = currentInstruction->as<OpBelow>();
5552 Node* op1 = get(bytecode.m_lhs);
5553 Node* op2 = get(bytecode.m_rhs);
5554 set(bytecode.m_dst, addToGraph(CompareBelow, op1, op2));
5555 NEXT_OPCODE(op_below);
5556 }
5557
5558 case op_beloweq: {
5559 auto bytecode = currentInstruction->as<OpBeloweq>();
5560 Node* op1 = get(bytecode.m_lhs);
5561 Node* op2 = get(bytecode.m_rhs);
5562 set(bytecode.m_dst, addToGraph(CompareBelowEq, op1, op2));
5563 NEXT_OPCODE(op_beloweq);
5564 }
5565
5566 case op_eq: {
5567 auto bytecode = currentInstruction->as<OpEq>();
5568 Node* op1 = get(bytecode.m_lhs);
5569 Node* op2 = get(bytecode.m_rhs);
5570 set(bytecode.m_dst, addToGraph(CompareEq, op1, op2));
5571 NEXT_OPCODE(op_eq);
5572 }
5573
5574 case op_eq_null: {
5575 auto bytecode = currentInstruction->as<OpEqNull>();
5576 Node* value = get(bytecode.m_operand);
5577 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5578 set(bytecode.m_dst, addToGraph(CompareEq, value, nullConstant));
5579 NEXT_OPCODE(op_eq_null);
5580 }
5581
5582 case op_stricteq: {
5583 auto bytecode = currentInstruction->as<OpStricteq>();
5584 Node* op1 = get(bytecode.m_lhs);
5585 Node* op2 = get(bytecode.m_rhs);
5586 set(bytecode.m_dst, addToGraph(CompareStrictEq, op1, op2));
5587 NEXT_OPCODE(op_stricteq);
5588 }
5589
5590 case op_neq: {
5591 auto bytecode = currentInstruction->as<OpNeq>();
5592 Node* op1 = get(bytecode.m_lhs);
5593 Node* op2 = get(bytecode.m_rhs);
5594 set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
5595 NEXT_OPCODE(op_neq);
5596 }
5597
5598 case op_neq_null: {
5599 auto bytecode = currentInstruction->as<OpNeqNull>();
5600 Node* value = get(bytecode.m_operand);
5601 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5602 set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
5603 NEXT_OPCODE(op_neq_null);
5604 }
5605
5606 case op_nstricteq: {
5607 auto bytecode = currentInstruction->as<OpNstricteq>();
5608 Node* op1 = get(bytecode.m_lhs);
5609 Node* op2 = get(bytecode.m_rhs);
5610 Node* invertedResult;
5611 invertedResult = addToGraph(CompareStrictEq, op1, op2);
5612 set(bytecode.m_dst, addToGraph(LogicalNot, invertedResult));
5613 NEXT_OPCODE(op_nstricteq);
5614 }
5615
5616 // === Property access operations ===
5617
5618 case op_get_by_val: {
5619 auto bytecode = currentInstruction->as<OpGetByVal>();
5620 SpeculatedType prediction = getPredictionWithoutOSRExit();
5621
5622 Node* base = get(bytecode.m_base);
5623 Node* property = get(bytecode.m_property);
5624 bool shouldCompileAsGetById = false;
5625 GetByStatus getByStatus = GetByStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap, m_icContextStack, currentCodeOrigin(), GetByStatus::TrackIdentifiers::Yes);
5626 unsigned identifierNumber = 0;
5627 {
5628                // FIXME: When the bytecode has not been compiled in the baseline JIT, byValInfo is null,
5629                // so there is no profiling information at that point.
5630 if (!m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
5631 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
5632 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
5633
5634 // FIXME: In the future, we should be able to do something like MultiGetByOffset in a multi identifier mode.
5635 // That way, we could both switch on multiple structures and multiple identifiers (or int 32 properties).
5636 // https://bugs.webkit.org/show_bug.cgi?id=204216
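                    // For example (informally): if profiling only ever saw one identifier for
                    // this property (and we have not exited badly here before), the CheckIdent
                    // below pins the property to that identifier and the access is compiled as
                    // a plain GetById further down.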
5637 if (Box<Identifier> impl = getByStatus.singleIdentifier()) {
5638 identifierNumber = m_graph.identifiers().ensure(impl);
5639 shouldCompileAsGetById = true;
5640 addToGraph(CheckIdent, OpInfo(impl->impl()), property);
5641 }
5642 }
5643 }
5644
5645 if (shouldCompileAsGetById)
5646 handleGetById(bytecode.m_dst, prediction, base, identifierNumber, getByStatus, AccessType::GetById, currentInstruction->size());
5647 else {
5648 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
5649 // FIXME: We could consider making this not vararg, since it only uses three child
5650 // slots.
5651 // https://bugs.webkit.org/show_bug.cgi?id=184192
5652 addVarArgChild(base);
5653 addVarArgChild(property);
5654 addVarArgChild(0); // Leave room for property storage.
5655 Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction));
5656 m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
5657 set(bytecode.m_dst, getByVal);
5658 if (getByStatus.observedStructureStubInfoSlowPath())
5659 m_graph.m_slowGetByVal.add(getByVal);
5660 }
5661
5662 NEXT_OPCODE(op_get_by_val);
5663 }
5664
5665 case op_get_by_val_with_this: {
5666 auto bytecode = currentInstruction->as<OpGetByValWithThis>();
5667 SpeculatedType prediction = getPrediction();
5668
5669 Node* base = get(bytecode.m_base);
5670 Node* thisValue = get(bytecode.m_thisValue);
5671 Node* property = get(bytecode.m_property);
5672 Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property);
5673 set(bytecode.m_dst, getByValWithThis);
5674
5675 NEXT_OPCODE(op_get_by_val_with_this);
5676 }
5677
5678 case op_put_by_val_direct:
5679 handlePutByVal(currentInstruction->as<OpPutByValDirect>(), currentInstruction->size());
5680 NEXT_OPCODE(op_put_by_val_direct);
5681
5682 case op_put_by_val: {
5683 handlePutByVal(currentInstruction->as<OpPutByVal>(), currentInstruction->size());
5684 NEXT_OPCODE(op_put_by_val);
5685 }
5686
5687 case op_put_by_val_with_this: {
5688 auto bytecode = currentInstruction->as<OpPutByValWithThis>();
5689 Node* base = get(bytecode.m_base);
5690 Node* thisValue = get(bytecode.m_thisValue);
5691 Node* property = get(bytecode.m_property);
5692 Node* value = get(bytecode.m_value);
5693
5694 addVarArgChild(base);
5695 addVarArgChild(thisValue);
5696 addVarArgChild(property);
5697 addVarArgChild(value);
5698 addToGraph(Node::VarArg, PutByValWithThis, OpInfo(0), OpInfo(0));
5699
5700 NEXT_OPCODE(op_put_by_val_with_this);
5701 }
5702
5703 case op_define_data_property: {
5704 auto bytecode = currentInstruction->as<OpDefineDataProperty>();
5705 Node* base = get(bytecode.m_base);
5706 Node* property = get(bytecode.m_property);
5707 Node* value = get(bytecode.m_value);
5708 Node* attributes = get(bytecode.m_attributes);
5709
5710 addVarArgChild(base);
5711 addVarArgChild(property);
5712 addVarArgChild(value);
5713 addVarArgChild(attributes);
5714 addToGraph(Node::VarArg, DefineDataProperty, OpInfo(0), OpInfo(0));
5715
5716 NEXT_OPCODE(op_define_data_property);
5717 }
5718
5719 case op_define_accessor_property: {
5720 auto bytecode = currentInstruction->as<OpDefineAccessorProperty>();
5721 Node* base = get(bytecode.m_base);
5722 Node* property = get(bytecode.m_property);
5723 Node* getter = get(bytecode.m_getter);
5724 Node* setter = get(bytecode.m_setter);
5725 Node* attributes = get(bytecode.m_attributes);
5726
5727 addVarArgChild(base);
5728 addVarArgChild(property);
5729 addVarArgChild(getter);
5730 addVarArgChild(setter);
5731 addVarArgChild(attributes);
5732 addToGraph(Node::VarArg, DefineAccessorProperty, OpInfo(0), OpInfo(0));
5733
5734 NEXT_OPCODE(op_define_accessor_property);
5735 }
5736
5737 case op_get_by_id_direct: {
5738 parseGetById<OpGetByIdDirect>(currentInstruction);
5739 NEXT_OPCODE(op_get_by_id_direct);
5740 }
5741 case op_try_get_by_id: {
5742 parseGetById<OpTryGetById>(currentInstruction);
5743 NEXT_OPCODE(op_try_get_by_id);
5744 }
5745 case op_get_by_id: {
5746 parseGetById<OpGetById>(currentInstruction);
5747 NEXT_OPCODE(op_get_by_id);
5748 }
5749 case op_get_by_id_with_this: {
5750 SpeculatedType prediction = getPrediction();
5751
5752 auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
5753 Node* base = get(bytecode.m_base);
5754 Node* thisValue = get(bytecode.m_thisValue);
5755 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5756
5757 set(bytecode.m_dst,
5758 addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue));
5759
5760 NEXT_OPCODE(op_get_by_id_with_this);
5761 }
5762 case op_put_by_id: {
5763 auto bytecode = currentInstruction->as<OpPutById>();
5764 Node* value = get(bytecode.m_value);
5765 Node* base = get(bytecode.m_base);
5766 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5767 bool direct = !!(bytecode.m_flags & PutByIdIsDirect);
5768
5769 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
5770 m_inlineStackTop->m_profiledBlock,
5771 m_inlineStackTop->m_baselineMap, m_icContextStack,
5772 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
5773
5774 handlePutById(base, identifierNumber, value, putByIdStatus, direct, currentInstruction->size());
5775 NEXT_OPCODE(op_put_by_id);
5776 }
5777
5778 case op_put_by_id_with_this: {
5779 auto bytecode = currentInstruction->as<OpPutByIdWithThis>();
5780 Node* base = get(bytecode.m_base);
5781 Node* thisValue = get(bytecode.m_thisValue);
5782 Node* value = get(bytecode.m_value);
5783 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5784
5785 addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value);
5786 NEXT_OPCODE(op_put_by_id_with_this);
5787 }
5788
5789 case op_put_getter_by_id:
5790 handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>());
5791 NEXT_OPCODE(op_put_getter_by_id);
5792 case op_put_setter_by_id: {
5793 handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>());
5794 NEXT_OPCODE(op_put_setter_by_id);
5795 }
5796
5797 case op_put_getter_setter_by_id: {
5798 auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
5799 Node* base = get(bytecode.m_base);
5800 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5801 Node* getter = get(bytecode.m_getter);
5802 Node* setter = get(bytecode.m_setter);
5803 addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, getter, setter);
5804 NEXT_OPCODE(op_put_getter_setter_by_id);
5805 }
5806
5807 case op_put_getter_by_val:
5808 handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>());
5809 NEXT_OPCODE(op_put_getter_by_val);
5810 case op_put_setter_by_val: {
5811 handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>());
5812 NEXT_OPCODE(op_put_setter_by_val);
5813 }
5814
5815 case op_del_by_id: {
5816 auto bytecode = currentInstruction->as<OpDelById>();
5817 Node* base = get(bytecode.m_base);
5818 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5819 set(bytecode.m_dst, addToGraph(DeleteById, OpInfo(identifierNumber), base));
5820 NEXT_OPCODE(op_del_by_id);
5821 }
5822
5823 case op_del_by_val: {
5824 auto bytecode = currentInstruction->as<OpDelByVal>();
5825 Node* base = get(bytecode.m_base);
5826 Node* key = get(bytecode.m_property);
5827 set(bytecode.m_dst, addToGraph(DeleteByVal, base, key));
5828 NEXT_OPCODE(op_del_by_val);
5829 }
5830
5831 case op_profile_type: {
5832 auto bytecode = currentInstruction->as<OpProfileType>();
5833 auto& metadata = bytecode.metadata(codeBlock);
5834 Node* valueToProfile = get(bytecode.m_targetVirtualRegister);
5835 addToGraph(ProfileType, OpInfo(metadata.m_typeLocation), valueToProfile);
5836 NEXT_OPCODE(op_profile_type);
5837 }
5838
5839 case op_profile_control_flow: {
5840 auto bytecode = currentInstruction->as<OpProfileControlFlow>();
5841 BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).m_basicBlockLocation;
5842 addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
5843 NEXT_OPCODE(op_profile_control_flow);
5844 }
5845
5846 // === Block terminators. ===
5847
5848 case op_jmp: {
5849 ASSERT(!m_currentBlock->terminal());
5850 auto bytecode = currentInstruction->as<OpJmp>();
5851 int relativeOffset = jumpTarget(bytecode.m_targetLabel);
5852 addToGraph(Jump, OpInfo(m_currentIndex.offset() + relativeOffset));
5853 if (relativeOffset <= 0)
5854 flushForTerminal();
5855 LAST_OPCODE(op_jmp);
5856 }
5857
5858 case op_jtrue: {
5859 auto bytecode = currentInstruction->as<OpJtrue>();
5860 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5861 Node* condition = get(bytecode.m_condition);
5862 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5863 LAST_OPCODE(op_jtrue);
5864 }
5865
5866 case op_jfalse: {
5867 auto bytecode = currentInstruction->as<OpJfalse>();
5868 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5869 Node* condition = get(bytecode.m_condition);
5870 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
5871 LAST_OPCODE(op_jfalse);
5872 }
5873
5874 case op_jeq_null: {
5875 auto bytecode = currentInstruction->as<OpJeqNull>();
5876 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5877 Node* value = get(bytecode.m_value);
5878 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5879 Node* condition = addToGraph(CompareEq, value, nullConstant);
5880 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5881 LAST_OPCODE(op_jeq_null);
5882 }
5883
5884 case op_jneq_null: {
5885 auto bytecode = currentInstruction->as<OpJneqNull>();
5886 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5887 Node* value = get(bytecode.m_value);
5888 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5889 Node* condition = addToGraph(CompareEq, value, nullConstant);
5890 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
5891 LAST_OPCODE(op_jneq_null);
5892 }
5893
5894 case op_jundefined_or_null: {
5895 auto bytecode = currentInstruction->as<OpJundefinedOrNull>();
5896 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5897 Node* value = get(bytecode.m_value);
5898 Node* condition = addToGraph(IsUndefinedOrNull, value);
5899 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5900 LAST_OPCODE(op_jundefined_or_null);
5901 }
5902
5903 case op_jnundefined_or_null: {
5904 auto bytecode = currentInstruction->as<OpJnundefinedOrNull>();
5905 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5906 Node* value = get(bytecode.m_value);
5907 Node* condition = addToGraph(IsUndefinedOrNull, value);
5908 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
5909 LAST_OPCODE(op_jnundefined_or_null);
5910 }
5911
5912 case op_jless: {
5913 auto bytecode = currentInstruction->as<OpJless>();
5914 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5915 Node* op1 = get(bytecode.m_lhs);
5916 Node* op2 = get(bytecode.m_rhs);
5917 Node* condition = addToGraph(CompareLess, op1, op2);
5918 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5919 LAST_OPCODE(op_jless);
5920 }
5921
5922 case op_jlesseq: {
5923 auto bytecode = currentInstruction->as<OpJlesseq>();
5924 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5925 Node* op1 = get(bytecode.m_lhs);
5926 Node* op2 = get(bytecode.m_rhs);
5927 Node* condition = addToGraph(CompareLessEq, op1, op2);
5928 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5929 LAST_OPCODE(op_jlesseq);
5930 }
5931
5932 case op_jgreater: {
5933 auto bytecode = currentInstruction->as<OpJgreater>();
5934 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5935 Node* op1 = get(bytecode.m_lhs);
5936 Node* op2 = get(bytecode.m_rhs);
5937 Node* condition = addToGraph(CompareGreater, op1, op2);
5938 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5939 LAST_OPCODE(op_jgreater);
5940 }
5941
5942 case op_jgreatereq: {
5943 auto bytecode = currentInstruction->as<OpJgreatereq>();
5944 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5945 Node* op1 = get(bytecode.m_lhs);
5946 Node* op2 = get(bytecode.m_rhs);
5947 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
5948 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5949 LAST_OPCODE(op_jgreatereq);
5950 }
5951
5952 case op_jeq: {
5953 auto bytecode = currentInstruction->as<OpJeq>();
5954 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5955 Node* op1 = get(bytecode.m_lhs);
5956 Node* op2 = get(bytecode.m_rhs);
5957 Node* condition = addToGraph(CompareEq, op1, op2);
5958 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5959 LAST_OPCODE(op_jeq);
5960 }
5961
5962 case op_jstricteq: {
5963 auto bytecode = currentInstruction->as<OpJstricteq>();
5964 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5965 Node* op1 = get(bytecode.m_lhs);
5966 Node* op2 = get(bytecode.m_rhs);
5967 Node* condition = addToGraph(CompareStrictEq, op1, op2);
5968 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
5969 LAST_OPCODE(op_jstricteq);
5970 }
5971
5972 case op_jnless: {
5973 auto bytecode = currentInstruction->as<OpJnless>();
5974 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5975 Node* op1 = get(bytecode.m_lhs);
5976 Node* op2 = get(bytecode.m_rhs);
5977 Node* condition = addToGraph(CompareLess, op1, op2);
5978 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
5979 LAST_OPCODE(op_jnless);
5980 }
5981
5982 case op_jnlesseq: {
5983 auto bytecode = currentInstruction->as<OpJnlesseq>();
5984 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5985 Node* op1 = get(bytecode.m_lhs);
5986 Node* op2 = get(bytecode.m_rhs);
5987 Node* condition = addToGraph(CompareLessEq, op1, op2);
5988 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
5989 LAST_OPCODE(op_jnlesseq);
5990 }
5991
5992 case op_jngreater: {
5993 auto bytecode = currentInstruction->as<OpJngreater>();
5994 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5995 Node* op1 = get(bytecode.m_lhs);
5996 Node* op2 = get(bytecode.m_rhs);
5997 Node* condition = addToGraph(CompareGreater, op1, op2);
5998 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
5999 LAST_OPCODE(op_jngreater);
6000 }
6001
6002 case op_jngreatereq: {
6003 auto bytecode = currentInstruction->as<OpJngreatereq>();
6004 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6005 Node* op1 = get(bytecode.m_lhs);
6006 Node* op2 = get(bytecode.m_rhs);
6007 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
6008 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
6009 LAST_OPCODE(op_jngreatereq);
6010 }
6011
6012 case op_jneq: {
6013 auto bytecode = currentInstruction->as<OpJneq>();
6014 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6015 Node* op1 = get(bytecode.m_lhs);
6016 Node* op2 = get(bytecode.m_rhs);
6017 Node* condition = addToGraph(CompareEq, op1, op2);
6018 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
6019 LAST_OPCODE(op_jneq);
6020 }
6021
6022 case op_jnstricteq: {
6023 auto bytecode = currentInstruction->as<OpJnstricteq>();
6024 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6025 Node* op1 = get(bytecode.m_lhs);
6026 Node* op2 = get(bytecode.m_rhs);
6027 Node* condition = addToGraph(CompareStrictEq, op1, op2);
6028 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
6029 LAST_OPCODE(op_jnstricteq);
6030 }
6031
6032 case op_jbelow: {
6033 auto bytecode = currentInstruction->as<OpJbelow>();
6034 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6035 Node* op1 = get(bytecode.m_lhs);
6036 Node* op2 = get(bytecode.m_rhs);
6037 Node* condition = addToGraph(CompareBelow, op1, op2);
6038 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
6039 LAST_OPCODE(op_jbelow);
6040 }
6041
6042 case op_jbeloweq: {
6043 auto bytecode = currentInstruction->as<OpJbeloweq>();
6044 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6045 Node* op1 = get(bytecode.m_lhs);
6046 Node* op2 = get(bytecode.m_rhs);
6047 Node* condition = addToGraph(CompareBelowEq, op1, op2);
6048 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
6049 LAST_OPCODE(op_jbeloweq);
6050 }
6051
6052 case op_switch_imm: {
6053 auto bytecode = currentInstruction->as<OpSwitchImm>();
6054 SwitchData& data = *m_graph.m_switchData.add();
6055 data.kind = SwitchImm;
6056 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
6057 data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
6058 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
6059 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
6060 if (!table.branchOffsets[i])
6061 continue;
6062 unsigned target = m_currentIndex.offset() + table.branchOffsets[i];
6063 if (target == data.fallThrough.bytecodeIndex())
6064 continue;
6065 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
6066 }
6067 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
6068 flushIfTerminal(data);
6069 LAST_OPCODE(op_switch_imm);
6070 }
6071
6072 case op_switch_char: {
6073 auto bytecode = currentInstruction->as<OpSwitchChar>();
6074 SwitchData& data = *m_graph.m_switchData.add();
6075 data.kind = SwitchChar;
6076 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
6077 data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
6078 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
6079 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
6080 if (!table.branchOffsets[i])
6081 continue;
6082 unsigned target = m_currentIndex.offset() + table.branchOffsets[i];
6083 if (target == data.fallThrough.bytecodeIndex())
6084 continue;
6085 data.cases.append(
6086 SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
6087 }
6088 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
6089 flushIfTerminal(data);
6090 LAST_OPCODE(op_switch_char);
6091 }
6092
6093 case op_switch_string: {
6094 auto bytecode = currentInstruction->as<OpSwitchString>();
6095 SwitchData& data = *m_graph.m_switchData.add();
6096 data.kind = SwitchString;
6097 data.switchTableIndex = bytecode.m_tableIndex;
6098 data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
6099 StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
6100 StringJumpTable::StringOffsetTable::iterator iter;
6101 StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
6102 for (iter = table.offsetTable.begin(); iter != end; ++iter) {
6103 unsigned target = m_currentIndex.offset() + iter->value.branchOffset;
6104 if (target == data.fallThrough.bytecodeIndex())
6105 continue;
6106 data.cases.append(
6107 SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
6108 }
6109 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
6110 flushIfTerminal(data);
6111 LAST_OPCODE(op_switch_string);
6112 }
6113
6114 case op_ret: {
6115 auto bytecode = currentInstruction->as<OpRet>();
6116 ASSERT(!m_currentBlock->terminal());
6117 if (!inlineCallFrame()) {
6118 // Simple case: we are just producing a return
6119 addToGraph(Return, get(bytecode.m_value));
6120 flushForReturn();
6121 LAST_OPCODE(op_ret);
6122 }
6123
6124 flushForReturn();
6125 if (m_inlineStackTop->m_returnValue.isValid())
6126 setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);
6127
6128 if (!m_inlineStackTop->m_continuationBlock && m_currentIndex.offset() + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
6129 // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
6130 // It is untargetable, because we do not know the appropriate index.
6131                // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets.
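                // For example (informally): inlining a callee like
                //     function f(x) { if (x) return 1; return 2; }
                // reaches this path at the first return while more bytecode remains to parse;
                // both returns then jump to the shared continuation block allocated here.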
6132 m_inlineStackTop->m_continuationBlock = allocateUntargetableBlock();
6133 }
6134
6135 if (m_inlineStackTop->m_continuationBlock)
6136 addJumpTo(m_inlineStackTop->m_continuationBlock);
6137 else {
6138 // We are returning from an inlined function, and do not need to jump anywhere, so we just keep the current block
6139 m_inlineStackTop->m_continuationBlock = m_currentBlock;
6140 }
6141 LAST_OPCODE_LINKED(op_ret);
6142 }
6143 case op_end:
6144 ASSERT(!inlineCallFrame());
6145 addToGraph(Return, get(currentInstruction->as<OpEnd>().m_value));
6146 flushForReturn();
6147 LAST_OPCODE(op_end);
6148
6149 case op_throw:
6150 addToGraph(Throw, get(currentInstruction->as<OpThrow>().m_value));
6151 flushForTerminal();
6152 LAST_OPCODE(op_throw);
6153
6154 case op_throw_static_error: {
6155 auto bytecode = currentInstruction->as<OpThrowStaticError>();
6156 addToGraph(ThrowStaticError, OpInfo(bytecode.m_errorType), get(bytecode.m_message));
6157 flushForTerminal();
6158 LAST_OPCODE(op_throw_static_error);
6159 }
6160
6161 case op_catch: {
6162 auto bytecode = currentInstruction->as<OpCatch>();
6163 m_graph.m_hasExceptionHandlers = true;
6164
6165 if (inlineCallFrame()) {
6166 // We can't do OSR entry into an inlined frame.
6167 NEXT_OPCODE(op_catch);
6168 }
6169
6170 if (m_graph.m_plan.mode() == FTLForOSREntryMode) {
6171 NEXT_OPCODE(op_catch);
6172 }
6173
6174 RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution));
6175
6176 ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
6177
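            // A null buffer just means this op_catch has not been observed to execute yet (or
            // its write is not yet visible to this thread), so we conservatively skip turning
            // this block into an OSR entrypoint.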
6178 if (!buffer) {
6179 NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread.
6180 }
6181
6182 // We're now committed to compiling this as an entrypoint.
6183 m_currentBlock->isCatchEntrypoint = true;
6184 m_graph.m_roots.append(m_currentBlock);
6185
6186 Vector<SpeculatedType> argumentPredictions(m_numArguments);
6187 Vector<SpeculatedType> localPredictions;
6188 HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> seenArguments;
6189
6190 {
6191 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6192
6193 buffer->forEach([&] (ValueProfileAndOperand& profile) {
6194 VirtualRegister operand(profile.m_operand);
6195 SpeculatedType prediction = profile.computeUpdatedPrediction(locker);
6196 if (operand.isLocal())
6197 localPredictions.append(prediction);
6198 else {
6199 RELEASE_ASSERT(operand.isArgument());
6200 RELEASE_ASSERT(static_cast<uint32_t>(operand.toArgument()) < argumentPredictions.size());
6201 if (validationEnabled())
6202 seenArguments.add(operand.toArgument());
6203 argumentPredictions[operand.toArgument()] = prediction;
6204 }
6205 });
6206
6207 if (validationEnabled()) {
6208 for (unsigned argument = 0; argument < m_numArguments; ++argument)
6209 RELEASE_ASSERT(seenArguments.contains(argument));
6210 }
6211 }
6212
6213 Vector<std::pair<VirtualRegister, Node*>> localsToSet;
6214 localsToSet.reserveInitialCapacity(buffer->m_size); // Note: This will reserve more than the number of locals we see below because the buffer includes arguments.
6215
6216 // We're not allowed to exit here since we would not properly recover values.
6217 // We first need to bootstrap the catch entrypoint state.
6218 m_exitOK = false;
6219
6220 unsigned numberOfLocals = 0;
6221 buffer->forEach([&] (ValueProfileAndOperand& profile) {
6222 VirtualRegister operand(profile.m_operand);
6223 if (operand.isArgument())
6224 return;
6225 ASSERT(operand.isLocal());
6226 Node* value = addToGraph(ExtractCatchLocal, OpInfo(numberOfLocals), OpInfo(localPredictions[numberOfLocals]));
6227 ++numberOfLocals;
6228 addToGraph(MovHint, OpInfo(profile.m_operand), value);
6229 localsToSet.uncheckedAppend(std::make_pair(operand, value));
6230 });
6231 if (numberOfLocals)
6232 addToGraph(ClearCatchLocals);
6233
6234 if (!m_graph.m_maxLocalsForCatchOSREntry)
6235 m_graph.m_maxLocalsForCatchOSREntry = 0;
6236 m_graph.m_maxLocalsForCatchOSREntry = std::max(numberOfLocals, *m_graph.m_maxLocalsForCatchOSREntry);
6237
6238 // We could not exit before this point in the program because we would not know how to do value
6239 // recovery for live locals. The above IR sets up the necessary state so we can recover values
6240 // during OSR exit.
6241 //
6242 // The nodes that follow here all exit to the following bytecode instruction, not
6243 // the op_catch. Exiting to op_catch is reserved for when an exception is thrown.
6244 // The SetArgumentDefinitely nodes that follow below may exit because we may hoist type checks
6245 // to them. The SetLocal nodes that follow below may exit because we may choose
6246 // a flush format that speculates on the type of the local.
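            // Concretely (informally): if one of those SetLocals OSR exits, say because its
            // flush format speculates Int32 and the incoming value is not an Int32, execution
            // resumes at the bytecode after this op_catch, using the values recorded by the
            // MovHints above.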
6247 m_exitOK = true;
6248 addToGraph(ExitOK);
6249
6250 {
6251 auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector());
6252 RELEASE_ASSERT(addResult.isNewEntry);
6253 ArgumentsVector& entrypointArguments = addResult.iterator->value;
6254 entrypointArguments.resize(m_numArguments);
6255
6256 BytecodeIndex exitBytecodeIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size());
6257
6258 for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) {
6259 VariableAccessData* variable = newVariableAccessData(virtualRegisterForArgument(argument));
6260 variable->predict(argumentPredictions[argument]);
6261
6262 variable->mergeStructureCheckHoistingFailed(
6263 m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadCache));
6264 variable->mergeCheckArrayHoistingFailed(
6265 m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadIndexingType));
6266
6267 Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
6268 setArgument->origin.forExit = CodeOrigin(exitBytecodeIndex, setArgument->origin.forExit.inlineCallFrame());
6269 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
6270 entrypointArguments[argument] = setArgument;
6271 }
6272 }
6273
6274 for (const std::pair<VirtualRegister, Node*>& pair : localsToSet) {
6275 DelayedSetLocal delayed { currentCodeOrigin(), pair.first, pair.second, ImmediateNakedSet };
6276 m_setLocalQueue.append(delayed);
6277 }
6278
6279 NEXT_OPCODE(op_catch);
6280 }
6281
6282 case op_call:
6283 handleCall<OpCall>(currentInstruction, Call, CallMode::Regular);
6284 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6285 NEXT_OPCODE(op_call);
6286
6287 case op_tail_call: {
6288 flushForReturn();
6289 Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail);
6290 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6291 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6292 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6293 // things up.
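            // For instance (informally): when the tail call is optimized, for example because
            // the callee gets inlined, it does not terminate the graph here, so the op_ret that
            // follows in this code block is still parsed and updates metadata as described above.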
6294 if (terminality == NonTerminal)
6295 NEXT_OPCODE(op_tail_call);
6296 else
6297 LAST_OPCODE_LINKED(op_tail_call);
6298 // We use LAST_OPCODE_LINKED instead of LAST_OPCODE because if the tail call was optimized, it may now be a jump to a bytecode index in a different InlineStackEntry.
6299 }
6300
6301 case op_construct:
6302 handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct);
6303 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6304 NEXT_OPCODE(op_construct);
6305
6306 case op_call_varargs: {
6307 handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular);
6308 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6309 NEXT_OPCODE(op_call_varargs);
6310 }
6311
6312 case op_tail_call_varargs: {
6313 flushForReturn();
6314 Terminality terminality = handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail);
6315 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6316 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6317 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6318 // things up.
6319 if (terminality == NonTerminal)
6320 NEXT_OPCODE(op_tail_call_varargs);
6321 else
6322 LAST_OPCODE(op_tail_call_varargs);
6323 }
6324
6325 case op_tail_call_forward_arguments: {
6326 // We need to make sure that we don't unbox our arguments here since that won't be
6327 // done by the arguments object creation node as that node may not exist.
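            // A typical (informal) source pattern reaching here is argument forwarding, e.g.
            //     function f() { return g.apply(this, arguments); }
            // where the arguments object itself may never need to be materialized.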
6328 noticeArgumentsUse();
6329 flushForReturn();
6330 Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail);
6331 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6332 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6333 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6334 // things up.
6335 if (terminality == NonTerminal)
6336 NEXT_OPCODE(op_tail_call_forward_arguments);
6337 else
6338 LAST_OPCODE(op_tail_call_forward_arguments);
6339 }
6340
6341 case op_construct_varargs: {
6342 handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct);
6343 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6344 NEXT_OPCODE(op_construct_varargs);
6345 }
6346
6347 case op_call_eval: {
6348 auto bytecode = currentInstruction->as<OpCallEval>();
6349 int registerOffset = -bytecode.m_argv;
6350 addCall(bytecode.m_dst, CallEval, nullptr, get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction());
6351 NEXT_OPCODE(op_call_eval);
6352 }
6353
6354 case op_jneq_ptr: {
6355 auto bytecode = currentInstruction->as<OpJneqPtr>();
6356 FrozenValue* frozenPointer = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_specialPointer.offset()));
6357 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6358 Node* child = get(bytecode.m_value);
6359 if (bytecode.metadata(codeBlock).m_hasJumped) {
6360 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
6361 addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
6362 LAST_OPCODE(op_jneq_ptr);
6363 }
6364 addToGraph(CheckCell, OpInfo(frozenPointer), child);
6365 NEXT_OPCODE(op_jneq_ptr);
6366 }
6367
6368 case op_resolve_scope: {
6369 auto bytecode = currentInstruction->as<OpResolveScope>();
6370 auto& metadata = bytecode.metadata(codeBlock);
6371
6372 ResolveType resolveType;
6373 unsigned depth;
6374 JSScope* constantScope = nullptr;
6375 JSCell* lexicalEnvironment = nullptr;
6376 SymbolTable* symbolTable = nullptr;
6377 {
6378 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6379 resolveType = metadata.m_resolveType;
6380 depth = metadata.m_localScopeDepth;
6381 switch (resolveType) {
6382 case GlobalProperty:
6383 case GlobalVar:
6384 case GlobalPropertyWithVarInjectionChecks:
6385 case GlobalVarWithVarInjectionChecks:
6386 case GlobalLexicalVar:
6387 case GlobalLexicalVarWithVarInjectionChecks:
6388 constantScope = metadata.m_constantScope.get();
6389 break;
6390 case ModuleVar:
6391 lexicalEnvironment = metadata.m_lexicalEnvironment.get();
6392 break;
6393 case LocalClosureVar:
6394 case ClosureVar:
6395 case ClosureVarWithVarInjectionChecks:
6396 symbolTable = metadata.m_symbolTable.get();
6397 break;
6398 default:
6399 break;
6400 }
6401 }
6402
6403 if (needsDynamicLookup(resolveType, op_resolve_scope)) {
6404 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6405 set(bytecode.m_dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.m_scope)));
6406 NEXT_OPCODE(op_resolve_scope);
6407 }
6408
6409 // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
6410 if (needsVarInjectionChecks(resolveType))
6411 m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint());
6412
6413            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
6414 // https://bugs.webkit.org/show_bug.cgi?id=193347
6415 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6416 if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
6417 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6418 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6419 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6420 addToGraph(ForceOSRExit);
6421 }
6422 }
6423
6424 switch (resolveType) {
6425 case GlobalProperty:
6426 case GlobalVar:
6427 case GlobalPropertyWithVarInjectionChecks:
6428 case GlobalVarWithVarInjectionChecks:
6429 case GlobalLexicalVar:
6430 case GlobalLexicalVarWithVarInjectionChecks: {
6431 RELEASE_ASSERT(constantScope);
6432 RELEASE_ASSERT(constantScope == JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6433 set(bytecode.m_dst, weakJSConstant(constantScope));
6434 addToGraph(Phantom, get(bytecode.m_scope));
6435 break;
6436 }
6437 case ModuleVar: {
6438 // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar,
6439                // we do not need to keep it alive with a Phantom node.
6440 // Module environment is already strongly referenced by the CodeBlock.
6441 set(bytecode.m_dst, weakJSConstant(lexicalEnvironment));
6442 break;
6443 }
6444 case LocalClosureVar:
6445 case ClosureVar:
6446 case ClosureVarWithVarInjectionChecks: {
6447 Node* localBase = get(bytecode.m_scope);
6448 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
6449
6450 // We have various forms of constant folding here. This is necessary to avoid
6451 // spurious recompiles in dead-but-foldable code.
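            // Roughly, the folds below are: (1) if the symbol table is known to belong to a
            // single scope instance, use that constant (and watch the symbol table); (2) if the
            // base scope is itself a constant, walk scope->next() at compile time; (3) otherwise
            // emit `depth` SkipScope nodes.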
6452
6453 if (symbolTable) {
6454 if (JSScope* scope = symbolTable->singleton().inferredValue()) {
6455 m_graph.watchpoints().addLazily(symbolTable);
6456 set(bytecode.m_dst, weakJSConstant(scope));
6457 break;
6458 }
6459 }
6460 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) {
6461 for (unsigned n = depth; n--;)
6462 scope = scope->next();
6463 set(bytecode.m_dst, weakJSConstant(scope));
6464 break;
6465 }
6466 for (unsigned n = depth; n--;)
6467 localBase = addToGraph(SkipScope, localBase);
6468 set(bytecode.m_dst, localBase);
6469 break;
6470 }
6471 case UnresolvedProperty:
6472 case UnresolvedPropertyWithVarInjectionChecks: {
6473 addToGraph(Phantom, get(bytecode.m_scope));
6474 addToGraph(ForceOSRExit);
6475 set(bytecode.m_dst, addToGraph(JSConstant, OpInfo(m_constantNull)));
6476 break;
6477 }
6478 case Dynamic:
6479 RELEASE_ASSERT_NOT_REACHED();
6480 break;
6481 }
6482 NEXT_OPCODE(op_resolve_scope);
6483 }
6484 case op_resolve_scope_for_hoisting_func_decl_in_eval: {
6485 auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>();
6486 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
6487 set(bytecode.m_dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.m_scope)));
6488
6489 NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval);
6490 }
6491
6492 case op_get_from_scope: {
6493 auto bytecode = currentInstruction->as<OpGetFromScope>();
6494 auto& metadata = bytecode.metadata(codeBlock);
6495 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6496 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
6497
6498 ResolveType resolveType;
6499 GetPutInfo getPutInfo(0);
6500            Structure* structure = nullptr;
6501            WatchpointSet* watchpoints = nullptr;
6502 uintptr_t operand;
6503 {
6504 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6505 getPutInfo = metadata.m_getPutInfo;
6506 resolveType = getPutInfo.resolveType();
6507 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6508 watchpoints = metadata.m_watchpointSet;
6509 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
6510 structure = metadata.m_structure.get();
6511 operand = metadata.m_operand;
6512 }
6513
6514 if (needsDynamicLookup(resolveType, op_get_from_scope)) {
6515 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
6516 SpeculatedType prediction = getPrediction();
6517 set(bytecode.m_dst,
6518 addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.m_scope)));
6519 NEXT_OPCODE(op_get_from_scope);
6520 }
6521
6522            UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting what index 5 is in GlobalVar mode.
6523
6524 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6525
6526 switch (resolveType) {
6527 case GlobalProperty:
6528 case GlobalPropertyWithVarInjectionChecks: {
6529                // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
6530 // https://bugs.webkit.org/show_bug.cgi?id=193347
6531 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6532 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6533 addToGraph(ForceOSRExit);
6534 }
6535
6536 SpeculatedType prediction = getPrediction();
6537
6538 GetByStatus status = GetByStatus::computeFor(structure, uid);
6539 if (status.state() != GetByStatus::Simple
6540 || status.numVariants() != 1
6541 || status[0].structureSet().size() != 1) {
6542 set(bytecode.m_dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.m_scope)));
6543 break;
6544 }
6545
6546 Node* base = weakJSConstant(globalObject);
6547 Node* result = load(prediction, base, identifierNumber, status[0]);
6548 addToGraph(Phantom, get(bytecode.m_scope));
6549 set(bytecode.m_dst, result);
6550 break;
6551 }
6552 case GlobalVar:
6553 case GlobalVarWithVarInjectionChecks:
6554 case GlobalLexicalVar:
6555 case GlobalLexicalVarWithVarInjectionChecks: {
6556 addToGraph(Phantom, get(bytecode.m_scope));
6557 WatchpointSet* watchpointSet;
6558 ScopeOffset offset;
6559 JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6560 {
6561 ConcurrentJSLocker locker(scopeObject->symbolTable()->m_lock);
6562 SymbolTableEntry entry = scopeObject->symbolTable()->get(locker, uid);
6563 watchpointSet = entry.watchpointSet();
6564 offset = entry.scopeOffset();
6565 }
6566 if (watchpointSet && watchpointSet->state() == IsWatched) {
6567 // This has a fun concurrency story. There is the possibility of a race in two
6568 // directions:
6569 //
6570 // We see that the set IsWatched, but in the meantime it gets invalidated: this is
6571 // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
6572 // invalidated, then this compilation is invalidated. Note that in the meantime we
6573 // may load an absurd value from the global object. It's fine to load an absurd
6574 // value if the compilation is invalidated anyway.
6575 //
6576 // We see that the set IsWatched, but the value isn't yet initialized: this isn't
6577 // possible because of the ordering of operations.
6578 //
6579 // Here's how we order operations:
6580 //
6581 // Main thread stores to the global object: always store a value first, and only
6582 // after that do we touch the watchpoint set. There is a fence in the touch, that
6583 // ensures that the store to the global object always happens before the touch on the
6584 // set.
6585 //
6586 // Compilation thread: always first load the state of the watchpoint set, and then
6587 // load the value. The WatchpointSet::state() method does fences for us to ensure
6588 // that the load of the state happens before our load of the value.
6589 //
6590 // Finalizing compilation: this happens on the main thread and synchronously checks
6591 // validity of all watchpoint sets.
6592 //
6593 // We will only perform optimizations if the load of the state yields IsWatched. That
6594 // means that at least one store would have happened to initialize the original value
6595 // of the variable (that is, the value we'd like to constant fold to). There may be
6596 // other stores that happen after that, but those stores will invalidate the
6597 // watchpoint set and also the compilation.
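                // In short (informally):
                //     main thread:     store the value, then (with a fence) touch the watchpoint set.
                //     compiler thread: load the set's state (fenced), then load the value.
                //     finalization:    on the main thread, recheck that every watched set is still valid.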
6598
6599                // Note that we need to use the operand, which is a direct pointer to the global,
6600 // rather than looking up the global by doing variableAt(offset). That's because the
6601 // internal data structures of JSSegmentedVariableObject are not thread-safe even
6602 // though accessing the global itself is. The segmentation involves a vector spine
6603 // that resizes with malloc/free, so if new globals unrelated to the one we are
6604 // reading are added, we might access freed memory if we do variableAt().
6605 WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
6606
6607 ASSERT(scopeObject->findVariableIndex(pointer) == offset);
6608
6609 JSValue value = pointer->get();
6610 if (value) {
6611 m_graph.watchpoints().addLazily(watchpointSet);
6612 set(bytecode.m_dst, weakJSConstant(value));
6613 break;
6614 }
6615 }
6616
6617 SpeculatedType prediction = getPrediction();
6618 NodeType nodeType;
6619 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
6620 nodeType = GetGlobalVar;
6621 else
6622 nodeType = GetGlobalLexicalVariable;
6623 Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction));
6624 if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6625 addToGraph(CheckNotEmpty, value);
6626 set(bytecode.m_dst, value);
6627 break;
6628 }
6629 case LocalClosureVar:
6630 case ClosureVar:
6631 case ClosureVarWithVarInjectionChecks: {
6632 Node* scopeNode = get(bytecode.m_scope);
6633
6634 // Ideally we wouldn't have to do this Phantom. But:
6635 //
6636 // For the constant case: we must do it because otherwise we would have no way of knowing
6637 // that the scope is live at OSR here.
6638 //
6639 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
6640 // won't be able to handle an Undefined scope.
6641 addToGraph(Phantom, scopeNode);
6642
6643 // Constant folding in the bytecode parser is important for performance. This may not
6644 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
6645 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
6646 // would recompile. But if we can fold it here, we avoid the exit.
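            // For example (informally): if the resolved scope is a compile-time constant and
            // the closure variable it holds has not been observed to change, then
            // tryGetConstantClosureVar hands us that value and we fold, even though this
            // op_get_from_scope may never have run in the baseline tiers.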
6647 if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
6648 set(bytecode.m_dst, weakJSConstant(value));
6649 break;
6650 }
6651 SpeculatedType prediction = getPrediction();
6652 set(bytecode.m_dst,
6653 addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
6654 break;
6655 }
6656 case UnresolvedProperty:
6657 case UnresolvedPropertyWithVarInjectionChecks:
6658 case ModuleVar:
6659 case Dynamic:
6660 RELEASE_ASSERT_NOT_REACHED();
6661 break;
6662 }
6663 NEXT_OPCODE(op_get_from_scope);
6664 }
6665
6666 case op_put_to_scope: {
6667 auto bytecode = currentInstruction->as<OpPutToScope>();
6668 auto& metadata = bytecode.metadata(codeBlock);
6669 unsigned identifierNumber = bytecode.m_var;
6670 if (identifierNumber != UINT_MAX)
6671 identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
6672 UniquedStringImpl* uid;
6673 if (identifierNumber != UINT_MAX)
6674 uid = m_graph.identifiers()[identifierNumber];
6675 else
6676 uid = nullptr;
6677
6678 ResolveType resolveType;
6679 GetPutInfo getPutInfo(0);
6680 Structure* structure = nullptr;
6681 WatchpointSet* watchpoints = nullptr;
6682 uintptr_t operand;
6683 {
6684 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6685 getPutInfo = metadata.m_getPutInfo;
6686 resolveType = getPutInfo.resolveType();
6687 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6688 watchpoints = metadata.m_watchpointSet;
6689 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
6690 structure = metadata.m_structure.get();
6691 operand = metadata.m_operand;
6692 }
6693
6694 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6695
6696 if (needsDynamicLookup(resolveType, op_put_to_scope)) {
6697 ASSERT(identifierNumber != UINT_MAX);
6698 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
6699 addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.m_scope), get(bytecode.m_value));
6700 NEXT_OPCODE(op_put_to_scope);
6701 }
6702
6703 switch (resolveType) {
6704 case GlobalProperty:
6705 case GlobalPropertyWithVarInjectionChecks: {
6706                // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
6707 // https://bugs.webkit.org/show_bug.cgi?id=193347
6708 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6709 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6710 addToGraph(ForceOSRExit);
6711 }
6712
6713 PutByIdStatus status;
6714 if (uid)
6715 status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
6716 else
6717 status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
6718 if (status.numVariants() != 1
6719 || status[0].kind() != PutByIdVariant::Replace
6720 || status[0].structure().size() != 1) {
6721 addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.m_scope), get(bytecode.m_value));
6722 break;
6723 }
6724 Node* base = weakJSConstant(globalObject);
6725 store(base, identifierNumber, status[0], get(bytecode.m_value));
6726 // Keep scope alive until after put.
6727 addToGraph(Phantom, get(bytecode.m_scope));
6728 break;
6729 }
6730 case GlobalLexicalVar:
6731 case GlobalLexicalVarWithVarInjectionChecks:
6732 case GlobalVar:
6733 case GlobalVarWithVarInjectionChecks: {
6734 if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
6735 SpeculatedType prediction = SpecEmpty;
6736 Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction));
6737 addToGraph(CheckNotEmpty, value);
6738 }
6739
6740 JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6741 if (watchpoints) {
6742 SymbolTableEntry entry = scopeObject->symbolTable()->get(uid);
6743 ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
6744 }
6745 Node* valueNode = get(bytecode.m_value);
6746 addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode);
6747 if (watchpoints && watchpoints->state() != IsInvalidated) {
6748 // Must happen after the store. See comment for GetGlobalVar.
6749 addToGraph(NotifyWrite, OpInfo(watchpoints));
6750 }
6751 // Keep scope alive until after put.
6752 addToGraph(Phantom, get(bytecode.m_scope));
6753 break;
6754 }
6755 case LocalClosureVar:
6756 case ClosureVar:
6757 case ClosureVarWithVarInjectionChecks: {
6758 Node* scopeNode = get(bytecode.m_scope);
6759 Node* valueNode = get(bytecode.m_value);
6760
6761 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
6762
6763 if (watchpoints && watchpoints->state() != IsInvalidated) {
6764 // Must happen after the store. See comment for GetGlobalVar.
6765 addToGraph(NotifyWrite, OpInfo(watchpoints));
6766 }
6767 break;
6768 }
6769
6770 case ModuleVar:
6771                // We do not need to keep the "scope" and "value" register values alive with Phantoms here
6772                // because they are not used in LLInt / baseline op_put_to_scope with ModuleVar.
6773 addToGraph(ForceOSRExit);
6774 break;
6775
6776 case Dynamic:
6777 case UnresolvedProperty:
6778 case UnresolvedPropertyWithVarInjectionChecks:
6779 RELEASE_ASSERT_NOT_REACHED();
6780 break;
6781 }
6782 NEXT_OPCODE(op_put_to_scope);
6783 }
6784
6785 case op_loop_hint: {
6786 // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
6787 // OSR can only happen at basic block boundaries. Assert that these two statements
6788 // are compatible.
6789 RELEASE_ASSERT(m_currentIndex == blockBegin);
6790
6791 // We never do OSR into an inlined code block. That could not happen, since OSR
6792 // looks up the code block that is the replacement for the baseline JIT code
6793 // block. Hence, machine code block = true code block = not inline code block.
6794 if (!m_inlineStackTop->m_caller)
6795 m_currentBlock->isOSRTarget = true;
6796
6797 addToGraph(LoopHint);
6798 NEXT_OPCODE(op_loop_hint);
6799 }
6800
6801 case op_check_traps: {
6802 addToGraph(Options::usePollingTraps() ? CheckTraps : InvalidationPoint);
6803 NEXT_OPCODE(op_check_traps);
6804 }
6805
6806 case op_nop: {
6807 addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
6808 NEXT_OPCODE(op_nop);
6809 }
6810
6811 case op_super_sampler_begin: {
6812 addToGraph(SuperSamplerBegin);
6813 NEXT_OPCODE(op_super_sampler_begin);
6814 }
6815
6816 case op_super_sampler_end: {
6817 addToGraph(SuperSamplerEnd);
6818 NEXT_OPCODE(op_super_sampler_end);
6819 }
6820
6821 case op_create_lexical_environment: {
6822 auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>();
6823 ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant());
6824 FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset()));
6825 FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset()));
6826 Node* scope = get(bytecode.m_scope);
6827 Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
6828 set(bytecode.m_dst, lexicalEnvironment);
6829 NEXT_OPCODE(op_create_lexical_environment);
6830 }
6831
6832 case op_push_with_scope: {
6833 auto bytecode = currentInstruction->as<OpPushWithScope>();
6834 Node* currentScope = get(bytecode.m_currentScope);
6835 Node* object = get(bytecode.m_newScope);
6836 set(bytecode.m_dst, addToGraph(PushWithScope, currentScope, object));
6837 NEXT_OPCODE(op_push_with_scope);
6838 }
6839
6840 case op_get_parent_scope: {
6841 auto bytecode = currentInstruction->as<OpGetParentScope>();
6842 Node* currentScope = get(bytecode.m_scope);
6843 Node* newScope = addToGraph(SkipScope, currentScope);
6844 set(bytecode.m_dst, newScope);
6845 addToGraph(Phantom, currentScope);
6846 NEXT_OPCODE(op_get_parent_scope);
6847 }
6848
6849 case op_get_scope: {
6850 // Help the later stages a bit by doing some small constant folding here. Note that this
6851 // only helps for the first basic block. It's extremely important not to constant fold
6852 // loads from the scope register later, as that would prevent the DFG from tracking the
6853 // bytecode-level liveness of the scope register.
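            // For example (informally): when this op_get_scope was inlined for a known callee,
            // the callee register here is a JSFunction constant, so the scope is folded to a
            // constant below instead of emitting a GetScope node.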
6854 auto bytecode = currentInstruction->as<OpGetScope>();
6855 Node* callee = get(VirtualRegister(CallFrameSlot::callee));
6856 Node* result;
6857 if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm))
6858 result = weakJSConstant(function->scope());
6859 else
6860 result = addToGraph(GetScope, callee);
6861 set(bytecode.m_dst, result);
6862 NEXT_OPCODE(op_get_scope);
6863 }
6864
6865 case op_argument_count: {
6866 auto bytecode = currentInstruction->as<OpArgumentCount>();
6867 Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne)));
6868 set(bytecode.m_dst, sub);
6869 NEXT_OPCODE(op_argument_count);
6870 }
6871
6872 case op_create_direct_arguments: {
6873 auto bytecode = currentInstruction->as<OpCreateDirectArguments>();
6874 noticeArgumentsUse();
6875 Node* createArguments = addToGraph(CreateDirectArguments);
6876 set(bytecode.m_dst, createArguments);
6877 NEXT_OPCODE(op_create_direct_arguments);
6878 }
6879
6880 case op_create_scoped_arguments: {
6881 auto bytecode = currentInstruction->as<OpCreateScopedArguments>();
6882 noticeArgumentsUse();
6883 Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.m_scope));
6884 set(bytecode.m_dst, createArguments);
6885 NEXT_OPCODE(op_create_scoped_arguments);
6886 }
6887
6888 case op_create_cloned_arguments: {
6889 auto bytecode = currentInstruction->as<OpCreateClonedArguments>();
6890 noticeArgumentsUse();
6891 Node* createArguments = addToGraph(CreateClonedArguments);
6892 set(bytecode.m_dst, createArguments);
6893 NEXT_OPCODE(op_create_cloned_arguments);
6894 }
6895
6896 case op_get_from_arguments: {
6897 auto bytecode = currentInstruction->as<OpGetFromArguments>();
6898 set(bytecode.m_dst,
6899 addToGraph(
6900 GetFromArguments,
6901 OpInfo(bytecode.m_index),
6902 OpInfo(getPrediction()),
6903 get(bytecode.m_arguments)));
6904 NEXT_OPCODE(op_get_from_arguments);
6905 }
6906
6907 case op_put_to_arguments: {
6908 auto bytecode = currentInstruction->as<OpPutToArguments>();
6909 addToGraph(
6910 PutToArguments,
6911 OpInfo(bytecode.m_index),
6912 get(bytecode.m_arguments),
6913 get(bytecode.m_value));
6914 NEXT_OPCODE(op_put_to_arguments);
6915 }
6916
6917 case op_get_argument: {
6918 auto bytecode = currentInstruction->as<OpGetArgument>();
6919 InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
6920 Node* argument;
6921 int32_t argumentIndexIncludingThis = bytecode.m_index;
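        // If we're inlined at a non-varargs call site, the argument count is known statically, so we can
        // read the argument slot directly or fold an out-of-bounds index to undefined. Otherwise, emit a
        // GetArgument node that checks the argument count at runtime.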
6922 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
6923 int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size();
6924 if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup)
6925 argument = get(virtualRegisterForArgument(argumentIndexIncludingThis));
6926 else
6927 argument = addToGraph(JSConstant, OpInfo(m_constantUndefined));
6928 } else
6929 argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction()));
6930 set(bytecode.m_dst, argument);
6931 NEXT_OPCODE(op_get_argument);
6932 }
6933 case op_new_async_generator_func:
6934 handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>());
6935 NEXT_OPCODE(op_new_async_generator_func);
6936 case op_new_func:
6937 handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>());
6938 NEXT_OPCODE(op_new_func);
6939 case op_new_generator_func:
6940 handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>());
6941 NEXT_OPCODE(op_new_generator_func);
6942 case op_new_async_func:
6943 handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>());
6944 NEXT_OPCODE(op_new_async_func);
6945
6946 case op_new_func_exp:
6947 handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>());
6948 NEXT_OPCODE(op_new_func_exp);
6949 case op_new_generator_func_exp:
6950 handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>());
6951 NEXT_OPCODE(op_new_generator_func_exp);
6952 case op_new_async_generator_func_exp:
6953 handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>());
6954 NEXT_OPCODE(op_new_async_generator_func_exp);
6955 case op_new_async_func_exp:
6956 handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>());
6957 NEXT_OPCODE(op_new_async_func_exp);
6958
6959 case op_set_function_name: {
6960 auto bytecode = currentInstruction->as<OpSetFunctionName>();
6961 Node* func = get(bytecode.m_function);
6962 Node* name = get(bytecode.m_name);
6963 addToGraph(SetFunctionName, func, name);
6964 NEXT_OPCODE(op_set_function_name);
6965 }
6966
6967 case op_typeof: {
6968 auto bytecode = currentInstruction->as<OpTypeof>();
6969 set(bytecode.m_dst, addToGraph(TypeOf, get(bytecode.m_value)));
6970 NEXT_OPCODE(op_typeof);
6971 }
6972
6973 case op_to_number: {
6974 auto bytecode = currentInstruction->as<OpToNumber>();
6975 SpeculatedType prediction = getPrediction();
6976 Node* value = get(bytecode.m_operand);
6977 set(bytecode.m_dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
6978 NEXT_OPCODE(op_to_number);
6979 }
6980
6981 case op_to_numeric: {
6982 auto bytecode = currentInstruction->as<OpToNumeric>();
6983 SpeculatedType prediction = getPrediction();
6984 Node* value = get(bytecode.m_operand);
6985 set(bytecode.m_dst, addToGraph(ToNumeric, OpInfo(0), OpInfo(prediction), value));
6986 NEXT_OPCODE(op_to_numeric);
6987 }
6988
6989 case op_to_string: {
6990 auto bytecode = currentInstruction->as<OpToString>();
6991 Node* value = get(bytecode.m_operand);
6992 set(bytecode.m_dst, addToGraph(ToString, value));
6993 NEXT_OPCODE(op_to_string);
6994 }
6995
6996 case op_to_object: {
6997 auto bytecode = currentInstruction->as<OpToObject>();
6998 SpeculatedType prediction = getPrediction();
6999 Node* value = get(bytecode.m_operand);
7000 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_message];
7001 set(bytecode.m_dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
7002 NEXT_OPCODE(op_to_object);
7003 }
7004
7005 case op_in_by_val: {
7006 auto bytecode = currentInstruction->as<OpInByVal>();
7007 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
7008 set(bytecode.m_dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.m_base), get(bytecode.m_property)));
7009 NEXT_OPCODE(op_in_by_val);
7010 }
7011
7012 case op_in_by_id: {
7013 auto bytecode = currentInstruction->as<OpInById>();
7014 Node* base = get(bytecode.m_base);
7015 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
7016 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
7017
7018 InByIdStatus status = InByIdStatus::computeFor(
7019 m_inlineStackTop->m_profiledBlock,
7020 m_inlineStackTop->m_baselineMap, m_icContextStack,
7021 currentCodeOrigin(), uid);
7022
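        // If the IC saw only simple variants, prove each variant's conditions and lower this to structure
        // checks feeding a MatchStructure node; otherwise fall back to a generic InById.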
7023 if (status.isSimple()) {
7024 bool allOK = true;
7025 MatchStructureData* data = m_graph.m_matchStructureData.add();
7026 for (const InByIdVariant& variant : status.variants()) {
7027 if (!check(variant.conditionSet())) {
7028 allOK = false;
7029 break;
7030 }
7031 for (Structure* structure : variant.structureSet()) {
7032 MatchStructureVariant matchVariant;
7033 matchVariant.structure = m_graph.registerStructure(structure);
7034 matchVariant.result = variant.isHit();
7035
7036 data->variants.append(WTFMove(matchVariant));
7037 }
7038 }
7039
7040 if (allOK) {
7041 addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
7042
7043 Node* match = addToGraph(MatchStructure, OpInfo(data), base);
7044 set(bytecode.m_dst, match);
7045 NEXT_OPCODE(op_in_by_id);
7046 }
7047 }
7048
7049 set(bytecode.m_dst, addToGraph(InById, OpInfo(identifierNumber), base));
7050 NEXT_OPCODE(op_in_by_id);
7051 }
7052
7053 case op_get_enumerable_length: {
7054 auto bytecode = currentInstruction->as<OpGetEnumerableLength>();
7055 set(bytecode.m_dst, addToGraph(GetEnumerableLength, get(bytecode.m_base)));
7056 NEXT_OPCODE(op_get_enumerable_length);
7057 }
7058
7059 case op_has_generic_property: {
7060 auto bytecode = currentInstruction->as<OpHasGenericProperty>();
7061 set(bytecode.m_dst, addToGraph(HasGenericProperty, get(bytecode.m_base), get(bytecode.m_property)));
7062 NEXT_OPCODE(op_has_generic_property);
7063 }
7064
7065 case op_has_structure_property: {
7066 auto bytecode = currentInstruction->as<OpHasStructureProperty>();
7067 set(bytecode.m_dst, addToGraph(HasStructureProperty,
7068 get(bytecode.m_base),
7069 get(bytecode.m_property),
7070 get(bytecode.m_enumerator)));
7071 NEXT_OPCODE(op_has_structure_property);
7072 }
7073
7074 case op_has_indexed_property: {
7075 auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
7076 Node* base = get(bytecode.m_base);
7077 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
7078 Node* property = get(bytecode.m_property);
7079 addVarArgChild(base);
7080 addVarArgChild(property);
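        // Leave the third child empty; FixupPhase may fill it in (e.g. with property storage) when it
        // refines the array mode.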
7081 addVarArgChild(nullptr);
7082 Node* hasIterableProperty = addToGraph(Node::VarArg, HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)));
7083 m_exitOK = false; // HasIndexedProperty must be treated as if it clobbers exit state, since FixupPhase may make it generic.
7084 set(bytecode.m_dst, hasIterableProperty);
7085 NEXT_OPCODE(op_has_indexed_property);
7086 }
7087
7088 case op_get_direct_pname: {
7089 auto bytecode = currentInstruction->as<OpGetDirectPname>();
7090 SpeculatedType prediction = getPredictionWithoutOSRExit();
7091
7092 Node* base = get(bytecode.m_base);
7093 Node* property = get(bytecode.m_property);
7094 Node* index = get(bytecode.m_index);
7095 Node* enumerator = get(bytecode.m_enumerator);
7096
7097 addVarArgChild(base);
7098 addVarArgChild(property);
7099 addVarArgChild(index);
7100 addVarArgChild(enumerator);
7101 set(bytecode.m_dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
7102
7103 NEXT_OPCODE(op_get_direct_pname);
7104 }
7105
7106 case op_get_property_enumerator: {
7107 auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>();
7108 set(bytecode.m_dst, addToGraph(GetPropertyEnumerator, get(bytecode.m_base)));
7109 NEXT_OPCODE(op_get_property_enumerator);
7110 }
7111
7112 case op_enumerator_structure_pname: {
7113 auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
7114 set(bytecode.m_dst, addToGraph(GetEnumeratorStructurePname,
7115 get(bytecode.m_enumerator),
7116 get(bytecode.m_index)));
7117 NEXT_OPCODE(op_enumerator_structure_pname);
7118 }
7119
7120 case op_enumerator_generic_pname: {
7121 auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
7122 set(bytecode.m_dst, addToGraph(GetEnumeratorGenericPname,
7123 get(bytecode.m_enumerator),
7124 get(bytecode.m_index)));
7125 NEXT_OPCODE(op_enumerator_generic_pname);
7126 }
7127
7128 case op_to_index_string: {
7129 auto bytecode = currentInstruction->as<OpToIndexString>();
7130 set(bytecode.m_dst, addToGraph(ToIndexString, get(bytecode.m_index)));
7131 NEXT_OPCODE(op_to_index_string);
7132 }
7133
7134 case op_get_internal_field: {
7135 auto bytecode = currentInstruction->as<OpGetInternalField>();
7136 set(bytecode.m_dst, addToGraph(GetInternalField, OpInfo(bytecode.m_index), OpInfo(getPrediction()), get(bytecode.m_base)));
7137 NEXT_OPCODE(op_get_internal_field);
7138 }
7139
7140 case op_put_internal_field: {
7141 auto bytecode = currentInstruction->as<OpPutInternalField>();
7142 addToGraph(PutInternalField, OpInfo(bytecode.m_index), get(bytecode.m_base), get(bytecode.m_value));
7143 NEXT_OPCODE(op_put_internal_field);
7144 }
7145
7146 case op_log_shadow_chicken_prologue: {
7147 auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
7148 if (!m_inlineStackTop->m_inlineCallFrame)
7149 addToGraph(LogShadowChickenPrologue, get(bytecode.m_scope));
7150 NEXT_OPCODE(op_log_shadow_chicken_prologue);
7151 }
7152
7153 case op_log_shadow_chicken_tail: {
7154 auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
7155 if (!m_inlineStackTop->m_inlineCallFrame) {
7156 // FIXME: The right solution for inlining is to elide these whenever the tail call
7157 // ends up being inlined.
7158 // https://bugs.webkit.org/show_bug.cgi?id=155686
7159 addToGraph(LogShadowChickenTail, get(bytecode.m_thisValue), get(bytecode.m_scope));
7160 }
7161 NEXT_OPCODE(op_log_shadow_chicken_tail);
7162 }
7163
7164 case op_unreachable: {
7165 flushForTerminal();
7166 addToGraph(Unreachable);
7167 LAST_OPCODE(op_unreachable);
7168 }
7169
7170 default:
7171 // Parse failed! This should not happen because the capabilities checker
7172 // should have caught it.
7173 RELEASE_ASSERT_NOT_REACHED();
7174 return;
7175 }
7176 }
7177}
7178
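// Link a block's terminal: replace the bytecode offsets recorded during parsing with pointers to the
// corresponding basic blocks.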
7179void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
7180{
7181 ASSERT(!block->isLinked);
7182 ASSERT(!block->isEmpty());
7183 Node* node = block->terminal();
7184 ASSERT(node->isTerminal());
7185
7186 switch (node->op()) {
7187 case Jump:
7188 node->targetBlock() = blockForBytecodeIndex(possibleTargets, BytecodeIndex(node->targetBytecodeOffsetDuringParsing()));
7189 break;
7190
7191 case Branch: {
7192 BranchData* data = node->branchData();
7193 data->taken.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->takenBytecodeIndex()));
7194 data->notTaken.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->notTakenBytecodeIndex()));
7195 break;
7196 }
7197
7198 case Switch: {
7199 SwitchData* data = node->switchData();
7200        for (unsigned i = data->cases.size(); i--;)
7201 data->cases[i].target.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->cases[i].target.bytecodeIndex()));
7202 data->fallThrough.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->fallThrough.bytecodeIndex()));
7203 break;
7204 }
7205
7206 default:
7207 RELEASE_ASSERT_NOT_REACHED();
7208 }
7209
7210 VERBOSE_LOG("Marking ", RawPointer(block), " as linked (actually did linking)\n");
7211 block->didLink();
7212}
7213
7214void ByteCodeParser::linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
7215{
7216 for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
7217 VERBOSE_LOG("Attempting to link ", RawPointer(unlinkedBlocks[i]), "\n");
7218 linkBlock(unlinkedBlocks[i], possibleTargets);
7219 }
7220}
7221
7222ByteCodeParser::InlineStackEntry::InlineStackEntry(
7223 ByteCodeParser* byteCodeParser,
7224 CodeBlock* codeBlock,
7225 CodeBlock* profiledBlock,
7226 JSFunction* callee, // Null if this is a closure call.
7227 VirtualRegister returnValueVR,
7228 VirtualRegister inlineCallFrameStart,
7229 int argumentCountIncludingThis,
7230 InlineCallFrame::Kind kind,
7231 BasicBlock* continuationBlock)
7232 : m_byteCodeParser(byteCodeParser)
7233 , m_codeBlock(codeBlock)
7234 , m_profiledBlock(profiledBlock)
7235 , m_continuationBlock(continuationBlock)
7236 , m_returnValue(returnValueVR)
7237 , m_caller(byteCodeParser->m_inlineStackTop)
7238{
7239 {
7240 m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock());
7241
7242 ConcurrentJSLocker locker(m_profiledBlock->m_lock);
7243 m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker));
7244
7245        // We do this while holding the lock because StructureStubInfos may still be added to
7246        // operations, and because the profiled block could be in the middle of LLInt->JIT
7247        // tier-up, in which case the infos are being added right now.
7248 if (m_profiledBlock->hasBaselineJITProfiling())
7249 m_profiledBlock->getICStatusMap(locker, m_baselineMap);
7250 }
7251
7252 CodeBlock* optimizedBlock = m_profiledBlock->replacement();
7253 m_optimizedContext.optimizedCodeBlock = optimizedBlock;
7254 if (Options::usePolyvariantDevirtualization() && optimizedBlock) {
7255 ConcurrentJSLocker locker(optimizedBlock->m_lock);
7256 optimizedBlock->getICStatusMap(locker, m_optimizedContext.map);
7257 }
7258 byteCodeParser->m_icContextStack.append(&m_optimizedContext);
7259
7260 int argumentCountIncludingThisWithFixup = std::max<int>(argumentCountIncludingThis, codeBlock->numParameters());
7261
7262 if (m_caller) {
7263 // Inline case.
7264 ASSERT(codeBlock != byteCodeParser->m_codeBlock);
7265 ASSERT(inlineCallFrameStart.isValid());
7266
7267 m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames()->add();
7268 m_optimizedContext.inlineCallFrame = m_inlineCallFrame;
7269
7270 // The owner is the machine code block, and we already have a barrier on that when the
7271 // plan finishes.
7272 m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion());
7273 m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - CallFrame::headerSizeInRegisters);
7274 m_inlineCallFrame->argumentCountIncludingThis = argumentCountIncludingThis;
7275 if (callee) {
7276 m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
7277 m_inlineCallFrame->isClosureCall = false;
7278 } else
7279 m_inlineCallFrame->isClosureCall = true;
7280 m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin();
7281        m_inlineCallFrame->argumentsWithFixup.resizeToFit(argumentCountIncludingThisWithFixup); // Set the number of arguments including |this|, but don't configure the value recoveries yet.
7282 m_inlineCallFrame->kind = kind;
7283
7284 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
7285 m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
7286
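        // Remap the inlinee's identifiers into the graph's identifier table and append its switch jump
        // tables to the machine code block, recording the remapped indices.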
7287 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
7288 UniquedStringImpl* rep = codeBlock->identifier(i).impl();
7289 unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep);
7290 m_identifierRemap[i] = index;
7291 }
7292 for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
7293 m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
7294 byteCodeParser->m_codeBlock->addSwitchJumpTableFromProfiledCodeBlock(codeBlock->switchJumpTable(i));
7295 }
7296 } else {
7297 // Machine code block case.
7298 ASSERT(codeBlock == byteCodeParser->m_codeBlock);
7299 ASSERT(!callee);
7300 ASSERT(!returnValueVR.isValid());
7301 ASSERT(!inlineCallFrameStart.isValid());
7302
7303        m_inlineCallFrame = nullptr;
7304
7305 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
7306 m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
7307 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
7308 m_identifierRemap[i] = i;
7309 for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
7310 m_switchRemap[i] = i;
7311 }
7312
7313 m_argumentPositions.resize(argumentCountIncludingThisWithFixup);
7314 for (int i = 0; i < argumentCountIncludingThisWithFixup; ++i) {
7315 byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
7316 ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
7317 m_argumentPositions[i] = argumentPosition;
7318 }
7319 byteCodeParser->m_inlineCallFrameToArgumentPositions.add(m_inlineCallFrame, m_argumentPositions);
7320
7321 byteCodeParser->m_inlineStackTop = this;
7322}
7323
7324ByteCodeParser::InlineStackEntry::~InlineStackEntry()
7325{
7326 m_byteCodeParser->m_inlineStackTop = m_caller;
7327 RELEASE_ASSERT(m_byteCodeParser->m_icContextStack.last() == &m_optimizedContext);
7328 m_byteCodeParser->m_icContextStack.removeLast();
7329}
7330
7331void ByteCodeParser::parseCodeBlock()
7332{
7333 clearCaches();
7334
7335 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
7336
7337 if (UNLIKELY(m_graph.compilation())) {
7338 m_graph.compilation()->addProfiledBytecodes(
7339 *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
7340 }
7341
7342 if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
7343 Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
7344 if (inlineCallFrame()) {
7345 DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITType::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
7346 deferredSourceDump.append(dump);
7347 } else
7348 deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
7349 }
7350
7351 if (Options::dumpBytecodeAtDFGTime()) {
7352 dataLog("Parsing ", *codeBlock);
7353 if (inlineCallFrame()) {
7354 dataLog(
7355 " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT),
7356 " ", inlineCallFrame()->directCaller);
7357 }
7358 dataLog(
7359 ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
7360 codeBlock->baselineVersion()->dumpBytecode();
7361 }
7362
7363 Vector<InstructionStream::Offset, 32> jumpTargets;
7364 computePreciseJumpTargets(codeBlock, jumpTargets);
7365 if (Options::dumpBytecodeAtDFGTime()) {
7366 dataLog("Jump targets: ");
7367 CommaPrinter comma;
7368 for (unsigned i = 0; i < jumpTargets.size(); ++i)
7369 dataLog(comma, jumpTargets[i]);
7370 dataLog("\n");
7371 }
7372
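    // Walk the bytecode in chunks delimited by the precise jump targets; each chunk is parsed into one
    // or more basic blocks.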
7373 for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
7374        // The maximum bytecode offset that can go into the current basic block is either the next jump target or the end of the instruction stream.
7375 unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
7376 ASSERT(m_currentIndex.offset() < limit);
7377
7378 // Loop until we reach the current limit (i.e. next jump target).
7379 do {
7380 // There may already be a currentBlock in two cases:
7381 // - we may have just entered the loop for the first time
7382 // - we may have just returned from an inlined callee that had some early returns and
7383 // so allocated a continuation block, and the instruction after the call is a jump target.
7384 // In both cases, we want to keep using it.
7385 if (!m_currentBlock) {
7386 m_currentBlock = allocateTargetableBlock(m_currentIndex);
7387
7388 // The first block is definitely an OSR target.
7389 if (m_graph.numBlocks() == 1) {
7390 m_currentBlock->isOSRTarget = true;
7391 m_graph.m_roots.append(m_currentBlock);
7392 }
7393 prepareToParseBlock();
7394 }
7395
7396 parseBlock(limit);
7397
7398 // We should not have gone beyond the limit.
7399 ASSERT(m_currentIndex.offset() <= limit);
7400
7401 if (m_currentBlock->isEmpty()) {
7402                // This case only happens if the last instruction was an inlined call that had early
7403                // returns or was polymorphic (creating an empty continuation block), and we then hit
7404                // the limit before putting anything into the continuation block.
7405 ASSERT(m_currentIndex.offset() == limit);
7406 makeBlockTargetable(m_currentBlock, m_currentIndex);
7407 } else {
7408 ASSERT(m_currentBlock->terminal() || (m_currentIndex.offset() == codeBlock->instructions().size() && inlineCallFrame()));
7409 m_currentBlock = nullptr;
7410 }
7411 } while (m_currentIndex.offset() < limit);
7412 }
7413
7414 // Should have reached the end of the instructions.
7415 ASSERT(m_currentIndex.offset() == codeBlock->instructions().size());
7416
7417 VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");
7418}
7419
7420template <typename Bytecode>
7421void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
7422{
7423 Node* base = get(bytecode.m_base);
7424 Node* property = get(bytecode.m_property);
7425 Node* value = get(bytecode.m_value);
7426 bool isDirect = Bytecode::opcodeID == op_put_by_val_direct;
7427 bool compiledAsPutById = false;
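    // First try to compile this put_by_val as a put_by_id: if the baseline ByValInfo cached a single
    // property identifier and never took the slow path, guard the property with CheckCell/CheckIdent
    // and reuse the put_by_id lowering.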
7428 {
7429 unsigned identifierNumber = std::numeric_limits<unsigned>::max();
7430 PutByIdStatus putByIdStatus;
7431 {
7432 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
7433 ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
7434            // FIXME: When the bytecode has not been compiled by the baseline JIT, byValInfo is
7435            // null, so there is no profiling information to consult.
7436 if (byValInfo
7437 && byValInfo->stubInfo
7438 && !byValInfo->tookSlowPath
7439 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
7440 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
7441 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
7442 compiledAsPutById = true;
7443 identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
7444 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
7445
7446 if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
7447 FrozenValue* frozen = m_graph.freezeStrong(symbol);
7448 addToGraph(CheckCell, OpInfo(frozen), property);
7449 } else {
7450 ASSERT(!uid->isSymbol());
7451 addToGraph(CheckIdent, OpInfo(uid), property);
7452 }
7453
7454 putByIdStatus = PutByIdStatus::computeForStubInfo(
7455 locker, m_inlineStackTop->m_profiledBlock,
7456 byValInfo->stubInfo, currentCodeOrigin(), uid);
7457
7458 }
7459 }
7460
7461 if (compiledAsPutById)
7462 handlePutById(base, identifierNumber, value, putByIdStatus, isDirect, instructionSize);
7463 }
7464
7465 if (!compiledAsPutById) {
7466 ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).m_arrayProfile, Array::Write);
7467
7468 addVarArgChild(base);
7469 addVarArgChild(property);
7470 addVarArgChild(value);
7471        addVarArgChild(nullptr); // Leave room for property storage.
7472        addVarArgChild(nullptr); // Leave room for length.
7473 addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
7474 m_exitOK = false; // PutByVal and PutByValDirect must be treated as if they clobber exit state, since FixupPhase may make them generic.
7475 }
7476}
7477
7478template <typename Bytecode>
7479void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode)
7480{
7481 Node* base = get(bytecode.m_base);
7482 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
7483 Node* accessor = get(bytecode.m_accessor);
7484 addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, accessor);
7485}
7486
7487template <typename Bytecode>
7488void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode)
7489{
7490 Node* base = get(bytecode.m_base);
7491 Node* subscript = get(bytecode.m_property);
7492 Node* accessor = get(bytecode.m_accessor);
7493 addToGraph(op, OpInfo(bytecode.m_attributes), base, subscript, accessor);
7494}
7495
7496template <typename Bytecode>
7497void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode)
7498{
7499 FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.m_functionDecl);
7500 FrozenValue* frozen = m_graph.freezeStrong(decl);
7501 Node* scope = get(bytecode.m_scope);
7502 set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
7503 // Ideally we wouldn't have to do this Phantom. But:
7504 //
7505 // For the constant case: we must do it because otherwise we would have no way of knowing
7506 // that the scope is live at OSR here.
7507 //
7508 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
7509 // won't be able to handle an Undefined scope.
7510 addToGraph(Phantom, scope);
7511}
7512
7513template <typename Bytecode>
7514void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode)
7515{
7516 FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.m_functionDecl);
7517 FrozenValue* frozen = m_graph.freezeStrong(expr);
7518 Node* scope = get(bytecode.m_scope);
7519 set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
7520 // Ideally we wouldn't have to do this Phantom. But:
7521 //
7522 // For the constant case: we must do it because otherwise we would have no way of knowing
7523 // that the scope is live at OSR here.
7524 //
7525 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
7526 // won't be able to handle an Undefined scope.
7527 addToGraph(Phantom, scope);
7528}
7529
7530template <typename Bytecode>
7531void ByteCodeParser::handleCreateInternalFieldObject(const ClassInfo* classInfo, NodeType createOp, NodeType newOp, Bytecode bytecode)
7532{
7533 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
7534 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
7535 Node* callee = get(VirtualRegister(bytecode.m_callee));
7536
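    // If the callee is a known JSFunction -- a constant, or a stable cached callee from the profiling
    // metadata guarded by a CheckCell -- and its allocation structure matches this classInfo and global
    // object, allocate directly with newOp under the allocation profile watchpoint. Otherwise fall back
    // to the generic createOp.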
7537 JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
7538 if (!function) {
7539 JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
7540 if (cachedFunction
7541 && cachedFunction != JSCell::seenMultipleCalleeObjects()
7542 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
7543 ASSERT(cachedFunction->inherits<JSFunction>(*m_vm));
7544
7545 FrozenValue* frozen = m_graph.freeze(cachedFunction);
7546 addToGraph(CheckCell, OpInfo(frozen), callee);
7547
7548 function = static_cast<JSFunction*>(cachedFunction);
7549 }
7550 }
7551
7552 if (function) {
7553 if (FunctionRareData* rareData = function->rareData()) {
7554 if (rareData->allocationProfileWatchpointSet().isStillValid()) {
7555 Structure* structure = rareData->internalFunctionAllocationStructure();
7556 if (structure
7557 && structure->classInfo() == classInfo
7558 && structure->globalObject() == globalObject
7559 && rareData->allocationProfileWatchpointSet().isStillValid()) {
7560 m_graph.freeze(rareData);
7561 m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
7562
7563 set(VirtualRegister(bytecode.m_dst), addToGraph(newOp, OpInfo(m_graph.registerStructure(structure))));
7564 // The callee is still live up to this point.
7565 addToGraph(Phantom, callee);
7566 return;
7567 }
7568 }
7569 }
7570 }
7571
7572 set(VirtualRegister(bytecode.m_dst), addToGraph(createOp, callee));
7573}
7574
7575void ByteCodeParser::parse()
7576{
7577 // Set during construction.
7578 ASSERT(!m_currentIndex.offset());
7579
7580 VERBOSE_LOG("Parsing ", *m_codeBlock, "\n");
7581
7582 InlineStackEntry inlineStackEntry(
7583        this, m_codeBlock, m_profiledBlock, nullptr, VirtualRegister(), VirtualRegister(),
7584 m_codeBlock->numParameters(), InlineCallFrame::Call, nullptr);
7585
7586 parseCodeBlock();
7587 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
7588
7589 if (m_hasAnyForceOSRExits) {
7590 BlockSet blocksToIgnore;
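        // The block that OSR entry targets (and, via the fixed point below, any block that can reach it)
        // must be left untouched so that OSR entry still has a valid target.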
7591 for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
7592 if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) {
7593 blocksToIgnore.add(block);
7594 break;
7595 }
7596 }
7597
7598 {
7599 bool isSafeToValidate = false;
7600 auto postOrder = m_graph.blocksInPostOrder(isSafeToValidate); // This algorithm doesn't rely on the predecessors list, which is not yet built.
7601 bool changed;
7602 do {
7603 changed = false;
7604 for (BasicBlock* block : postOrder) {
7605 for (BasicBlock* successor : block->successors()) {
7606 if (blocksToIgnore.contains(successor)) {
7607 changed |= blocksToIgnore.add(block);
7608 break;
7609 }
7610 }
7611 }
7612 } while (changed);
7613 }
7614
7615 InsertionSet insertionSet(m_graph);
7616 Operands<VariableAccessData*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);
7617
7618 for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
7619 if (blocksToIgnore.contains(block))
7620 continue;
7621
7622 mapping.fill(nullptr);
7623 if (validationEnabled()) {
7624 // Verify that it's correct to fill mapping with nullptr.
7625 for (unsigned i = 0; i < block->variablesAtHead.size(); ++i) {
7626 Node* node = block->variablesAtHead.at(i);
7627 RELEASE_ASSERT(!node);
7628 }
7629 }
7630
7631 for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
7632 {
7633 Node* node = block->at(nodeIndex);
7634
7635 if (node->hasVariableAccessData(m_graph))
7636 mapping.operand(node->local()) = node->variableAccessData();
7637
7638 if (node->op() != ForceOSRExit)
7639 continue;
7640 }
7641
7642 NodeOrigin origin = block->at(nodeIndex)->origin;
7643 RELEASE_ASSERT(origin.exitOK);
7644
7645 ++nodeIndex;
7646
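                // From here on, preserve bytecode liveness at the exit origin (Flush/PhantomLocal), then
                // turn the remainder of the block into Phantom uses of earlier nodes ending in Unreachable.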
7647 {
7648 if (validationEnabled()) {
7649                        // This verifies that we don't need to change any of the successors' predecessor
7650                        // lists after planting the Unreachable below. At this point in the bytecode
7651                        // parser, we haven't linked up the predecessor lists yet.
7652 for (BasicBlock* successor : block->successors())
7653 RELEASE_ASSERT(successor->predecessors.isEmpty());
7654 }
7655
7656 auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
7657 VariableAccessData* variable = mapping.operand(operand);
7658 if (!variable) {
7659 variable = newVariableAccessData(operand);
7660 mapping.operand(operand) = variable;
7661 }
7662
7663 VirtualRegister argument = operand - (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
7664 if (argument.isArgument() && !argument.isHeader()) {
7665 const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
7666 arguments[argument.toArgument()]->addVariable(variable);
7667 }
7668 insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable));
7669 };
7670 auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
7671 insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
7672 };
7673 auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
7674 insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
7675 };
7676 flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect);
7677 }
7678
7679 while (true) {
7680 RELEASE_ASSERT(nodeIndex < block->size());
7681
7682 Node* node = block->at(nodeIndex);
7683
7684 node->origin = origin;
7685 m_graph.doToChildren(node, [&] (Edge edge) {
7686                        // We only need to keep data flow edges to nodes defined prior to the ForceOSRExit. The reason
7687                        // for this is that we rely on backwards propagation being able to see the "full" bytecode. To
7688                        // model this, we preserve uses of a node in a generic way so that backwards propagation can
7689                        // reason about them. Therefore, we can't remove uses of a node defined before the ForceOSRExit
7690                        // even when we're at a point in the program after the ForceOSRExit, because that would break
7691                        // backwards propagation's analysis of the node's uses. However, we don't need this same
7692                        // preservation for nodes defined after the ForceOSRExit, as we've already exited before those defs.
7693 if (edge->hasResult())
7694 insertionSet.insertNode(nodeIndex, SpecNone, Phantom, origin, Edge(edge.node(), UntypedUse));
7695 });
7696
7697 bool isTerminal = node->isTerminal();
7698
7699 node->removeWithoutChecks();
7700
7701 if (isTerminal) {
7702 insertionSet.insertNode(nodeIndex, SpecNone, Unreachable, origin);
7703 break;
7704 }
7705
7706 ++nodeIndex;
7707 }
7708
7709 insertionSet.execute(block);
7710
7711 auto nodeAndIndex = block->findTerminal();
7712 RELEASE_ASSERT(nodeAndIndex.node->op() == Unreachable);
7713 block->resize(nodeAndIndex.index + 1);
7714 break;
7715 }
7716 }
7717 } else if (validationEnabled()) {
7718 // Ensure our bookkeeping for ForceOSRExit nodes is working.
7719 for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
7720 for (Node* node : *block)
7721 RELEASE_ASSERT(node->op() != ForceOSRExit);
7722 }
7723 }
7724
7725 m_graph.determineReachability();
7726 m_graph.killUnreachableBlocks();
7727
7728 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
7729 BasicBlock* block = m_graph.block(blockIndex);
7730 if (!block)
7731 continue;
7732 ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
7733 ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
7734 ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
7735 ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
7736 }
7737
7738 m_graph.m_localVars = m_numLocals;
7739 m_graph.m_parameterSlots = m_parameterSlots;
7740}
7741
7742void parse(Graph& graph)
7743{
7744 ByteCodeParser(graph).parse();
7745}
7746
7747} } // namespace JSC::DFG
7748
7749#endif
7750