/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "ArithProfile.h"
#include "ArrayConstructor.h"
#include "BasicBlockLocation.h"
#include "BuiltinNames.h"
#include "BytecodeStructs.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "CommonSlowPaths.h"
#include "DFGAbstractHeap.h"
#include "DFGArrayMode.h"
#include "DFGCFG.h"
#include "DFGCapabilities.h"
#include "DFGClobberize.h"
#include "DFGClobbersExitState.h"
#include "DFGGraph.h"
#include "DFGJITCode.h"
#include "FunctionCodeBlock.h"
#include "GetByIdStatus.h"
#include "Heap.h"
#include "InByIdStatus.h"
#include "InstanceOfStatus.h"
#include "JSCInlines.h"
#include "JSFixedArray.h"
#include "JSImmutableButterfly.h"
#include "JSModuleEnvironment.h"
#include "JSModuleNamespaceObject.h"
#include "NumberConstructor.h"
#include "ObjectConstructor.h"
#include "OpcodeInlines.h"
#include "PreciseJumpTargets.h"
#include "PutByIdFlags.h"
#include "PutByIdStatus.h"
#include "RegExpPrototype.h"
#include "StackAlignment.h"
#include "StringConstructor.h"
#include "StructureStubInfo.h"
#include "SymbolConstructor.h"
#include "Watchdog.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
#include <wtf/SetForScope.h>
#include <wtf/StdLibExtras.h>

namespace JSC { namespace DFG {

namespace DFGByteCodeParserInternal {
#ifdef NDEBUG
static const bool verbose = false;
#else
static const bool verbose = true;
#endif
} // namespace DFGByteCodeParserInternal

#define VERBOSE_LOG(...) do { \
        if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
            dataLog(__VA_ARGS__); \
    } while (false)
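// Illustrative use of the macro above: VERBOSE_LOG("  parsing bytecode index ", m_currentIndex, "\n").
// In release (NDEBUG) builds DFGByteCodeParserInternal::verbose is statically false, so the dataLog()
// call compiles away; in debug builds logging is further gated on Options::verboseDFGBytecodeParsing().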

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_vm(&graph.m_vm)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(graph.freeze(jsUndefined()))
        , m_constantNull(graph.freeze(jsNull()))
        , m_constantNaN(graph.freeze(jsNumber(PNaN)))
        , m_constantOne(graph.freeze(jsNumber(1)))
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->numCalleeLocals())
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_inlineStackTop(0)
        , m_currentInstruction(0)
        , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
    {
        ASSERT(m_profiledBlock);
    }

    // Parse a full CodeBlock of bytecode.
    void parse();

private:
    struct InlineStackEntry;

    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    void ensureLocals(unsigned newNumLocals)
    {
        VERBOSE_LOG("  ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
        if (newNumLocals <= m_numLocals)
            return;
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.numBlocks(); ++i)
            m_graph.block(i)->ensureLocals(newNumLocals);
    }

    // Helper for min and max.
    template<typename ChecksFunctor>
    bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);

    void refineStatically(CallLinkStatus&, Node* callTarget);
    // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
    // or they can be untargetable, with bytecodeBegin == UINT_MAX, to be managed manually and not by the linkBlock machinery.
    // This is used most notably when doing polyvariant inlining (which requires a fair bit of control flow with no bytecode analog).
    // It is also used when doing an early return from an inlined callee: it is easier to fix up the bytecode index later on, if needed,
    // than to thread the right index all the way down to the handling of op_ret.
    BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
    BasicBlock* allocateUntargetableBlock();
    // An untargetable block can be given a bytecodeIndex and handed to the linkBlock machinery, but only once; a block never transitions back from targetable to untargetable.
    void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
    void addJumpTo(BasicBlock*);
    void addJumpTo(unsigned bytecodeIndex);
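    // Illustrative flow for the block helpers above (a sketch, not code from this file):
    // polyvariant inlining allocates a continuation with allocateUntargetableBlock(), lets
    // each inlined path addJumpTo() it, and only calls makeBlockTargetable(continuation,
    // bytecodeIndex) once the continuation's bytecode index is known, so that the linkBlock
    // machinery can target it from then on.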
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    enum Terminality { Terminal, NonTerminal };
    Terminality handleCall(
        VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
        Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
        SpeculatedType prediction);
    template<typename CallOp>
    Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
    template<typename CallOp>
    Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
    void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
    Node* getArgumentCount();
    template<typename ChecksFunctor>
    bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
    unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
    unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
    enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
    template<typename ChecksFunctor>
    void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    template<typename ChecksFunctor>
    bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
    bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
    bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);

    template<typename Bytecode>
    void handlePutByVal(Bytecode, unsigned instructionSize);
    template<typename Bytecode>
    void handlePutAccessorById(NodeType, Bytecode);
    template<typename Bytecode>
    void handlePutAccessorByVal(NodeType, Bytecode);
    template<typename Bytecode>
    void handleNewFunc(NodeType, Bytecode);
    template<typename Bytecode>
    void handleNewFuncExp(NodeType, Bytecode);

    // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
    // check the validity of the condition, but it may return a null one if it encounters a contradiction.
    ObjectPropertyCondition presenceLike(
        JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);

    // Attempt to watch the presence of a property. It will watch that the property is present in the same
    // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
    // Returns true if this all works out.
    bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
    void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);

    // Works with both GetByIdVariant and the setter form of PutByIdVariant.
    template<typename VariantType>
    Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);

    Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);

    template<typename Op>
    void parseGetById(const Instruction*);
    void handleGetById(
        VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
    void emitPutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
    void handlePutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
        bool isDirect, unsigned instructionSize);

    // Either register a watchpoint or emit a check for this condition. Returns false if the
    // condition no longer holds, and therefore no reasonable check can be emitted.
    bool check(const ObjectPropertyCondition&);

    GetByOffsetMethod promoteToConstant(GetByOffsetMethod);

    // Either register a watchpoint or emit a check for this condition. It must be a Presence
    // condition. It will attempt to promote a Presence condition to an Equivalence condition.
    // Emits code for the loaded value that the condition guards, and returns a node containing
    // the loaded value. Returns null if the condition no longer holds.
    GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
    Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
    Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);

    // Calls check() for each condition in the set: that is, it either emits checks or registers
    // watchpoints (or a combination of the two) to make the conditions hold. If any of those
    // conditions are no longer checkable, returns false.
    bool check(const ObjectPropertyConditionSet&);

    // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
    // base. Does a combination of watchpoint registration and check emission to guard the
    // conditions, and emits code to load the value from the slot base. Returns a node containing
    // the loaded value. Returns null if any of the conditions were no longer checkable.
    GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
    Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);

    void prepareToParseBlock();
    void clearCaches();

    // Parse a single basic block of bytecode instructions.
    void parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
    void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);

    VariableAccessData* newVariableAccessData(VirtualRegister operand)
    {
        ASSERT(!operand.isConstant());

        m_graph.m_variableAccessData.append(VariableAccessData(operand));
        return &m_graph.m_variableAccessData.last();
    }

    // Get/Set the operands/result of a bytecode instruction.
    Node* getDirect(VirtualRegister operand)
    {
        ASSERT(!operand.isConstant());

        // Is this an argument?
        if (operand.isArgument())
            return getArgument(operand);

        // Must be a local.
        return getLocal(operand);
    }

    Node* get(VirtualRegister operand)
    {
        if (operand.isConstant()) {
            unsigned constantIndex = operand.toConstantIndex();
            unsigned oldSize = m_constants.size();
            if (constantIndex >= oldSize || !m_constants[constantIndex]) {
                const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
                JSValue value = codeBlock.getConstant(operand.offset());
                SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
                if (constantIndex >= oldSize) {
                    m_constants.grow(constantIndex + 1);
                    for (unsigned i = oldSize; i < m_constants.size(); ++i)
                        m_constants[i] = nullptr;
                }

                Node* constantNode = nullptr;
                if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
                    constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
                else
                    constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
                m_constants[constantIndex] = constantNode;
            }
            ASSERT(m_constants[constantIndex]);
            return m_constants[constantIndex];
        }

        if (inlineCallFrame()) {
            if (!inlineCallFrame()->isClosureCall) {
                JSFunction* callee = inlineCallFrame()->calleeConstant();
                if (operand.offset() == CallFrameSlot::callee)
                    return weakJSConstant(callee);
            }
        } else if (operand.offset() == CallFrameSlot::callee) {
            // We have to do some constant-folding here because this enables CreateThis folding. Note
            // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
            // case if the function is a singleton then we already know it.
            if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
                InferredValue* singleton = executable->singletonFunction();
                if (JSValue value = singleton->inferredValue()) {
                    m_graph.watchpoints().addLazily(singleton);
                    JSFunction* function = jsCast<JSFunction*>(value);
                    return weakJSConstant(function);
                }
            }
            return addToGraph(GetCallee);
        }

        return getDirect(m_inlineStackTop->remapOperand(operand));
    }

    enum SetMode {
        // A normal set which follows a two-phase commit that spans code origins. During
        // the current code origin it issues a MovHint, and at the start of the next
        // code origin there will be a SetLocal. If the local needs flushing, the second
        // SetLocal will be preceded with a Flush.
        NormalSet,

        // A set where the SetLocal happens immediately and there is still a Flush. This
        // is relevant when assigning to a local in tricky situations for the delayed
        // SetLocal logic but where we know that we have not performed any side effects
        // within this code origin. This is a safe replacement for NormalSet anytime we
        // know that we have not yet performed side effects in this code origin.
        ImmediateSetWithFlush,

        // A set where the SetLocal happens immediately and we do not Flush it even if
        // this is a local that is marked as needing it. This is relevant when
        // initializing locals at the top of a function.
        ImmediateNakedSet
    };
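    // Illustrative call sites for the modes above (a sketch; real operands vary by opcode):
    //     set(bytecode.m_dst, value);                                    // NormalSet: MovHint now, SetLocal at the next code origin.
    //     set(bytecode.m_dst, value, ImmediateSetWithFlush);             // only safe before any side effects in this code origin.
    //     set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet); // e.g. initializing locals at the top of a function.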
    Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        addToGraph(MovHint, OpInfo(operand.offset()), value);

        // We can't exit anymore because our OSR exit state has changed.
        m_exitOK = false;

        DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);

        if (setMode == NormalSet) {
            m_setLocalQueue.append(delayed);
            return nullptr;
        }

        return delayed.execute(this);
    }

    void processSetLocalQueue()
    {
        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
            m_setLocalQueue[i].execute(this);
        m_setLocalQueue.shrink(0);
    }

    Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }

    Node* injectLazyOperandSpeculation(Node* node)
    {
        ASSERT(node->op() == GetLocal);
        ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
        ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        LazyOperandValueProfileKey key(m_currentIndex, node->local());
        SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
        node->variableAccessData()->predict(prediction);
        return node;
    }

    // Used in implementing get/set, above, where the operand is a local variable.
    Node* getLocal(VirtualRegister operand)
    {
        unsigned local = operand.toLocal();

        Node* node = m_currentBlock->variablesAtTail.local(local);

        // This has two goals: 1) link together variable access datas, and 2)
        // try to avoid creating redundant GetLocals. (1) is required for
        // correctness - no other phase will ensure that block-local variable
        // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.

        VariableAccessData* variable;

        if (node) {
            variable = node->variableAccessData();

            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand);

        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }
    Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);

        unsigned local = operand.toLocal();

        if (setMode != ImmediateNakedSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (argumentPosition)
                flushDirect(operand, argumentPosition);
            else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
                flush(operand);
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }

    // Used in implementing get/set, above, where the operand is an argument.
    Node* getArgument(VirtualRegister operand)
    {
        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);

        Node* node = m_currentBlock->variablesAtTail.argument(argument);

        VariableAccessData* variable;

        if (node) {
            variable = node->variableAccessData();

            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand);

        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);

        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        // Always flush arguments, except for 'this'. If 'this' is created by us,
        // then make sure that it's never unboxed.
        if (argument || m_graph.needsFlushedThis()) {
            if (setMode != ImmediateNakedSet)
                flushDirect(operand);
        }

        if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
            variableAccessData->mergeShouldNeverUnbox(true);

        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }

    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }

    ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
                continue;
            if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
                continue;
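            // The operand lies inside this inline frame's argument window
            // [stackOffset + headerSizeInRegisters, stackOffset + thisArgumentOffset() + argc),
            // so subtracting the frame's stackOffset recovers its argument index.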
            int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }

    ArgumentPosition* findArgumentPosition(VirtualRegister operand)
    {
        if (operand.isArgument())
            return findArgumentPositionForArgument(operand.toArgument());
        return findArgumentPositionForLocal(operand);
    }

    template<typename AddFlushDirectFunc>
    void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
    {
        int numArguments;
        if (inlineCallFrame) {
            ASSERT(!m_graph.hasDebuggerEnabled());
            numArguments = inlineCallFrame->argumentsWithFixup.size();
            if (inlineCallFrame->isClosureCall)
                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
            if (inlineCallFrame->isVarargs())
                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
        } else
            numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();

        for (unsigned argument = numArguments; argument--;)
            addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));

        if (m_graph.needsScopeRegister())
            addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
    }

    template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
    void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
    {
        origin.walkUpInlineStack(
            [&] (CodeOrigin origin) {
                unsigned bytecodeIndex = origin.bytecodeIndex();
                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
                flushImpl(inlineCallFrame, addFlushDirect);

                CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
                const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);

                for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
                    if (livenessAtBytecode[local])
                        addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
                }
            });
    }

    void flush(VirtualRegister operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }

    void flushDirect(VirtualRegister operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }

    void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
    {
        addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
    }

    template<NodeType nodeType>
    void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
    {
        ASSERT(!operand.isConstant());

        Node* node = m_currentBlock->variablesAtTail.operand(operand);

        VariableAccessData* variable;

        if (node)
            variable = node->variableAccessData();
        else
            variable = newVariableAccessData(operand);

        node = addToGraph(nodeType, OpInfo(variable));
        m_currentBlock->variablesAtTail.operand(operand) = node;
        if (argumentPosition)
            argumentPosition->addVariable(variable);
    }

    void phantomLocalDirect(VirtualRegister operand)
    {
        addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
    }

    void flush(InlineStackEntry* inlineStackEntry)
    {
        auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
        flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
    }

    void flushForTerminal()
    {
        auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
        auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
        flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
    }

    void flushForReturn()
    {
        flush(m_inlineStackTop);
    }

    void flushIfTerminal(SwitchData& data)
    {
        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
            return;

        for (unsigned i = data.cases.size(); i--;) {
            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
                return;
        }

        flushForTerminal();
    }

    // Assumes that the constant should be strongly marked.
    Node* jsConstant(JSValue constantValue)
    {
        return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
    }

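    // Like jsConstant(), but freezes the value weakly: the constant does not itself keep the
    // cell alive; the compilation instead records a weak reference, relying on the code being
    // jettisoned if the cell is ever collected.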
    Node* weakJSConstant(JSValue constantValue)
    {
        return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
    }

    // Helper functions to get/set the this value.
    Node* getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }

    void setThis(Node* value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    InlineCallFrame* inlineCallFrame()
    {
        return m_inlineStackTop->m_inlineCallFrame;
    }

    bool allInlineFramesAreTailCalls()
    {
        return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, inlineCallFrame());
    }

    NodeOrigin currentNodeOrigin()
    {
        CodeOrigin semantic;
        CodeOrigin forExit;

        if (m_currentSemanticOrigin.isSet())
            semantic = m_currentSemanticOrigin;
        else
            semantic = currentCodeOrigin();

        forExit = currentCodeOrigin();

        return NodeOrigin(semantic, forExit, m_exitOK);
    }

    BranchData* branchData(unsigned taken, unsigned notTaken)
    {
        // We assume that branches originating from bytecode always have a fall-through. We
        // use this assumption to avoid checking for the creation of terminal blocks.
        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
        BranchData* data = m_graph.m_branchData.add();
        *data = BranchData::withBytecodeIndices(taken, notTaken);
        return data;
    }

    Node* addToGraph(Node* node)
    {
        VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");

        m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);

        m_currentBlock->append(node);
        if (clobbersExitState(m_graph, node))
            m_exitOK = false;
        return node;
    }

    Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), Edge(child1), Edge(child2),
            Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), child1, child2, child3);
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
            Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), info1, info2,
            Edge(child1), Edge(child2), Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            op, currentNodeOrigin(), info1, info2, child1, child2, child3);
        return addToGraph(result);
    }

    Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
    {
        Node* result = m_graph.addNode(
            Node::VarArg, op, currentNodeOrigin(), info1, info2,
            m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
        addToGraph(result);

        m_numPassedVarArgs = 0;

        return result;
    }

    void addVarArgChild(Node* child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }

    void addVarArgChild(Edge child)
    {
        m_graph.m_varArgChildren.append(child);
        m_numPassedVarArgs++;
    }

    Node* addCallWithoutSettingResult(
        NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
        OpInfo prediction)
    {
        addVarArgChild(callee);
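        // By convention, the callee is the first vararg child of a call node; the
        // arguments, starting with 'this', are appended in order below.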
        size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);

        if (parameterSlots > m_parameterSlots)
            m_parameterSlots = parameterSlots;

        for (int i = 0; i < argCount; ++i)
            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));

        return addToGraph(Node::VarArg, op, opInfo, prediction);
    }

    Node* addCall(
        VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
        SpeculatedType prediction)
    {
        if (op == TailCall) {
            if (allInlineFramesAreTailCalls())
                return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
            op = TailCallInlinedCaller;
        }

        Node* call = addCallWithoutSettingResult(
            op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
        if (result.isValid())
            set(result, call);
        return call;
    }

    Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
    {
        // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
        // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
        // object's structure as soon as we make it a weakJSConstant.
        Node* objectNode = weakJSConstant(object);
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
        return objectNode;
    }

    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
    {
        auto getValueProfilePredictionForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
        {
            SpeculatedType prediction;
            {
                ConcurrentJSLocker locker(codeBlock->m_lock);
                prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
            }
            auto* fuzzerAgent = m_vm->fuzzerAgent();
            if (UNLIKELY(fuzzerAgent))
                return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
            return prediction;
        };

        SpeculatedType prediction = getValueProfilePredictionForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
        if (prediction != SpecNone)
            return prediction;

        // If we have no information about the values this
        // node generates, we check if by any chance it is
        // a tail call opcode. In that case, we walk up the
        // inline frames to find a call higher in the call
        // chain and use its prediction. If we only have
        // inlined tail call frames, we use SpecFullTop
        // to avoid a spurious OSR exit.
        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
        OpcodeID opcodeID = instruction->opcodeID();

        switch (opcodeID) {
        case op_tail_call:
        case op_tail_call_varargs:
        case op_tail_call_forward_arguments: {
            // Ideally the system would be more permissive about us returning BOTTOM instead
            // of TOP here; currently, returning BOTTOM would force an OSR exit. That is bad
            // because returning TOP causes anything that transitively touches this speculated
            // type to also become TOP during prediction propagation.
            // https://bugs.webkit.org/show_bug.cgi?id=164337
            if (!inlineCallFrame())
                return SpecFullTop;

            CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
            if (!codeOrigin)
                return SpecFullTop;

            InlineStackEntry* stack = m_inlineStackTop;
            while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
                stack = stack->m_caller;

            return getValueProfilePredictionForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
        }

        default:
            return SpecNone;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return SpecNone;
    }

    SpeculatedType getPrediction(unsigned bytecodeIndex)
    {
        SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);

        if (prediction == SpecNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }

    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_currentIndex);
    }

    SpeculatedType getPrediction()
    {
        return getPrediction(m_currentIndex);
    }

    ArrayMode getArrayMode(Array::Action action)
    {
        CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
        return getArrayMode(*profile, action);
    }

    ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
    {
        ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        bool makeSafe = profile.outOfBounds(locker);
        return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
    }

    Node* makeSafe(Node* node)
    {
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInt32InDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);

        if (!isX86() && node->op() == ArithMod)
            return node;

        {
            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
            if (arithProfile) {
                switch (node->op()) {
                case ArithAdd:
                case ArithSub:
                case ValueAdd:
                    if (arithProfile->didObserveDouble())
                        node->mergeFlags(NodeMayHaveDoubleResult);
                    if (arithProfile->didObserveNonNumeric())
                        node->mergeFlags(NodeMayHaveNonNumericResult);
                    if (arithProfile->didObserveBigInt())
                        node->mergeFlags(NodeMayHaveBigIntResult);
                    break;

                case ValueMul:
                case ArithMul: {
                    if (arithProfile->didObserveInt52Overflow())
                        node->mergeFlags(NodeMayOverflowInt52);
                    if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                        node->mergeFlags(NodeMayOverflowInt32InBaseline);
                    if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
                        node->mergeFlags(NodeMayNegZeroInBaseline);
                    if (arithProfile->didObserveDouble())
                        node->mergeFlags(NodeMayHaveDoubleResult);
                    if (arithProfile->didObserveNonNumeric())
                        node->mergeFlags(NodeMayHaveNonNumericResult);
                    if (arithProfile->didObserveBigInt())
                        node->mergeFlags(NodeMayHaveBigIntResult);
                    break;
                }
                case ValueNegate:
                case ArithNegate: {
                    if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
                        node->mergeFlags(NodeMayHaveDoubleResult);
                    if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
                        node->mergeFlags(NodeMayNegZeroInBaseline);
                    if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                        node->mergeFlags(NodeMayOverflowInt32InBaseline);
                    if (arithProfile->didObserveNonNumeric())
                        node->mergeFlags(NodeMayHaveNonNumericResult);
                    if (arithProfile->didObserveBigInt())
                        node->mergeFlags(NodeMayHaveBigIntResult);
                    break;
                }

                default:
                    break;
                }
            }
        }

        if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
            switch (node->op()) {
            case UInt32ToNumber:
            case ArithAdd:
            case ArithSub:
            case ValueAdd:
            case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
                node->mergeFlags(NodeMayOverflowInt32InBaseline);
                break;

            default:
                break;
            }
        }

        return node;
    }
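
    // Typical use of makeSafe() later in the parser (illustrative sketch, not code from
    // this section): wrap a freshly created arithmetic node so profiling-derived overflow
    // and negative-zero flags get merged in, e.g.
    //     set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2)));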

    Node* makeDivSafe(Node* node)
    {
        ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);

        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInt32InDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
            return node;

        // FIXME: It might be possible to make this more granular.
        node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);

        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
        if (arithProfile && arithProfile->didObserveBigInt())
            node->mergeFlags(NodeMayHaveBigIntResult);

        return node;
    }

    void noticeArgumentsUse()
    {
        // All of the arguments in this function need to be formatted as JSValues because we will
        // load from them in a random-access fashion and we don't want to have to switch on
        // format.

        for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
            argument->mergeShouldNeverUnbox(true);
    }

    bool needsDynamicLookup(ResolveType, OpcodeID);

    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The semantic origin of the current node, if different from m_currentIndex.
    CodeOrigin m_currentSemanticOrigin;
    // True if it's OK to OSR exit right now.
    bool m_exitOK { false };

    FrozenValue* m_constantUndefined;
    FrozenValue* m_constantNull;
    FrozenValue* m_constantNaN;
    FrozenValue* m_constantOne;
    Vector<Node*, 16> m_constants;

    HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for arguments to outgoing calls from this frame. This
    // number includes the CallFrame slots that we initialize for the callee
    // (but not the callee-initialized CallerFrame and ReturnPC slots).
    // This number is 0 if and only if this function is a leaf.
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;

    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;

        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;

        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }

        QueryableExitProfile m_exitProfile;

        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_switchRemap;

        // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
        // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
        Vector<BasicBlock*> m_unlinkedBlocks;

        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin.
        Vector<BasicBlock*> m_blockLinkingTargets;

        // Optional: a continuation block for returns to jump to. Early returns create it on demand if it does not already exist.
        BasicBlock* m_continuationBlock;

        VirtualRegister m_returnValue;

        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;

        ICStatusMap m_baselineMap;
        ICStatusContext m_optimizedContext;

        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;

        InlineStackEntry* m_caller;

        InlineStackEntry(
            ByteCodeParser*,
            CodeBlock*,
            CodeBlock* profiledBlock,
            JSFunction* callee, // Null if this is a closure call.
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            InlineCallFrame::Kind,
            BasicBlock* continuationBlock);

        ~InlineStackEntry();

        VirtualRegister remapOperand(VirtualRegister operand) const
        {
            if (!m_inlineCallFrame)
                return operand;

            ASSERT(!operand.isConstant());

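            // Operands in an inlined callee are numbered relative to the callee's own frame;
            // adding the inline frame's stackOffset rebases them onto the machine frame.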
| 1150 | return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset); |
| 1151 | } |
| 1152 | }; |
| 1153 | |
| 1154 | InlineStackEntry* m_inlineStackTop; |
| 1155 | |
| 1156 | ICStatusContextStack m_icContextStack; |
| 1157 | |
| 1158 | struct DelayedSetLocal { |
| 1159 | CodeOrigin m_origin; |
| 1160 | VirtualRegister m_operand; |
| 1161 | Node* m_value; |
| 1162 | SetMode m_setMode; |
| 1163 | |
| 1164 | DelayedSetLocal() { } |
| 1165 | DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode) |
| 1166 | : m_origin(origin) |
| 1167 | , m_operand(operand) |
| 1168 | , m_value(value) |
| 1169 | , m_setMode(setMode) |
| 1170 | { |
| 1171 | RELEASE_ASSERT(operand.isValid()); |
| 1172 | } |
| 1173 | |
| 1174 | Node* execute(ByteCodeParser* parser) |
| 1175 | { |
| 1176 | if (m_operand.isArgument()) |
| 1177 | return parser->setArgument(m_origin, m_operand, m_value, m_setMode); |
| 1178 | return parser->setLocal(m_origin, m_operand, m_value, m_setMode); |
| 1179 | } |
| 1180 | }; |
| 1181 | |
| 1182 | Vector<DelayedSetLocal, 2> m_setLocalQueue; |
| 1183 | |
| 1184 | const Instruction* m_currentInstruction; |
| 1185 | bool m_hasDebuggerEnabled; |
| 1186 | bool m_hasAnyForceOSRExits { false }; |
| 1187 | }; |
| 1188 | |
| 1189 | BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex) |
| 1190 | { |
| 1191 | ASSERT(bytecodeIndex != UINT_MAX); |
| 1192 | Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1)); |
| 1193 | BasicBlock* blockPtr = block.ptr(); |
| 1194 | // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin |
| 1195 | if (m_inlineStackTop->m_blockLinkingTargets.size()) |
| 1196 | ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex); |
| 1197 | m_inlineStackTop->m_blockLinkingTargets.append(blockPtr); |
| 1198 | m_graph.appendBlock(WTFMove(block)); |
| 1199 | return blockPtr; |
| 1200 | } |
| 1201 | |
| 1202 | BasicBlock* ByteCodeParser::allocateUntargetableBlock() |
| 1203 | { |
| 1204 | Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1)); |
| 1205 | BasicBlock* blockPtr = block.ptr(); |
| 1206 | m_graph.appendBlock(WTFMove(block)); |
| 1207 | return blockPtr; |
| 1208 | } |
| 1209 | |
| 1210 | void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex) |
| 1211 | { |
| 1212 | RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX); |
| 1213 | block->bytecodeBegin = bytecodeIndex; |
| 1214 | // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin |
| 1215 | if (m_inlineStackTop->m_blockLinkingTargets.size()) |
| 1216 | ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex); |
| 1217 | m_inlineStackTop->m_blockLinkingTargets.append(block); |
| 1218 | } |
| 1219 | |
| 1220 | void ByteCodeParser::addJumpTo(BasicBlock* block) |
| 1221 | { |
| 1222 | ASSERT(!m_currentBlock->terminal()); |
| 1223 | Node* jumpNode = addToGraph(Jump); |
| 1224 | jumpNode->targetBlock() = block; |
| 1225 | m_currentBlock->didLink(); |
| 1226 | } |
| 1227 | |
| 1228 | void ByteCodeParser::addJumpTo(unsigned bytecodeIndex) |
| 1229 | { |
| 1230 | ASSERT(!m_currentBlock->terminal()); |
| 1231 | addToGraph(Jump, OpInfo(bytecodeIndex)); |
| 1232 | m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); |
| 1233 | } |
| 1234 | |
| 1235 | template<typename CallOp> |
| 1236 | ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode) |
| 1237 | { |
| 1238 | auto bytecode = pc->as<CallOp>(); |
| 1239 | Node* callTarget = get(bytecode.m_callee); |
| 1240 | int registerOffset = -static_cast<int>(bytecode.m_argv); |
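// The bytecode records m_argv as a positive distance to the callee frame being set up;
// the DFG works with a non-positive register offset, hence the negation.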
| 1241 | |
| 1242 | CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( |
| 1243 | m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), |
| 1244 | m_inlineStackTop->m_baselineMap, m_icContextStack); |
| 1245 | |
| 1246 | InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode); |
| 1247 | |
| 1248 | return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget, |
| 1249 | bytecode.m_argc, registerOffset, callLinkStatus, getPrediction()); |
| 1250 | } |
| 1251 | |
| 1252 | void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget) |
| 1253 | { |
| 1254 | if (callTarget->isCellConstant()) |
| 1255 | callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell())); |
| 1256 | } |
| 1257 | |
| 1258 | ByteCodeParser::Terminality ByteCodeParser::handleCall( |
| 1259 | VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, |
| 1260 | Node* callTarget, int argumentCountIncludingThis, int registerOffset, |
| 1261 | CallLinkStatus callLinkStatus, SpeculatedType prediction) |
| 1262 | { |
| 1263 | ASSERT(registerOffset <= 0); |
| 1264 | |
| 1265 | refineStatically(callLinkStatus, callTarget); |
| 1266 | |
VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
| 1268 | |
| 1269 | // If we have profiling information about this call, and it did not behave too polymorphically, |
| 1270 | // we may be able to inline it, or in the case of recursive tail calls turn it into a jump. |
| 1271 | if (callLinkStatus.canOptimize()) { |
| 1272 | addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget); |
| 1273 | |
| 1274 | VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset); |
| 1275 | auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument, |
| 1276 | argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction); |
| 1277 | if (optimizationResult == CallOptimizationResult::OptimizedToJump) |
| 1278 | return Terminal; |
| 1279 | if (optimizationResult == CallOptimizationResult::Inlined) { |
| 1280 | if (UNLIKELY(m_graph.compilation())) |
| 1281 | m_graph.compilation()->noticeInlinedCall(); |
| 1282 | return NonTerminal; |
| 1283 | } |
| 1284 | } |
| 1285 | |
| 1286 | Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction); |
| 1287 | ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs); |
| 1288 | return callNode->op() == TailCall ? Terminal : NonTerminal; |
| 1289 | } |
| 1290 | |
| 1291 | template<typename CallOp> |
| 1292 | ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode) |
| 1293 | { |
| 1294 | auto bytecode = pc->as<CallOp>(); |
| 1295 | int firstFreeReg = bytecode.m_firstFree.offset(); |
| 1296 | int firstVarArgOffset = bytecode.m_firstVarArg; |
| 1297 | |
| 1298 | SpeculatedType prediction = getPrediction(); |
| 1299 | |
| 1300 | Node* callTarget = get(bytecode.m_callee); |
| 1301 | |
| 1302 | CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( |
| 1303 | m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), |
| 1304 | m_inlineStackTop->m_baselineMap, m_icContextStack); |
| 1305 | refineStatically(callLinkStatus, callTarget); |
| 1306 | |
VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
| 1308 | |
| 1309 | if (callLinkStatus.canOptimize()) { |
| 1310 | addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget); |
| 1311 | |
| 1312 | if (handleVarargsInlining(callTarget, bytecode.m_dst, |
| 1313 | callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments, |
| 1314 | firstVarArgOffset, op, |
| 1315 | InlineCallFrame::varargsKindFor(callMode))) { |
| 1316 | if (UNLIKELY(m_graph.compilation())) |
| 1317 | m_graph.compilation()->noticeInlinedCall(); |
| 1318 | return NonTerminal; |
| 1319 | } |
| 1320 | } |
| 1321 | |
| 1322 | CallVarargsData* data = m_graph.m_callVarargsData.add(); |
| 1323 | data->firstVarArgOffset = firstVarArgOffset; |
| 1324 | |
| 1325 | Node* thisChild = get(bytecode.m_thisValue); |
| 1326 | Node* argumentsChild = nullptr; |
| 1327 | if (op != TailCallForwardVarargs) |
| 1328 | argumentsChild = get(bytecode.m_arguments); |
| 1329 | |
| 1330 | if (op == TailCallVarargs || op == TailCallForwardVarargs) { |
| 1331 | if (allInlineFramesAreTailCalls()) { |
| 1332 | addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild); |
| 1333 | return Terminal; |
| 1334 | } |
| 1335 | op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller; |
| 1336 | } |
| 1337 | |
| 1338 | Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild); |
| 1339 | if (bytecode.m_dst.isValid()) |
| 1340 | set(bytecode.m_dst, call); |
| 1341 | return NonTerminal; |
| 1342 | } |
| 1343 | |
| 1344 | void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg) |
| 1345 | { |
| 1346 | Node* thisArgument; |
| 1347 | if (thisArgumentReg.isValid()) |
| 1348 | thisArgument = get(thisArgumentReg); |
| 1349 | else |
| 1350 | thisArgument = nullptr; |
| 1351 | |
| 1352 | JSCell* calleeCell; |
| 1353 | Node* callTargetForCheck; |
| 1354 | if (callee.isClosureCall()) { |
| 1355 | calleeCell = callee.executable(); |
| 1356 | callTargetForCheck = addToGraph(GetExecutable, callTarget); |
| 1357 | } else { |
| 1358 | calleeCell = callee.nonExecutableCallee(); |
| 1359 | callTargetForCheck = callTarget; |
| 1360 | } |
| 1361 | |
| 1362 | ASSERT(calleeCell); |
| 1363 | addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck); |
| 1364 | if (thisArgument) |
| 1365 | addToGraph(Phantom, thisArgument); |
| 1366 | } |
| 1367 | |
| 1368 | Node* ByteCodeParser::getArgumentCount() |
| 1369 | { |
| 1370 | Node* argumentCount; |
| 1371 | if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs()) |
| 1372 | argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value()); |
| 1373 | else |
| 1374 | argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only)); |
| 1375 | return argumentCount; |
| 1376 | } |
| 1377 | |
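// Emit Phantoms so the argument values stay live for OSR exit even though the
// optimized path that replaces the call may not otherwise use them.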
| 1378 | void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis) |
| 1379 | { |
| 1380 | for (int i = 0; i < argumentCountIncludingThis; ++i) |
| 1381 | addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset))); |
| 1382 | } |
| 1383 | |
| 1384 | template<typename ChecksFunctor> |
| 1385 | bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded) |
| 1386 | { |
| 1387 | if (UNLIKELY(!Options::optimizeRecursiveTailCalls())) |
| 1388 | return false; |
| 1389 | |
| 1390 | auto targetExecutable = callVariant.executable(); |
| 1391 | InlineStackEntry* stackEntry = m_inlineStackTop; |
| 1392 | do { |
| 1393 | if (targetExecutable != stackEntry->executable()) |
| 1394 | continue; |
VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
| 1396 | |
| 1397 | if (auto* callFrame = stackEntry->m_inlineCallFrame) { |
| 1398 | // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match. |
| 1399 | // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments. |
| 1400 | if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis)) |
| 1401 | continue; |
| 1402 | } else { |
| 1403 | // We are in the machine code entry (i.e. the original caller). |
| 1404 | // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack. |
| 1405 | if (argumentCountIncludingThis > m_codeBlock->numParameters()) |
| 1406 | return false; |
| 1407 | } |
| 1408 | |
| 1409 | // If an InlineCallFrame is not a closure, it was optimized using a constant callee. |
| 1410 | // Check if this is the same callee that we try to inline here. |
| 1411 | if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) { |
| 1412 | if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function()) |
| 1413 | continue; |
| 1414 | } |
| 1415 | |
| 1416 | // We must add some check that the profiling information was correct and the target of this call is what we thought. |
| 1417 | emitFunctionCheckIfNeeded(); |
| 1418 | // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock). |
| 1419 | flushForTerminal(); |
| 1420 | |
| 1421 | // We must set the callee to the right value |
| 1422 | if (stackEntry->m_inlineCallFrame) { |
| 1423 | if (stackEntry->m_inlineCallFrame->isClosureCall) |
| 1424 | setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet); |
| 1425 | } else |
| 1426 | addToGraph(SetCallee, callTargetNode); |
| 1427 | |
| 1428 | // We must set the arguments to the right values |
| 1429 | if (!stackEntry->m_inlineCallFrame) |
| 1430 | addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis)); |
| 1431 | int argIndex = 0; |
| 1432 | for (; argIndex < argumentCountIncludingThis; ++argIndex) { |
| 1433 | Node* value = get(virtualRegisterForArgument(argIndex, registerOffset)); |
| 1434 | setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet); |
| 1435 | } |
| 1436 | Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined)); |
| 1437 | for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex) |
| 1438 | setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet); |
| 1439 | |
| 1440 | // We must repeat the work of op_enter here as we will jump right after it. |
| 1441 | // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR. |
| 1442 | for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i) |
| 1443 | setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet); |
| 1444 | |
| 1445 | // We want to emit the SetLocals with an exit origin that points to the place we are jumping to. |
| 1446 | unsigned oldIndex = m_currentIndex; |
| 1447 | auto oldStackTop = m_inlineStackTop; |
| 1448 | m_inlineStackTop = stackEntry; |
| 1449 | m_currentIndex = opcodeLengths[op_enter]; |
| 1450 | m_exitOK = true; |
| 1451 | processSetLocalQueue(); |
| 1452 | m_currentIndex = oldIndex; |
| 1453 | m_inlineStackTop = oldStackTop; |
| 1454 | m_exitOK = false; |
| 1455 | |
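// Find the block that begins right after op_enter in the target code block; that is
// where this recursive tail call will jump to.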
| 1456 | BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock); |
| 1457 | RELEASE_ASSERT(entryBlockPtr); |
| 1458 | addJumpTo(*entryBlockPtr); |
| 1459 | return true; |
| 1460 | // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case. |
| 1461 | } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller)); |
| 1462 | |
| 1463 | // The tail call was not recursive |
| 1464 | return false; |
| 1465 | } |
| 1466 | |
| 1467 | unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind) |
| 1468 | { |
| 1469 | CallMode callMode = InlineCallFrame::callModeFor(kind); |
| 1470 | CodeSpecializationKind specializationKind = specializationKindFor(callMode); |
VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
| 1472 | |
| 1473 | if (m_hasDebuggerEnabled) { |
VERBOSE_LOG(" Failing because the debugger is in use.\n");
| 1475 | return UINT_MAX; |
| 1476 | } |
| 1477 | |
| 1478 | FunctionExecutable* executable = callee.functionExecutable(); |
| 1479 | if (!executable) { |
VERBOSE_LOG(" Failing because there is no function executable.\n");
| 1481 | return UINT_MAX; |
| 1482 | } |
| 1483 | |
| 1484 | // Do we have a code block, and does the code block's size match the heuristics/requirements for |
| 1485 | // being an inline candidate? We might not have a code block (1) if code was thrown away, |
// (2) if we simply hadn't actually made this call yet, or (3) the code is a builtin function and
// the specialization kind is construct. In the first two cases, we could still theoretically attempt
| 1488 | // to inline it if we had a static proof of what was being called; this might happen for example |
| 1489 | // if you call a global function, where watchpointing gives us static information. Overall, |
| 1490 | // it's a rare case because we expect that any hot callees would have already been compiled. |
| 1491 | CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind); |
| 1492 | if (!codeBlock) { |
VERBOSE_LOG(" Failing because no code block available.\n");
| 1494 | return UINT_MAX; |
| 1495 | } |
| 1496 | |
| 1497 | if (!Options::useArityFixupInlining()) { |
| 1498 | if (codeBlock->numParameters() > argumentCountIncludingThis) { |
VERBOSE_LOG(" Failing because of arity mismatch.\n");
| 1500 | return UINT_MAX; |
| 1501 | } |
| 1502 | } |
| 1503 | |
| 1504 | CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel( |
| 1505 | codeBlock, specializationKind, callee.isClosureCall()); |
VERBOSE_LOG(" Call mode: ", callMode, "\n");
VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
| 1513 | if (!canInline(capabilityLevel)) { |
VERBOSE_LOG(" Failing because the function is not inlineable.\n");
| 1515 | return UINT_MAX; |
| 1516 | } |
| 1517 | |
// Check if the caller is already too large. We do this check here because that's just
// where we happen to also have the callee's code block, and we want that for the
// purpose of unsetting SABI (shouldAlwaysBeInlined).
| 1521 | if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) { |
| 1522 | codeBlock->m_shouldAlwaysBeInlined = false; |
VERBOSE_LOG(" Failing because the caller is too large.\n");
| 1524 | return UINT_MAX; |
| 1525 | } |
| 1526 | |
| 1527 | // FIXME: this should be better at predicting how much bloat we will introduce by inlining |
| 1528 | // this function. |
| 1529 | // https://bugs.webkit.org/show_bug.cgi?id=127627 |
| 1530 | |
| 1531 | // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These |
| 1532 | // functions have very low fidelity profiling, and presumably they weren't very hot if they |
| 1533 | // haven't gotten to Baseline yet. Consider not inlining these functions. |
| 1534 | // https://bugs.webkit.org/show_bug.cgi?id=145503 |
| 1535 | |
| 1536 | // Have we exceeded inline stack depth, or are we trying to inline a recursive call to |
| 1537 | // too many levels? If either of these are detected, then don't inline. We adjust our |
| 1538 | // heuristics if we are dealing with a function that cannot otherwise be compiled. |
| 1539 | |
| 1540 | unsigned depth = 0; |
| 1541 | unsigned recursion = 0; |
| 1542 | |
| 1543 | for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) { |
| 1544 | ++depth; |
| 1545 | if (depth >= Options::maximumInliningDepth()) { |
VERBOSE_LOG(" Failing because depth exceeded.\n");
| 1547 | return UINT_MAX; |
| 1548 | } |
| 1549 | |
| 1550 | if (entry->executable() == executable) { |
| 1551 | ++recursion; |
| 1552 | if (recursion >= Options::maximumInliningRecursion()) { |
VERBOSE_LOG(" Failing because recursion detected.\n");
| 1554 | return UINT_MAX; |
| 1555 | } |
| 1556 | } |
| 1557 | } |
| 1558 | |
VERBOSE_LOG(" Inlining should be possible.\n");
| 1560 | |
| 1561 | // It might be possible to inline. |
| 1562 | return codeBlock->instructionCount(); |
| 1563 | } |
| 1564 | |
| 1565 | template<typename ChecksFunctor> |
| 1566 | void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks) |
| 1567 | { |
| 1568 | const Instruction* savedCurrentInstruction = m_currentInstruction; |
| 1569 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); |
| 1570 | |
| 1571 | ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX); |
| 1572 | |
| 1573 | CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind); |
| 1574 | insertChecks(codeBlock); |
| 1575 | |
| 1576 | // FIXME: Don't flush constants! |
| 1577 | |
// arityFixupCount and numberOfStackPaddingSlots are different. arityFixupCount does not take
// stack alignment into account, while numberOfStackPaddingSlots does. Consider the following case:
| 1580 | // |
| 1581 | // before: [ ... ][arg0][header] |
| 1582 | // after: [ ... ][ext ][arg1][arg0][header] |
| 1583 | // |
| 1584 | // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned. |
| 1585 | // We insert extra slots to align stack. |
| 1586 | int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0); |
| 1587 | int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis); |
| 1588 | ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters())); |
| 1589 | int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots; |
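// In the diagram above, arityFixupCount is 1 but two slots (including the "ext" padding slot)
// are added, so the new frame start moves down by numberOfStackPaddingSlots rather than by
// arityFixupCount.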
| 1590 | |
| 1591 | int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters; |
| 1592 | |
| 1593 | ensureLocals( |
| 1594 | VirtualRegister(inlineCallFrameStart).toLocal() + 1 + |
| 1595 | CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals()); |
| 1596 | |
| 1597 | size_t argumentPositionStart = m_graph.m_argumentPositions.size(); |
| 1598 | |
| 1599 | if (result.isValid()) |
| 1600 | result = m_inlineStackTop->remapOperand(result); |
| 1601 | |
| 1602 | VariableAccessData* calleeVariable = nullptr; |
| 1603 | if (callee.isClosureCall()) { |
| 1604 | Node* calleeSet = set( |
| 1605 | VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet); |
| 1606 | |
| 1607 | calleeVariable = calleeSet->variableAccessData(); |
| 1608 | calleeVariable->mergeShouldNeverUnbox(true); |
| 1609 | } |
| 1610 | |
| 1611 | if (arityFixupCount) { |
| 1612 | // Note: we do arity fixup in two phases: |
| 1613 | // 1. We get all the values we need and MovHint them to the expected locals. |
| 1614 | // 2. We SetLocal them inside the callee's CodeOrigin. This way, if we exit, the callee's |
| 1615 | // frame is already set up. If any SetLocal exits, we have a valid exit state. |
| 1616 | // This is required because if we didn't do this in two phases, we may exit in |
| 1617 | // the middle of arity fixup from the caller's CodeOrigin. This is unsound because if |
| 1618 | // we did the SetLocals in the caller's frame, the memcpy may clobber needed parts |
| 1619 | // of the frame right before exiting. For example, consider if we need to pad two args: |
| 1620 | // [arg3][arg2][arg1][arg0] |
| 1621 | // [fix ][fix ][arg3][arg2][arg1][arg0] |
| 1622 | // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check |
| 1623 | // for arg3's SetLocal in the caller's CodeOrigin, we'd exit with a frame like so: |
| 1624 | // [arg3][arg2][arg1][arg2][arg1][arg0] |
// And the caller would then just end up thinking its arguments are:
| 1626 | // [arg3][arg2][arg1][arg2] |
| 1627 | // which is incorrect. |
| 1628 | |
| 1629 | Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined)); |
// The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the count of arguments is not aligned.
// We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Since this argument
// count does not fulfill the stack alignment requirement, we have already inserted an extra slot.
//
// before: [ ... ][ext ][arg1][arg0][header]
//
// In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
// At that time, we can simply use this extra slot. So the fixed-up stack is the following.
| 1638 | // |
| 1639 | // before: [ ... ][ext ][arg1][arg0][header] |
| 1640 | // after: [ ... ][arg2][arg1][arg0][header] |
| 1641 | // |
| 1642 | // In such cases, we do not need to move frames. |
| 1643 | if (registerOffsetAfterFixup != registerOffset) { |
| 1644 | for (int index = 0; index < argumentCountIncludingThis; ++index) { |
| 1645 | Node* value = get(virtualRegisterForArgument(index, registerOffset)); |
| 1646 | VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index, registerOffsetAfterFixup)); |
| 1647 | addToGraph(MovHint, OpInfo(argumentToSet.offset()), value); |
| 1648 | m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet }); |
| 1649 | } |
| 1650 | } |
| 1651 | for (int index = 0; index < arityFixupCount; ++index) { |
| 1652 | VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index, registerOffsetAfterFixup)); |
| 1653 | addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined); |
| 1654 | m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet }); |
| 1655 | } |
| 1656 | |
| 1657 | // At this point, it's OK to OSR exit because we finished setting up |
| 1658 | // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin. |
| 1659 | } |
| 1660 | |
| 1661 | InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result, |
| 1662 | (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock); |
| 1663 | |
| 1664 | // This is where the actual inlining really happens. |
| 1665 | unsigned oldIndex = m_currentIndex; |
| 1666 | m_currentIndex = 0; |
| 1667 | |
| 1668 | // At this point, it's again OK to OSR exit. |
| 1669 | m_exitOK = true; |
| 1670 | addToGraph(ExitOK); |
| 1671 | |
| 1672 | processSetLocalQueue(); |
| 1673 | |
| 1674 | InlineVariableData inlineVariableData; |
| 1675 | inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame; |
| 1676 | inlineVariableData.argumentPositionStart = argumentPositionStart; |
inlineVariableData.calleeVariable = nullptr;
| 1678 | |
| 1679 | RELEASE_ASSERT( |
| 1680 | m_inlineStackTop->m_inlineCallFrame->isClosureCall |
| 1681 | == callee.isClosureCall()); |
| 1682 | if (callee.isClosureCall()) { |
| 1683 | RELEASE_ASSERT(calleeVariable); |
| 1684 | inlineVariableData.calleeVariable = calleeVariable; |
| 1685 | } |
| 1686 | |
| 1687 | m_graph.m_inlineVariableData.append(inlineVariableData); |
| 1688 | |
| 1689 | parseCodeBlock(); |
| 1690 | clearCaches(); // Reset our state now that we're back to the outer code. |
| 1691 | |
| 1692 | m_currentIndex = oldIndex; |
| 1693 | m_exitOK = false; |
| 1694 | |
| 1695 | linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); |
| 1696 | |
| 1697 | // Most functions have at least one op_ret and thus set up the continuation block. |
| 1698 | // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here. |
| 1699 | if (inlineStackEntry.m_continuationBlock) |
| 1700 | m_currentBlock = inlineStackEntry.m_continuationBlock; |
| 1701 | else |
| 1702 | m_currentBlock = allocateUntargetableBlock(); |
| 1703 | ASSERT(!m_currentBlock->terminal()); |
| 1704 | |
| 1705 | prepareToParseBlock(); |
| 1706 | m_currentInstruction = savedCurrentInstruction; |
| 1707 | } |
| 1708 | |
| 1709 | ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee) |
| 1710 | { |
VERBOSE_LOG(" Considering callee ", callee, "\n");
| 1712 | |
| 1713 | bool didInsertChecks = false; |
| 1714 | auto insertChecksWithAccounting = [&] () { |
| 1715 | if (needsToCheckCallee) |
| 1716 | emitFunctionChecks(callee, callTargetNode, thisArgument); |
| 1717 | didInsertChecks = true; |
| 1718 | }; |
| 1719 | |
| 1720 | if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) { |
| 1721 | RELEASE_ASSERT(didInsertChecks); |
| 1722 | return CallOptimizationResult::OptimizedToJump; |
| 1723 | } |
| 1724 | RELEASE_ASSERT(!didInsertChecks); |
| 1725 | |
| 1726 | if (!inliningBalance) |
| 1727 | return CallOptimizationResult::DidNothing; |
| 1728 | |
| 1729 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); |
| 1730 | |
| 1731 | auto endSpecialCase = [&] () { |
| 1732 | RELEASE_ASSERT(didInsertChecks); |
| 1733 | addToGraph(Phantom, callTargetNode); |
| 1734 | emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); |
| 1735 | inliningBalance--; |
| 1736 | if (continuationBlock) { |
| 1737 | m_currentIndex = nextOffset; |
| 1738 | m_exitOK = true; |
| 1739 | processSetLocalQueue(); |
| 1740 | addJumpTo(continuationBlock); |
| 1741 | } |
| 1742 | }; |
| 1743 | |
| 1744 | if (InternalFunction* function = callee.internalFunction()) { |
| 1745 | if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) { |
| 1746 | endSpecialCase(); |
| 1747 | return CallOptimizationResult::Inlined; |
| 1748 | } |
| 1749 | RELEASE_ASSERT(!didInsertChecks); |
| 1750 | return CallOptimizationResult::DidNothing; |
| 1751 | } |
| 1752 | |
| 1753 | Intrinsic intrinsic = callee.intrinsicFor(specializationKind); |
| 1754 | if (intrinsic != NoIntrinsic) { |
| 1755 | if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { |
| 1756 | endSpecialCase(); |
| 1757 | return CallOptimizationResult::Inlined; |
| 1758 | } |
| 1759 | RELEASE_ASSERT(!didInsertChecks); |
| 1760 | // We might still try to inline the Intrinsic because it might be a builtin JS function. |
| 1761 | } |
| 1762 | |
| 1763 | if (Options::useDOMJIT()) { |
| 1764 | if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) { |
| 1765 | if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { |
| 1766 | endSpecialCase(); |
| 1767 | return CallOptimizationResult::Inlined; |
| 1768 | } |
| 1769 | RELEASE_ASSERT(!didInsertChecks); |
| 1770 | } |
| 1771 | } |
| 1772 | |
| 1773 | unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind); |
| 1774 | if (myInliningCost > inliningBalance) |
| 1775 | return CallOptimizationResult::DidNothing; |
| 1776 | |
| 1777 | auto insertCheck = [&] (CodeBlock*) { |
| 1778 | if (needsToCheckCallee) |
| 1779 | emitFunctionChecks(callee, callTargetNode, thisArgument); |
| 1780 | }; |
| 1781 | inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck); |
| 1782 | inliningBalance -= myInliningCost; |
| 1783 | return CallOptimizationResult::Inlined; |
| 1784 | } |
| 1785 | |
| 1786 | bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result, |
| 1787 | const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument, |
| 1788 | VirtualRegister argumentsArgument, unsigned argumentsOffset, |
| 1789 | NodeType callOp, InlineCallFrame::Kind kind) |
| 1790 | { |
VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
| 1792 | if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) { |
VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
| 1794 | return false; |
| 1795 | } |
| 1796 | if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) { |
VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
| 1798 | return false; |
| 1799 | } |
| 1800 | |
| 1801 | CallVariant callVariant = callLinkStatus[0]; |
| 1802 | |
| 1803 | unsigned mandatoryMinimum; |
| 1804 | if (FunctionExecutable* functionExecutable = callVariant.functionExecutable()) |
| 1805 | mandatoryMinimum = functionExecutable->parameterCount(); |
| 1806 | else |
| 1807 | mandatoryMinimum = 0; |
| 1808 | |
| 1809 | // includes "this" |
| 1810 | unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1); |
| 1811 | |
| 1812 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); |
| 1813 | if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) { |
VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
| 1815 | return false; |
| 1816 | } |
| 1817 | |
| 1818 | int registerOffset = firstFreeReg + 1; |
| 1819 | registerOffset -= maxNumArguments; // includes "this" |
| 1820 | registerOffset -= CallFrame::headerSizeInRegisters; |
| 1821 | registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset); |
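// registerOffset is negative here, so rounding its magnitude up to the alignment and
// negating again moves the callee frame further down the stack while keeping it aligned.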
| 1822 | |
| 1823 | auto insertChecks = [&] (CodeBlock* codeBlock) { |
| 1824 | emitFunctionChecks(callVariant, callTargetNode, thisArgument); |
| 1825 | |
| 1826 | int remappedRegisterOffset = |
| 1827 | m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset(); |
| 1828 | |
| 1829 | ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal()); |
| 1830 | |
| 1831 | int argumentStart = registerOffset + CallFrame::headerSizeInRegisters; |
| 1832 | int remappedArgumentStart = |
| 1833 | m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset(); |
| 1834 | |
| 1835 | LoadVarargsData* data = m_graph.m_loadVarargsData.add(); |
| 1836 | data->start = VirtualRegister(remappedArgumentStart + 1); |
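// The loaded argument count will be stored into the argumentCount slot of the frame we
// are materializing.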
| 1837 | data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount); |
| 1838 | data->offset = argumentsOffset; |
| 1839 | data->limit = maxNumArguments; |
| 1840 | data->mandatoryMinimum = mandatoryMinimum; |
| 1841 | |
| 1842 | if (callOp == TailCallForwardVarargs) |
| 1843 | addToGraph(ForwardVarargs, OpInfo(data)); |
| 1844 | else |
| 1845 | addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument)); |
| 1846 | |
| 1847 | // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument |
| 1848 | // and argumentsArgument for the baseline JIT. However, we only need a Phantom for |
| 1849 | // callTargetNode because the other 2 are still in use and alive at this point. |
| 1850 | addToGraph(Phantom, callTargetNode); |
| 1851 | |
// In DFG IR before SSA, we cannot insert control flow between the
| 1853 | // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG |
| 1854 | // SSA. Fortunately, we also have other reasons for not inserting control flow |
| 1855 | // before SSA. |
| 1856 | |
| 1857 | VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount)); |
| 1858 | // This is pretty lame, but it will force the count to be flushed as an int. This doesn't |
| 1859 | // matter very much, since our use of a SetArgument and Flushes for this local slot is |
| 1860 | // mostly just a formality. |
| 1861 | countVariable->predict(SpecInt32Only); |
| 1862 | countVariable->mergeIsProfitableToUnbox(true); |
| 1863 | Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable)); |
| 1864 | m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount); |
| 1865 | |
| 1866 | set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet); |
| 1867 | for (unsigned argument = 1; argument < maxNumArguments; ++argument) { |
| 1868 | VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument)); |
| 1869 | variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit. |
| 1870 | |
| 1871 | // For a while it had been my intention to do things like this inside the |
| 1872 | // prediction injection phase. But in this case it's really best to do it here, |
| 1873 | // because it's here that we have access to the variable access datas for the |
| 1874 | // inlining we're about to do. |
| 1875 | // |
| 1876 | // Something else that's interesting here is that we'd really love to get |
| 1877 | // predictions from the arguments loaded at the callsite, rather than the |
| 1878 | // arguments received inside the callee. But that probably won't matter for most |
| 1879 | // calls. |
| 1880 | if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) { |
| 1881 | ConcurrentJSLocker locker(codeBlock->m_lock); |
| 1882 | ValueProfile& profile = codeBlock->valueProfileForArgument(argument); |
| 1883 | variable->predict(profile.computeUpdatedPrediction(locker)); |
| 1884 | } |
| 1885 | |
| 1886 | Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); |
| 1887 | m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument); |
| 1888 | } |
| 1889 | }; |
| 1890 | |
| 1891 | // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because |
| 1892 | // we currently don't have any way of getting profiling information for arguments to non-JS varargs |
| 1893 | // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow, |
// and there are no callsite value profiles and native functions won't have callee value profiles for
| 1895 | // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to |
| 1896 | // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without |
| 1897 | // calling LoadVarargs twice. |
| 1898 | inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks); |
| 1899 | |
VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
| 1901 | return true; |
| 1902 | } |
| 1903 | |
| 1904 | unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind) |
| 1905 | { |
| 1906 | unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount(); |
| 1907 | if (specializationKind == CodeForConstruct) |
| 1908 | inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount()); |
| 1909 | if (callLinkStatus.isClosureCall()) |
| 1910 | inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount()); |
| 1911 | return inliningBalance; |
| 1912 | } |
| 1913 | |
| 1914 | ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( |
| 1915 | Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus, |
| 1916 | int registerOffset, VirtualRegister thisArgument, |
| 1917 | int argumentCountIncludingThis, |
| 1918 | unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction) |
| 1919 | { |
VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
| 1921 | |
| 1922 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); |
| 1923 | unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind); |
| 1924 | |
| 1925 | // First check if we can avoid creating control flow. Our inliner does some CFG |
| 1926 | // simplification on the fly and this helps reduce compile times, but we can only leverage |
| 1927 | // this in cases where we don't need control flow diamonds to check the callee. |
| 1928 | if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { |
| 1929 | return handleCallVariant( |
| 1930 | callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument, |
| 1931 | argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true); |
| 1932 | } |
| 1933 | |
| 1934 | // We need to create some kind of switch over callee. For now we only do this if we believe that |
| 1935 | // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to |
| 1936 | // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in |
// the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
// we could improve that aspect by doing polymorphic inlining while still collecting that
// profiling information.
| 1940 | if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) { |
VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
| 1942 | return CallOptimizationResult::DidNothing; |
| 1943 | } |
| 1944 | |
| 1945 | // If the claim is that this did not originate from a stub, then we don't want to emit a switch |
| 1946 | // statement. Whenever the non-stub profiling says that it could take slow path, it really means that |
| 1947 | // it has no idea. |
| 1948 | if (!Options::usePolymorphicCallInliningForNonStubStatus() |
| 1949 | && !callLinkStatus.isBasedOnStub()) { |
VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
| 1951 | return CallOptimizationResult::DidNothing; |
| 1952 | } |
| 1953 | |
| 1954 | bool allAreClosureCalls = true; |
| 1955 | bool allAreDirectCalls = true; |
| 1956 | for (unsigned i = callLinkStatus.size(); i--;) { |
| 1957 | if (callLinkStatus[i].isClosureCall()) |
| 1958 | allAreDirectCalls = false; |
| 1959 | else |
| 1960 | allAreClosureCalls = false; |
| 1961 | } |
| 1962 | |
| 1963 | Node* thingToSwitchOn; |
| 1964 | if (allAreDirectCalls) |
| 1965 | thingToSwitchOn = callTargetNode; |
| 1966 | else if (allAreClosureCalls) |
| 1967 | thingToSwitchOn = addToGraph(GetExecutable, callTargetNode); |
| 1968 | else { |
| 1969 | // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases |
| 1970 | // where it would be beneficial. It might be best to handle these cases as if all calls were |
| 1971 | // closure calls. |
| 1972 | // https://bugs.webkit.org/show_bug.cgi?id=136020 |
VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
| 1974 | return CallOptimizationResult::DidNothing; |
| 1975 | } |
| 1976 | |
VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
| 1978 | |
| 1979 | // This makes me wish that we were in SSA all the time. We need to pick a variable into which to |
| 1980 | // store the callee so that it will be accessible to all of the blocks we're about to create. We |
| 1981 | // get away with doing an immediate-set here because we wouldn't have performed any side effects |
| 1982 | // yet. |
VERBOSE_LOG("Register offset: ", registerOffset, "\n");
| 1984 | VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee); |
| 1985 | calleeReg = m_inlineStackTop->remapOperand(calleeReg); |
VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
| 1987 | setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush); |
| 1988 | |
| 1989 | // It's OK to exit right now, even though we set some locals. That's because those locals are not |
| 1990 | // user-visible. |
| 1991 | m_exitOK = true; |
| 1992 | addToGraph(ExitOK); |
| 1993 | |
| 1994 | SwitchData& data = *m_graph.m_switchData.add(); |
| 1995 | data.kind = SwitchCell; |
| 1996 | addToGraph(Switch, OpInfo(&data), thingToSwitchOn); |
| 1997 | m_currentBlock->didLink(); |
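// Each switch case will jump to a freshly allocated block that inlines one callee; the
// fallthrough goes to the slow path block created below.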
| 1998 | |
| 1999 | BasicBlock* continuationBlock = allocateUntargetableBlock(); |
VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
| 2001 | |
| 2002 | // We may force this true if we give up on inlining any of the edges. |
| 2003 | bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath(); |
| 2004 | |
VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
| 2006 | |
| 2007 | unsigned oldOffset = m_currentIndex; |
| 2008 | for (unsigned i = 0; i < callLinkStatus.size(); ++i) { |
| 2009 | m_currentIndex = oldOffset; |
| 2010 | BasicBlock* calleeEntryBlock = allocateUntargetableBlock(); |
| 2011 | m_currentBlock = calleeEntryBlock; |
| 2012 | prepareToParseBlock(); |
| 2013 | |
| 2014 | // At the top of each switch case, we can exit. |
| 2015 | m_exitOK = true; |
| 2016 | |
| 2017 | Node* myCallTargetNode = getDirect(calleeReg); |
| 2018 | |
| 2019 | auto inliningResult = handleCallVariant( |
| 2020 | myCallTargetNode, result, callLinkStatus[i], registerOffset, |
| 2021 | thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction, |
| 2022 | inliningBalance, continuationBlock, false); |
| 2023 | |
| 2024 | if (inliningResult == CallOptimizationResult::DidNothing) { |
| 2025 | // That failed so we let the block die. Nothing interesting should have been added to |
| 2026 | // the block. We also give up on inlining any of the (less frequent) callees. |
| 2027 | ASSERT(m_graph.m_blocks.last() == m_currentBlock); |
| 2028 | m_graph.killBlockAndItsContents(m_currentBlock); |
| 2029 | m_graph.m_blocks.removeLast(); |
VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
| 2031 | |
| 2032 | // The fact that inlining failed means we need a slow path. |
| 2033 | couldTakeSlowPath = true; |
| 2034 | break; |
| 2035 | } |
| 2036 | |
| 2037 | JSCell* thingToCaseOn; |
| 2038 | if (allAreDirectCalls) |
| 2039 | thingToCaseOn = callLinkStatus[i].nonExecutableCallee(); |
| 2040 | else { |
| 2041 | ASSERT(allAreClosureCalls); |
| 2042 | thingToCaseOn = callLinkStatus[i].executable(); |
| 2043 | } |
| 2044 | data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock)); |
VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
| 2046 | } |
| 2047 | |
| 2048 | // Slow path block |
| 2049 | m_currentBlock = allocateUntargetableBlock(); |
| 2050 | m_currentIndex = oldOffset; |
| 2051 | m_exitOK = true; |
| 2052 | data.fallThrough = BranchTarget(m_currentBlock); |
| 2053 | prepareToParseBlock(); |
| 2054 | Node* myCallTargetNode = getDirect(calleeReg); |
| 2055 | if (couldTakeSlowPath) { |
| 2056 | addCall( |
| 2057 | result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis, |
| 2058 | registerOffset, prediction); |
VERBOSE_LOG("We added a call in the slow path\n");
| 2060 | } else { |
| 2061 | addToGraph(CheckBadCell); |
| 2062 | addToGraph(Phantom, myCallTargetNode); |
| 2063 | emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); |
| 2064 | |
| 2065 | set(result, addToGraph(BottomValue)); |
VERBOSE_LOG("couldTakeSlowPath was false\n");
| 2067 | } |
| 2068 | |
| 2069 | m_currentIndex = nextOffset; |
| 2070 | m_exitOK = true; // Origin changed, so it's fine to exit again. |
| 2071 | processSetLocalQueue(); |
| 2072 | |
if (Node* terminal = m_currentBlock->terminal())
ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
else
addJumpTo(continuationBlock);
| 2078 | |
| 2079 | prepareToParseBlock(); |
| 2080 | |
| 2081 | m_currentIndex = oldOffset; |
| 2082 | m_currentBlock = continuationBlock; |
| 2083 | m_exitOK = true; |
| 2084 | |
VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
| 2086 | return CallOptimizationResult::Inlined; |
| 2087 | } |
| 2088 | |
| 2089 | template<typename ChecksFunctor> |
| 2090 | bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks) |
| 2091 | { |
| 2092 | ASSERT(op == ArithMin || op == ArithMax); |
| 2093 | |
| 2094 | if (argumentCountIncludingThis == 1) { |
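// With no actual arguments, Math.max() evaluates to -Infinity and Math.min() to +Infinity.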
| 2095 | insertChecks(); |
| 2096 | double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity(); |
| 2097 | set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit))))); |
| 2098 | return true; |
| 2099 | } |
| 2100 | |
| 2101 | if (argumentCountIncludingThis == 2) { |
| 2102 | insertChecks(); |
Node* resultNode = get(virtualRegisterForArgument(1, registerOffset));
| 2104 | addToGraph(Phantom, Edge(resultNode, NumberUse)); |
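// The NumberUse edge on the Phantom above speculates that the sole argument is already a
// number, so passing it through unchanged matches Math.min/max; otherwise we OSR exit and
// the generic path performs the conversion.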
| 2105 | set(result, resultNode); |
| 2106 | return true; |
| 2107 | } |
| 2108 | |
| 2109 | if (argumentCountIncludingThis == 3) { |
| 2110 | insertChecks(); |
| 2111 | set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); |
| 2112 | return true; |
| 2113 | } |
| 2114 | |
// Don't handle three or more actual arguments (argumentCountIncludingThis >= 4) for now.
| 2116 | return false; |
| 2117 | } |
| 2118 | |
| 2119 | template<typename ChecksFunctor> |
| 2120 | bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) |
| 2121 | { |
VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
| 2123 | |
| 2124 | if (!isOpcodeShape<OpCallShape>(m_currentInstruction)) |
| 2125 | return false; |
| 2126 | |
| 2127 | // It so happens that the code below doesn't handle the invalid result case. We could fix that, but |
| 2128 | // it would only benefit intrinsics called as setters, like if you do: |
| 2129 | // |
| 2130 | // o.__defineSetter__("foo", Math.pow) |
| 2131 | // |
| 2132 | // Which is extremely amusing, but probably not worth optimizing. |
| 2133 | if (!result.isValid()) |
| 2134 | return false; |
| 2135 | |
| 2136 | bool didSetResult = false; |
| 2137 | auto setResult = [&] (Node* node) { |
| 2138 | RELEASE_ASSERT(!didSetResult); |
| 2139 | set(result, node); |
| 2140 | didSetResult = true; |
| 2141 | }; |
| 2142 | |
| 2143 | auto inlineIntrinsic = [&] { |
| 2144 | switch (intrinsic) { |
| 2145 | |
| 2146 | // Intrinsic Functions: |
| 2147 | |
| 2148 | case AbsIntrinsic: { |
| 2149 | if (argumentCountIncludingThis == 1) { // Math.abs() |
| 2150 | insertChecks(); |
| 2151 | setResult(addToGraph(JSConstant, OpInfo(m_constantNaN))); |
| 2152 | return true; |
| 2153 | } |
| 2154 | |
| 2155 | if (!MacroAssembler::supportsFloatingPointAbs()) |
| 2156 | return false; |
| 2157 | |
| 2158 | insertChecks(); |
| 2159 | Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset))); |
| 2160 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) |
| 2161 | node->mergeFlags(NodeMayOverflowInt32InDFG); |
| 2162 | setResult(node); |
| 2163 | return true; |
| 2164 | } |
| 2165 | |
| 2166 | case MinIntrinsic: |
| 2167 | case MaxIntrinsic: |
| 2168 | if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) { |
| 2169 | didSetResult = true; |
| 2170 | return true; |
| 2171 | } |
| 2172 | return false; |
| 2173 | |
| 2174 | #define DFG_ARITH_UNARY(capitalizedName, lowerName) \ |
| 2175 | case capitalizedName##Intrinsic: |
| 2176 | FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY) |
| 2177 | #undef DFG_ARITH_UNARY |
| 2178 | { |
| 2179 | if (argumentCountIncludingThis == 1) { |
| 2180 | insertChecks(); |
| 2181 | setResult(addToGraph(JSConstant, OpInfo(m_constantNaN))); |
| 2182 | return true; |
| 2183 | } |
| 2184 | Arith::UnaryType type = Arith::UnaryType::Sin; |
| 2185 | switch (intrinsic) { |
| 2186 | #define DFG_ARITH_UNARY(capitalizedName, lowerName) \ |
| 2187 | case capitalizedName##Intrinsic: \ |
| 2188 | type = Arith::UnaryType::capitalizedName; \ |
| 2189 | break; |
| 2190 | FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY) |
| 2191 | #undef DFG_ARITH_UNARY |
| 2192 | default: |
| 2193 | RELEASE_ASSERT_NOT_REACHED(); |
| 2194 | } |
| 2195 | insertChecks(); |
| 2196 | setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset)))); |
| 2197 | return true; |
| 2198 | } |
| 2199 | |
| 2200 | case FRoundIntrinsic: |
| 2201 | case SqrtIntrinsic: { |
| 2202 | if (argumentCountIncludingThis == 1) { |
| 2203 | insertChecks(); |
| 2204 | setResult(addToGraph(JSConstant, OpInfo(m_constantNaN))); |
| 2205 | return true; |
| 2206 | } |
| 2207 | |
| 2208 | NodeType nodeType = Unreachable; |
| 2209 | switch (intrinsic) { |
| 2210 | case FRoundIntrinsic: |
| 2211 | nodeType = ArithFRound; |
| 2212 | break; |
| 2213 | case SqrtIntrinsic: |
| 2214 | nodeType = ArithSqrt; |
| 2215 | break; |
| 2216 | default: |
| 2217 | RELEASE_ASSERT_NOT_REACHED(); |
| 2218 | } |
| 2219 | insertChecks(); |
| 2220 | setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset)))); |
| 2221 | return true; |
| 2222 | } |
| 2223 | |
| 2224 | case PowIntrinsic: { |
| 2225 | if (argumentCountIncludingThis < 3) { |
| 2226 | // Math.pow() and Math.pow(x) return NaN. |
| 2227 | insertChecks(); |
| 2228 | setResult(addToGraph(JSConstant, OpInfo(m_constantNaN))); |
| 2229 | return true; |
| 2230 | } |
| 2231 | insertChecks(); |
| 2232 | VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset); |
| 2233 | VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset); |
| 2234 | setResult(addToGraph(ArithPow, get(xOperand), get(yOperand))); |
| 2235 | return true; |
| 2236 | } |
| 2237 | |
| 2238 | case ArrayPushIntrinsic: { |
| 2239 | #if USE(JSVALUE32_64) |
| 2240 | if (isX86()) { |
| 2241 | if (argumentCountIncludingThis > 2) |
| 2242 | return false; |
| 2243 | } |
| 2244 | #endif |
| 2245 | |
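// Pushing this many elements at once could produce indices in the sparse range, which the
// ArrayPush node is not set up to handle; leave that case to the generic call.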
| 2246 | if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX) |
| 2247 | return false; |
| 2248 | |
| 2249 | ArrayMode arrayMode = getArrayMode(Array::Write); |
| 2250 | if (!arrayMode.isJSArray()) |
| 2251 | return false; |
| 2252 | switch (arrayMode.type()) { |
| 2253 | case Array::Int32: |
| 2254 | case Array::Double: |
| 2255 | case Array::Contiguous: |
| 2256 | case Array::ArrayStorage: { |
| 2257 | insertChecks(); |
| 2258 | |
| 2259 | addVarArgChild(nullptr); // For storage. |
| 2260 | for (int i = 0; i < argumentCountIncludingThis; ++i) |
| 2261 | addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); |
| 2262 | Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction)); |
| 2263 | setResult(arrayPush); |
| 2264 | return true; |
| 2265 | } |
| 2266 | |
| 2267 | default: |
| 2268 | return false; |
| 2269 | } |
| 2270 | } |
| 2271 | |
| 2272 | case ArraySliceIntrinsic: { |
| 2273 | #if USE(JSVALUE32_64) |
| 2274 | if (isX86()) { |
| 2275 | // There aren't enough registers for this to be done easily. |
| 2276 | return false; |
| 2277 | } |
| 2278 | #endif |
| 2279 | if (argumentCountIncludingThis < 1) |
| 2280 | return false; |
| 2281 | |
| 2282 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache) |
| 2283 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) |
| 2284 | return false; |
| 2285 | |
| 2286 | ArrayMode arrayMode = getArrayMode(Array::Read); |
| 2287 | if (!arrayMode.isJSArray()) |
| 2288 | return false; |
| 2289 | |
| 2290 | if (!arrayMode.isJSArrayWithOriginalStructure()) |
| 2291 | return false; |
| 2292 | |
| 2293 | switch (arrayMode.type()) { |
| 2294 | case Array::Double: |
| 2295 | case Array::Int32: |
| 2296 | case Array::Contiguous: { |
| 2297 | JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic); |
| 2298 | |
| 2299 | Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm); |
| 2300 | Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm); |
| 2301 | |
// FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
| 2303 | // https://bugs.webkit.org/show_bug.cgi?id=173171 |
| 2304 | if (globalObject->arraySpeciesWatchpoint().state() == IsWatched |
| 2305 | && globalObject->havingABadTimeWatchpoint()->isStillValid() |
| 2306 | && arrayPrototypeStructure->transitionWatchpointSetIsStillValid() |
| 2307 | && objectPrototypeStructure->transitionWatchpointSetIsStillValid() |
| 2308 | && globalObject->arrayPrototypeChainIsSane()) { |
| 2309 | |
| 2310 | m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint()); |
| 2311 | m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint()); |
| 2312 | m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure); |
| 2313 | m_graph.registerAndWatchStructureTransition(objectPrototypeStructure); |
| 2314 | |
| 2315 | insertChecks(); |
| 2316 | |
| 2317 | Node* array = get(virtualRegisterForArgument(0, registerOffset)); |
| 2318 | // We do a few things here to prove that we aren't skipping doing side-effects in an observable way: |
// 1. We ensure that the "constructor" property hasn't been changed (because the observable
// effects of slice require that we perform a Get(array, "constructor"), and we can skip
// that if we're an original array structure). We can relax this in the future by using
// TryGetById and CheckCell.
| 2323 | // |
| 2324 | // 2. We check that the array we're calling slice on has the same global object as the lexical |
// global object that this code is running in. This requirement is necessary because we set up the
| 2326 | // watchpoints above on the lexical global object. This means that code that calls slice on |
| 2327 | // arrays produced by other global objects won't get this optimization. We could relax this |
| 2328 | // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code |
| 2329 | // we generate instead of registering it as a watchpoint that would invalidate the compilation. |
| 2330 | // |
| 2331 | // 3. By proving we're an original array structure, we guarantee that the incoming array |
| 2332 | // isn't a subclass of Array. |
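| | //
| | // A sketch of what these checks rule out (illustrative JS, not exhaustive):
| | //
| | //     class MyArray extends Array { }
| | //     new MyArray(1, 2, 3).slice(0, 1); // must allocate a MyArray via @@species
| | //
| | //     const a = [1, 2, 3];
| | //     a.constructor = function () { }; // own "constructor" property; Get(a, "constructor") observes it
| | //
| | // The first case is excluded by the original-structure proof (point 3); the second by the
| | // structure check below plus the species watchpoint registered above.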
| 2333 | |
| 2334 | StructureSet structureSet; |
| 2335 | structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32)); |
| 2336 | structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous)); |
| 2337 | structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble)); |
| 2338 | structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32)); |
| 2339 | structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous)); |
| 2340 | structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble)); |
| 2341 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array); |
| 2342 | |
| 2343 | addVarArgChild(array); |
| 2344 | if (argumentCountIncludingThis >= 2) |
| 2345 | addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index. |
| 2346 | if (argumentCountIncludingThis >= 3) |
| 2347 | addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index. |
| 2348 | addVarArgChild(addToGraph(GetButterfly, array)); |
| 2349 | |
| 2350 | Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo()); |
| 2351 | setResult(arraySlice); |
| 2352 | return true; |
| 2353 | } |
| 2354 | |
| 2355 | return false; |
| 2356 | } |
| 2357 | default: |
| 2358 | return false; |
| 2359 | } |
| 2360 | |
| 2361 | RELEASE_ASSERT_NOT_REACHED(); |
| 2362 | return false; |
| 2363 | } |
| 2364 | |
| 2365 | case ArrayIndexOfIntrinsic: { |
| 2366 | if (argumentCountIncludingThis < 2) |
| 2367 | return false; |
| 2368 | |
| 2369 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType) |
| 2370 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache) |
| 2371 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) |
| 2372 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 2373 | return false; |
| 2374 | |
| 2375 | ArrayMode arrayMode = getArrayMode(Array::Read); |
| 2376 | if (!arrayMode.isJSArray()) |
| 2377 | return false; |
| 2378 | |
| 2379 | if (!arrayMode.isJSArrayWithOriginalStructure()) |
| 2380 | return false; |
| 2381 | |
| 2382 | // We do not want to convert arrays into one type just to perform indexOf. |
| 2383 | if (arrayMode.doesConversion()) |
| 2384 | return false; |
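| | // E.g. if profiling saw both Int32 and Double arrays here, a converting ArrayMode would
| | // rewrite one storage shape into the other just to answer a query; we'd rather fall back
| | // to the generic path than mutate the array's backing store for a read.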
| 2385 | |
| 2386 | switch (arrayMode.type()) { |
| 2387 | case Array::Double: |
| 2388 | case Array::Int32: |
| 2389 | case Array::Contiguous: { |
| 2390 | JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic); |
| 2391 | |
| 2392 | Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm); |
| 2393 | Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm); |
| 2394 | |
| 2395 | // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
| 2396 | // https://bugs.webkit.org/show_bug.cgi?id=173171 |
| 2397 | if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid() |
| 2398 | && objectPrototypeStructure->transitionWatchpointSetIsStillValid() |
| 2399 | && globalObject->arrayPrototypeChainIsSane()) { |
| 2400 | |
| 2401 | m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure); |
| 2402 | m_graph.registerAndWatchStructureTransition(objectPrototypeStructure); |
| 2403 | |
| 2404 | insertChecks(); |
| 2405 | |
| 2406 | Node* array = get(virtualRegisterForArgument(0, registerOffset)); |
| 2407 | addVarArgChild(array); |
| 2408 | addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element. |
| 2409 | if (argumentCountIncludingThis >= 3) |
| 2410 | addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index. |
| 2411 | addVarArgChild(nullptr); |
| 2412 | |
| 2413 | Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo()); |
| 2414 | setResult(node); |
| 2415 | return true; |
| 2416 | } |
| 2417 | |
| 2418 | return false; |
| 2419 | } |
| 2420 | default: |
| 2421 | return false; |
| 2422 | } |
| 2423 | |
| 2424 | RELEASE_ASSERT_NOT_REACHED(); |
| 2425 | return false; |
| 2427 | }
| 2428 | |
| 2429 | case ArrayPopIntrinsic: { |
| 2430 | if (argumentCountIncludingThis != 1) |
| 2431 | return false; |
| 2432 | |
| 2433 | ArrayMode arrayMode = getArrayMode(Array::Write); |
| 2434 | if (!arrayMode.isJSArray()) |
| 2435 | return false; |
| 2436 | switch (arrayMode.type()) { |
| 2437 | case Array::Int32: |
| 2438 | case Array::Double: |
| 2439 | case Array::Contiguous: |
| 2440 | case Array::ArrayStorage: { |
| 2441 | insertChecks(); |
| 2442 | Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); |
| 2443 | setResult(arrayPop); |
| 2444 | return true; |
| 2445 | } |
| 2446 | |
| 2447 | default: |
| 2448 | return false; |
| 2449 | } |
| 2450 | } |
| 2451 | |
| 2452 | case AtomicsAddIntrinsic: |
| 2453 | case AtomicsAndIntrinsic: |
| 2454 | case AtomicsCompareExchangeIntrinsic: |
| 2455 | case AtomicsExchangeIntrinsic: |
| 2456 | case AtomicsIsLockFreeIntrinsic: |
| 2457 | case AtomicsLoadIntrinsic: |
| 2458 | case AtomicsOrIntrinsic: |
| 2459 | case AtomicsStoreIntrinsic: |
| 2460 | case AtomicsSubIntrinsic: |
| 2461 | case AtomicsXorIntrinsic: { |
| 2462 | if (!is64Bit()) |
| 2463 | return false; |
| 2464 | |
| 2465 | NodeType op = LastNodeType; |
| 2466 | Array::Action action = Array::Write; |
| 2467 | unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer. |
| 2468 | switch (intrinsic) { |
| 2469 | case AtomicsAddIntrinsic: |
| 2470 | op = AtomicsAdd; |
| 2471 | numArgs = 3; |
| 2472 | break; |
| 2473 | case AtomicsAndIntrinsic: |
| 2474 | op = AtomicsAnd; |
| 2475 | numArgs = 3; |
| 2476 | break; |
| 2477 | case AtomicsCompareExchangeIntrinsic: |
| 2478 | op = AtomicsCompareExchange; |
| 2479 | numArgs = 4; |
| 2480 | break; |
| 2481 | case AtomicsExchangeIntrinsic: |
| 2482 | op = AtomicsExchange; |
| 2483 | numArgs = 3; |
| 2484 | break; |
| 2485 | case AtomicsIsLockFreeIntrinsic: |
| 2486 | // This one gets no backing store, but it needs no special logic since it also does
| 2487 | // not use varargs.
| 2488 | op = AtomicsIsLockFree; |
| 2489 | numArgs = 1; |
| 2490 | break; |
| 2491 | case AtomicsLoadIntrinsic: |
| 2492 | op = AtomicsLoad; |
| 2493 | numArgs = 2; |
| 2494 | action = Array::Read; |
| 2495 | break; |
| 2496 | case AtomicsOrIntrinsic: |
| 2497 | op = AtomicsOr; |
| 2498 | numArgs = 3; |
| 2499 | break; |
| 2500 | case AtomicsStoreIntrinsic: |
| 2501 | op = AtomicsStore; |
| 2502 | numArgs = 3; |
| 2503 | break; |
| 2504 | case AtomicsSubIntrinsic: |
| 2505 | op = AtomicsSub; |
| 2506 | numArgs = 3; |
| 2507 | break; |
| 2508 | case AtomicsXorIntrinsic: |
| 2509 | op = AtomicsXor; |
| 2510 | numArgs = 3; |
| 2511 | break; |
| 2512 | default: |
| 2513 | RELEASE_ASSERT_NOT_REACHED(); |
| 2514 | break; |
| 2515 | } |
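| | // For reference, the JS-level shapes behind these arities (a sketch):
| | //
| | //     Atomics.add(typedArray, index, value)                       // numArgs == 3
| | //     Atomics.compareExchange(typedArray, index, expected, value) // numArgs == 4
| | //     Atomics.load(typedArray, index)                             // numArgs == 2
| | //     Atomics.isLockFree(byteSize)                                // numArgs == 1, no backing store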
| 2516 | |
| 2517 | if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs) |
| 2518 | return false; |
| 2519 | |
| 2520 | insertChecks(); |
| 2521 | |
| 2522 | Vector<Node*, 3> args; |
| 2523 | for (unsigned i = 0; i < numArgs; ++i) |
| 2524 | args.append(get(virtualRegisterForArgument(1 + i, registerOffset))); |
| 2525 | |
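| | // A DFG node carries at most three fixed children; counting the implicit backing-store
| | // child, ops with numArgs <= 2 fit the fixed form (unused slots stay nullptr until the
| | // storage child is filled in later, if the fast path applies), while the wider atomics
| | // take the varargs form below.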
| 2526 | Node* resultNode; |
| 2527 | if (numArgs + 1 <= 3) { |
| 2528 | while (args.size() < 3) |
| 2529 | args.append(nullptr); |
| 2530 | resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]); |
| 2531 | } else { |
| 2532 | for (Node* node : args) |
| 2533 | addVarArgChild(node); |
| 2534 | addVarArgChild(nullptr); |
| 2535 | resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction)); |
| 2536 | } |
| 2537 | |
| 2538 | setResult(resultNode); |
| 2539 | return true; |
| 2540 | } |
| 2541 | |
| 2542 | case ParseIntIntrinsic: { |
| 2543 | if (argumentCountIncludingThis < 2) |
| 2544 | return false; |
| 2545 | |
| 2546 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 2547 | return false; |
| 2548 | |
| 2549 | insertChecks(); |
| 2550 | VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset); |
| 2551 | Node* parseInt; |
| 2552 | if (argumentCountIncludingThis == 2) |
| 2553 | parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand)); |
| 2554 | else { |
| 2555 | ASSERT(argumentCountIncludingThis > 2); |
| 2556 | VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset); |
| 2557 | parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand)); |
| 2558 | } |
| 2559 | setResult(parseInt); |
| 2560 | return true; |
| 2561 | } |
| 2562 | |
| 2563 | case CharCodeAtIntrinsic: { |
| 2564 | if (argumentCountIncludingThis != 2) |
| 2565 | return false; |
| 2566 | |
| 2567 | insertChecks(); |
| 2568 | VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); |
| 2569 | VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); |
| 2570 | Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); |
| 2571 | |
| 2572 | setResult(charCode); |
| 2573 | return true; |
| 2574 | } |
| 2575 | |
| 2576 | case CharAtIntrinsic: { |
| 2577 | if (argumentCountIncludingThis != 2) |
| 2578 | return false; |
| 2579 | |
| 2580 | insertChecks(); |
| 2581 | VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); |
| 2582 | VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); |
| 2583 | Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); |
| 2584 | |
| 2585 | setResult(charCode); |
| 2586 | return true; |
| 2587 | } |
| 2588 | case Clz32Intrinsic: { |
| 2589 | insertChecks(); |
| 2590 | if (argumentCountIncludingThis == 1) |
| 2591 | setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); |
| 2592 | else { |
| 2593 | Node* operand = get(virtualRegisterForArgument(1, registerOffset)); |
| 2594 | setResult(addToGraph(ArithClz32, operand)); |
| 2595 | } |
| 2596 | return true; |
| 2597 | } |
| 2598 | case FromCharCodeIntrinsic: { |
| 2599 | if (argumentCountIncludingThis != 2) |
| 2600 | return false; |
| 2601 | |
| 2602 | insertChecks(); |
| 2603 | VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); |
| 2604 | Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); |
| 2605 | |
| 2606 | setResult(charCode); |
| 2607 | |
| 2608 | return true; |
| 2609 | } |
| 2610 | |
| 2611 | case RegExpExecIntrinsic: { |
| 2612 | if (argumentCountIncludingThis != 2) |
| 2613 | return false; |
| 2614 | |
| 2615 | insertChecks(); |
| 2616 | Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); |
| 2617 | setResult(regExpExec); |
| 2618 | |
| 2619 | return true; |
| 2620 | } |
| 2621 | |
| 2622 | case RegExpTestIntrinsic: |
| 2623 | case RegExpTestFastIntrinsic: { |
| 2624 | if (argumentCountIncludingThis != 2) |
| 2625 | return false; |
| 2626 | |
| 2627 | if (intrinsic == RegExpTestIntrinsic) { |
| 2628 | // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing. |
| 2629 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) |
| 2630 | return false; |
| 2631 | |
| 2632 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); |
| 2633 | Structure* regExpStructure = globalObject->regExpStructure(); |
| 2634 | m_graph.registerStructure(regExpStructure); |
| 2635 | ASSERT(regExpStructure->storedPrototype().isObject()); |
| 2636 | ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info()); |
| 2637 | |
| 2638 | FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype()); |
| 2639 | Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure(); |
| 2640 | |
| 2641 | auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) { |
| 2642 | JSValue currentProperty; |
| 2643 | if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty)) |
| 2644 | return false; |
| 2645 | |
| 2646 | return currentProperty == primordialProperty; |
| 2647 | }; |
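| | // I.e. the lambda answers "does RegExp.prototype still hold the primordial value for this
| | // property?"; e.g. a page that ran `RegExp.prototype.exec = function () { return null; }`
| | // makes the comparison fail and blocks inlining.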
| 2648 | |
| 2649 | // Check that RegExp.exec is still the primordial RegExp.prototype.exec |
| 2650 | if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl())) |
| 2651 | return false; |
| 2652 | |
| 2653 | // Check that regExpObject is actually a RegExp object. |
| 2654 | Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset)); |
| 2655 | addToGraph(Check, Edge(regExpObject, RegExpObjectUse)); |
| 2656 | |
| 2657 | // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
| 2658 | UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl(); |
| 2659 | unsigned execIndex = m_graph.identifiers().ensure(execPropertyID); |
| 2660 | Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse)); |
| 2661 | FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction()); |
| 2662 | addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse)); |
| 2663 | } |
| 2664 | |
| 2665 | insertChecks(); |
| 2666 | Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset)); |
| 2667 | Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset))); |
| 2668 | setResult(regExpExec); |
| 2669 | |
| 2670 | return true; |
| 2671 | } |
| 2672 | |
| 2673 | case RegExpMatchFastIntrinsic: { |
| 2674 | RELEASE_ASSERT(argumentCountIncludingThis == 2); |
| 2675 | |
| 2676 | insertChecks(); |
| 2677 | Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); |
| 2678 | setResult(regExpMatch); |
| 2679 | return true; |
| 2680 | } |
| 2681 | |
| 2682 | case ObjectCreateIntrinsic: { |
| 2683 | if (argumentCountIncludingThis != 2) |
| 2684 | return false; |
| 2685 | |
| 2686 | insertChecks(); |
| 2687 | setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset)))); |
| 2688 | return true; |
| 2689 | } |
| 2690 | |
| 2691 | case ObjectGetPrototypeOfIntrinsic: { |
| 2692 | if (argumentCountIncludingThis != 2) |
| 2693 | return false; |
| 2694 | |
| 2695 | insertChecks(); |
| 2696 | setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); |
| 2697 | return true; |
| 2698 | } |
| 2699 | |
| 2700 | case ObjectIsIntrinsic: { |
| 2701 | if (argumentCountIncludingThis < 3) |
| 2702 | return false; |
| 2703 | |
| 2704 | insertChecks(); |
| 2705 | setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); |
| 2706 | return true; |
| 2707 | } |
| 2708 | |
| 2709 | case ObjectKeysIntrinsic: { |
| 2710 | if (argumentCountIncludingThis < 2) |
| 2711 | return false; |
| 2712 | |
| 2713 | insertChecks(); |
| 2714 | setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset)))); |
| 2715 | return true; |
| 2716 | } |
| 2717 | |
| 2718 | case ReflectGetPrototypeOfIntrinsic: { |
| 2719 | if (argumentCountIncludingThis != 2) |
| 2720 | return false; |
| 2721 | |
| 2722 | insertChecks(); |
| 2723 | setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse))); |
| 2724 | return true; |
| 2725 | } |
| 2726 | |
| 2727 | case IsTypedArrayViewIntrinsic: { |
| 2728 | ASSERT(argumentCountIncludingThis == 2); |
| 2729 | |
| 2730 | insertChecks(); |
| 2731 | setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); |
| 2732 | return true; |
| 2733 | } |
| 2734 | |
| 2735 | case StringPrototypeValueOfIntrinsic: { |
| 2736 | insertChecks(); |
| 2737 | Node* value = get(virtualRegisterForArgument(0, registerOffset)); |
| 2738 | setResult(addToGraph(StringValueOf, value)); |
| 2739 | return true; |
| 2740 | } |
| 2741 | |
| 2742 | case StringPrototypeReplaceIntrinsic: { |
| 2743 | if (argumentCountIncludingThis != 3) |
| 2744 | return false; |
| 2745 | |
| 2746 | // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object. |
| 2747 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 2748 | return false; |
| 2749 | |
| 2750 | // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing. |
| 2751 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) |
| 2752 | return false; |
| 2753 | |
| 2754 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); |
| 2755 | Structure* regExpStructure = globalObject->regExpStructure(); |
| 2756 | m_graph.registerStructure(regExpStructure); |
| 2757 | ASSERT(regExpStructure->storedPrototype().isObject()); |
| 2758 | ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info()); |
| 2759 | |
| 2760 | FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype()); |
| 2761 | Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure(); |
| 2762 | |
| 2763 | auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) { |
| 2764 | JSValue currentProperty; |
| 2765 | if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty)) |
| 2766 | return false; |
| 2767 | |
| 2768 | return currentProperty == primordialProperty; |
| 2769 | }; |
| 2770 | |
| 2771 | // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec |
| 2772 | if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl())) |
| 2773 | return false; |
| 2774 | |
| 2775 | // Check that searchRegExp.global is still the primordial RegExp.prototype.global |
| 2776 | if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl())) |
| 2777 | return false; |
| 2778 | |
| 2779 | // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode |
| 2780 | if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl())) |
| 2781 | return false; |
| 2782 | |
| 2783 | // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
| 2784 | if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl())) |
| 2785 | return false; |
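| | // Collectively these checks pin down the primordial replace algorithm; e.g. a page that ran
| | //
| | //     RegExp.prototype[Symbol.replace] = function () { return "hijacked"; };
| | //
| | // must not reach the inlined StringReplace below, because String.prototype.replace
| | // dispatches through search[Symbol.replace].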
| 2786 | |
| 2787 | insertChecks(); |
| 2788 | |
| 2789 | Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); |
| 2790 | setResult(resultNode); |
| 2791 | return true; |
| 2792 | } |
| 2793 | |
| 2794 | case StringPrototypeReplaceRegExpIntrinsic: { |
| 2795 | if (argumentCountIncludingThis != 3) |
| 2796 | return false; |
| 2797 | |
| 2798 | insertChecks(); |
| 2799 | Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); |
| 2800 | setResult(resultNode); |
| 2801 | return true; |
| 2802 | } |
| 2803 | |
| 2804 | case RoundIntrinsic: |
| 2805 | case FloorIntrinsic: |
| 2806 | case CeilIntrinsic: |
| 2807 | case TruncIntrinsic: { |
| 2808 | if (argumentCountIncludingThis == 1) { |
| 2809 | insertChecks(); |
| 2810 | setResult(addToGraph(JSConstant, OpInfo(m_constantNaN))); |
| 2811 | return true; |
| 2812 | } |
| 2813 | insertChecks(); |
| 2814 | Node* operand = get(virtualRegisterForArgument(1, registerOffset)); |
| 2815 | NodeType op; |
| 2816 | if (intrinsic == RoundIntrinsic) |
| 2817 | op = ArithRound; |
| 2818 | else if (intrinsic == FloorIntrinsic) |
| 2819 | op = ArithFloor; |
| 2820 | else if (intrinsic == CeilIntrinsic) |
| 2821 | op = ArithCeil; |
| 2822 | else { |
| 2823 | ASSERT(intrinsic == TruncIntrinsic); |
| 2824 | op = ArithTrunc; |
| 2825 | } |
| 2826 | Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand); |
| 2827 | setResult(roundNode); |
| 2828 | return true; |
| 2829 | } |
| 2830 | case IMulIntrinsic: { |
| 2831 | if (argumentCountIncludingThis != 3) |
| 2832 | return false; |
| 2833 | insertChecks(); |
| 2834 | VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset); |
| 2835 | VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset); |
| 2836 | Node* left = get(leftOperand); |
| 2837 | Node* right = get(rightOperand); |
| 2838 | setResult(addToGraph(ArithIMul, left, right)); |
| 2839 | return true; |
| 2840 | } |
| 2841 | |
| 2842 | case RandomIntrinsic: { |
| 2843 | if (argumentCountIncludingThis != 1) |
| 2844 | return false; |
| 2845 | insertChecks(); |
| 2846 | setResult(addToGraph(ArithRandom)); |
| 2847 | return true; |
| 2848 | } |
| 2849 | |
| 2850 | case DFGTrueIntrinsic: { |
| 2851 | insertChecks(); |
| 2852 | setResult(jsConstant(jsBoolean(true))); |
| 2853 | return true; |
| 2854 | } |
| 2855 | |
| 2856 | case FTLTrueIntrinsic: { |
| 2857 | insertChecks(); |
| 2858 | setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL()))); |
| 2859 | return true; |
| 2860 | } |
| 2861 | |
| 2862 | case OSRExitIntrinsic: { |
| 2863 | insertChecks(); |
| 2864 | addToGraph(ForceOSRExit); |
| 2865 | setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined))); |
| 2866 | return true; |
| 2867 | } |
| 2868 | |
| 2869 | case IsFinalTierIntrinsic: { |
| 2870 | insertChecks(); |
| 2871 | setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true))); |
| 2872 | return true; |
| 2873 | } |
| 2874 | |
| 2875 | case SetInt32HeapPredictionIntrinsic: { |
| 2876 | insertChecks(); |
| 2877 | for (int i = 1; i < argumentCountIncludingThis; ++i) { |
| 2878 | Node* node = get(virtualRegisterForArgument(i, registerOffset)); |
| 2879 | if (node->hasHeapPrediction()) |
| 2880 | node->setHeapPrediction(SpecInt32Only); |
| 2881 | } |
| 2882 | setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined))); |
| 2883 | return true; |
| 2884 | } |
| 2885 | |
| 2886 | case CheckInt32Intrinsic: { |
| 2887 | insertChecks(); |
| 2888 | for (int i = 1; i < argumentCountIncludingThis; ++i) { |
| 2889 | Node* node = get(virtualRegisterForArgument(i, registerOffset)); |
| 2890 | addToGraph(Phantom, Edge(node, Int32Use)); |
| 2891 | } |
| 2892 | setResult(jsConstant(jsBoolean(true))); |
| 2893 | return true; |
| 2894 | } |
| 2895 | |
| 2896 | case FiatInt52Intrinsic: { |
| 2897 | if (argumentCountIncludingThis != 2) |
| 2898 | return false; |
| 2899 | insertChecks(); |
| 2900 | VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); |
| 2901 | if (enableInt52()) |
| 2902 | setResult(addToGraph(FiatInt52, get(operand))); |
| 2903 | else |
| 2904 | setResult(get(operand)); |
| 2905 | return true; |
| 2906 | } |
| 2907 | |
| 2908 | case JSMapGetIntrinsic: { |
| 2909 | if (argumentCountIncludingThis != 2) |
| 2910 | return false; |
| 2911 | |
| 2912 | insertChecks(); |
| 2913 | Node* map = get(virtualRegisterForArgument(0, registerOffset)); |
| 2914 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 2915 | Node* normalizedKey = addToGraph(NormalizeMapKey, key); |
| 2916 | Node* hash = addToGraph(MapHash, normalizedKey); |
| 2917 | Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash)); |
| 2918 | Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); |
| 2919 | setResult(resultNode); |
| 2920 | return true; |
| 2921 | } |
| 2922 | |
| 2923 | case JSSetHasIntrinsic: |
| 2924 | case JSMapHasIntrinsic: { |
| 2925 | if (argumentCountIncludingThis != 2) |
| 2926 | return false; |
| 2927 | |
| 2928 | insertChecks(); |
| 2929 | Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset)); |
| 2930 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 2931 | Node* normalizedKey = addToGraph(NormalizeMapKey, key); |
| 2932 | Node* hash = addToGraph(MapHash, normalizedKey); |
| 2933 | UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse; |
| 2934 | Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash)); |
| 2935 | JSCell* sentinel = nullptr; |
| 2936 | if (intrinsic == JSMapHasIntrinsic) |
| 2937 | sentinel = m_vm->sentinelMapBucket(); |
| 2938 | else |
| 2939 | sentinel = m_vm->sentinelSetBucket(); |
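| | // GetMapBucket yields a distinguished sentinel bucket (rather than null) on a miss, so
| | // `has` reduces to "bucket is not the sentinel", i.e. the !CompareEqPtr below.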
| 2940 | |
| 2941 | FrozenValue* frozenPointer = m_graph.freeze(sentinel); |
| 2942 | Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket); |
| 2943 | Node* resultNode = addToGraph(LogicalNot, invertedResult); |
| 2944 | setResult(resultNode); |
| 2945 | return true; |
| 2946 | } |
| 2947 | |
| 2948 | case JSSetAddIntrinsic: { |
| 2949 | if (argumentCountIncludingThis != 2) |
| 2950 | return false; |
| 2951 | |
| 2952 | insertChecks(); |
| 2953 | Node* base = get(virtualRegisterForArgument(0, registerOffset)); |
| 2954 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 2955 | Node* normalizedKey = addToGraph(NormalizeMapKey, key); |
| 2956 | Node* hash = addToGraph(MapHash, normalizedKey); |
| 2957 | addToGraph(SetAdd, base, normalizedKey, hash); |
| 2958 | setResult(base); |
| 2959 | return true; |
| 2960 | } |
| 2961 | |
| 2962 | case JSMapSetIntrinsic: { |
| 2963 | if (argumentCountIncludingThis != 3) |
| 2964 | return false; |
| 2965 | |
| 2966 | insertChecks(); |
| 2967 | Node* base = get(virtualRegisterForArgument(0, registerOffset)); |
| 2968 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 2969 | Node* value = get(virtualRegisterForArgument(2, registerOffset)); |
| 2970 | |
| 2971 | Node* normalizedKey = addToGraph(NormalizeMapKey, key); |
| 2972 | Node* hash = addToGraph(MapHash, normalizedKey); |
| 2973 | |
| 2974 | addVarArgChild(base); |
| 2975 | addVarArgChild(normalizedKey); |
| 2976 | addVarArgChild(value); |
| 2977 | addVarArgChild(hash); |
| 2978 | addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0)); |
| 2979 | setResult(base); |
| 2980 | return true; |
| 2981 | } |
| 2982 | |
| 2983 | case JSSetBucketHeadIntrinsic: |
| 2984 | case JSMapBucketHeadIntrinsic: { |
| 2985 | ASSERT(argumentCountIncludingThis == 2); |
| 2986 | |
| 2987 | insertChecks(); |
| 2988 | Node* map = get(virtualRegisterForArgument(1, registerOffset)); |
| 2989 | UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse; |
| 2990 | Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind)); |
| 2991 | setResult(resultNode); |
| 2992 | return true; |
| 2993 | } |
| 2994 | |
| 2995 | case JSSetBucketNextIntrinsic: |
| 2996 | case JSMapBucketNextIntrinsic: { |
| 2997 | ASSERT(argumentCountIncludingThis == 2); |
| 2998 | |
| 2999 | insertChecks(); |
| 3000 | Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); |
| 3001 | BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map; |
| 3002 | Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket); |
| 3003 | setResult(resultNode); |
| 3004 | return true; |
| 3005 | } |
| 3006 | |
| 3007 | case JSSetBucketKeyIntrinsic: |
| 3008 | case JSMapBucketKeyIntrinsic: { |
| 3009 | ASSERT(argumentCountIncludingThis == 2); |
| 3010 | |
| 3011 | insertChecks(); |
| 3012 | Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); |
| 3013 | BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map; |
| 3014 | Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket); |
| 3015 | setResult(resultNode); |
| 3016 | return true; |
| 3017 | } |
| 3018 | |
| 3019 | case JSMapBucketValueIntrinsic: { |
| 3020 | ASSERT(argumentCountIncludingThis == 2); |
| 3021 | |
| 3022 | insertChecks(); |
| 3023 | Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); |
| 3024 | Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); |
| 3025 | setResult(resultNode); |
| 3026 | return true; |
| 3027 | } |
| 3028 | |
| 3029 | case JSWeakMapGetIntrinsic: { |
| 3030 | if (argumentCountIncludingThis != 2) |
| 3031 | return false; |
| 3032 | |
| 3033 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3034 | return false; |
| 3035 | |
| 3036 | insertChecks(); |
| 3037 | Node* map = get(virtualRegisterForArgument(0, registerOffset)); |
| 3038 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 3039 | addToGraph(Check, Edge(key, ObjectUse)); |
| 3040 | Node* hash = addToGraph(MapHash, key); |
| 3041 | Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); |
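| | // WeakMapGet yields either the stored value or the empty value on a miss;
| | // ExtractValueFromWeakMapGet maps empty to undefined, matching WeakMap.prototype.get.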
| 3042 | Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder); |
| 3043 | |
| 3044 | setResult(resultNode); |
| 3045 | return true; |
| 3046 | } |
| 3047 | |
| 3048 | case JSWeakMapHasIntrinsic: { |
| 3049 | if (argumentCountIncludingThis != 2) |
| 3050 | return false; |
| 3051 | |
| 3052 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3053 | return false; |
| 3054 | |
| 3055 | insertChecks(); |
| 3056 | Node* map = get(virtualRegisterForArgument(0, registerOffset)); |
| 3057 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 3058 | addToGraph(Check, Edge(key, ObjectUse)); |
| 3059 | Node* hash = addToGraph(MapHash, key); |
| 3060 | Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); |
| 3061 | Node* invertedResult = addToGraph(IsEmpty, holder); |
| 3062 | Node* resultNode = addToGraph(LogicalNot, invertedResult); |
| 3063 | |
| 3064 | setResult(resultNode); |
| 3065 | return true; |
| 3066 | } |
| 3067 | |
| 3068 | case JSWeakSetHasIntrinsic: { |
| 3069 | if (argumentCountIncludingThis != 2) |
| 3070 | return false; |
| 3071 | |
| 3072 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3073 | return false; |
| 3074 | |
| 3075 | insertChecks(); |
| 3076 | Node* map = get(virtualRegisterForArgument(0, registerOffset)); |
| 3077 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 3078 | addToGraph(Check, Edge(key, ObjectUse)); |
| 3079 | Node* hash = addToGraph(MapHash, key); |
| 3080 | Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); |
| 3081 | Node* invertedResult = addToGraph(IsEmpty, holder); |
| 3082 | Node* resultNode = addToGraph(LogicalNot, invertedResult); |
| 3083 | |
| 3084 | setResult(resultNode); |
| 3085 | return true; |
| 3086 | } |
| 3087 | |
| 3088 | case JSWeakSetAddIntrinsic: { |
| 3089 | if (argumentCountIncludingThis != 2) |
| 3090 | return false; |
| 3091 | |
| 3092 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3093 | return false; |
| 3094 | |
| 3095 | insertChecks(); |
| 3096 | Node* base = get(virtualRegisterForArgument(0, registerOffset)); |
| 3097 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 3098 | addToGraph(Check, Edge(key, ObjectUse)); |
| 3099 | Node* hash = addToGraph(MapHash, key); |
| 3100 | addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); |
| 3101 | setResult(base); |
| 3102 | return true; |
| 3103 | } |
| 3104 | |
| 3105 | case JSWeakMapSetIntrinsic: { |
| 3106 | if (argumentCountIncludingThis != 3) |
| 3107 | return false; |
| 3108 | |
| 3109 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3110 | return false; |
| 3111 | |
| 3112 | insertChecks(); |
| 3113 | Node* base = get(virtualRegisterForArgument(0, registerOffset)); |
| 3114 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 3115 | Node* value = get(virtualRegisterForArgument(2, registerOffset)); |
| 3116 | |
| 3117 | addToGraph(Check, Edge(key, ObjectUse)); |
| 3118 | Node* hash = addToGraph(MapHash, key); |
| 3119 | |
| 3120 | addVarArgChild(Edge(base, WeakMapObjectUse)); |
| 3121 | addVarArgChild(Edge(key, ObjectUse)); |
| 3122 | addVarArgChild(Edge(value)); |
| 3123 | addVarArgChild(Edge(hash, Int32Use)); |
| 3124 | addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0)); |
| 3125 | setResult(base); |
| 3126 | return true; |
| 3127 | } |
| 3128 | |
| 3129 | case DataViewGetInt8: |
| 3130 | case DataViewGetUint8: |
| 3131 | case DataViewGetInt16: |
| 3132 | case DataViewGetUint16: |
| 3133 | case DataViewGetInt32: |
| 3134 | case DataViewGetUint32: |
| 3135 | case DataViewGetFloat32: |
| 3136 | case DataViewGetFloat64: { |
| 3137 | if (!is64Bit()) |
| 3138 | return false; |
| 3139 | |
| 3140 | // To inline data view accesses, we assume the architecture we're running on: |
| 3141 | // - Is little endian. |
| 3142 | // - Allows unaligned loads/stores without crashing. |
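| | //
| | // Note that per the DataView spec the littleEndian argument defaults to false:
| | //
| | //     new DataView(buffer).getUint16(0);       // big-endian read
| | //     new DataView(buffer).getUint16(0, true); // little-endian read
| | //
| | // which is why a missing endianness argument pins isLittleEndian to FalseTriState below.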
| 3143 | |
| 3144 | if (argumentCountIncludingThis < 2) |
| 3145 | return false; |
| 3146 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3147 | return false; |
| 3148 | |
| 3149 | insertChecks(); |
| 3150 | |
| 3151 | uint8_t byteSize; |
| 3152 | NodeType op = DataViewGetInt; |
| 3153 | bool isSigned = false; |
| 3154 | switch (intrinsic) { |
| 3155 | case DataViewGetInt8: |
| 3156 | isSigned = true; |
| 3157 | FALLTHROUGH; |
| 3158 | case DataViewGetUint8: |
| 3159 | byteSize = 1; |
| 3160 | break; |
| 3161 | |
| 3162 | case DataViewGetInt16: |
| 3163 | isSigned = true; |
| 3164 | FALLTHROUGH; |
| 3165 | case DataViewGetUint16: |
| 3166 | byteSize = 2; |
| 3167 | break; |
| 3168 | |
| 3169 | case DataViewGetInt32: |
| 3170 | isSigned = true; |
| 3171 | FALLTHROUGH; |
| 3172 | case DataViewGetUint32: |
| 3173 | byteSize = 4; |
| 3174 | break; |
| 3175 | |
| 3176 | case DataViewGetFloat32: |
| 3177 | byteSize = 4; |
| 3178 | op = DataViewGetFloat; |
| 3179 | break; |
| 3180 | case DataViewGetFloat64: |
| 3181 | byteSize = 8; |
| 3182 | op = DataViewGetFloat; |
| 3183 | break; |
| 3184 | default: |
| 3185 | RELEASE_ASSERT_NOT_REACHED(); |
| 3186 | } |
| 3187 | |
| 3188 | TriState isLittleEndian = MixedTriState; |
| 3189 | Node* littleEndianChild = nullptr; |
| 3190 | if (byteSize > 1) { |
| 3191 | if (argumentCountIncludingThis < 3) |
| 3192 | isLittleEndian = FalseTriState; |
| 3193 | else { |
| 3194 | littleEndianChild = get(virtualRegisterForArgument(2, registerOffset)); |
| 3195 | if (littleEndianChild->hasConstant()) { |
| 3196 | JSValue constant = littleEndianChild->constant()->value(); |
| 3197 | isLittleEndian = constant.pureToBoolean(); |
| 3198 | if (isLittleEndian != MixedTriState) |
| 3199 | littleEndianChild = nullptr; |
| 3200 | } else |
| 3201 | isLittleEndian = MixedTriState; |
| 3202 | } |
| 3203 | } |
| 3204 | |
| 3205 | DataViewData data { }; |
| 3206 | data.isLittleEndian = isLittleEndian; |
| 3207 | data.isSigned = isSigned; |
| 3208 | data.byteSize = byteSize; |
| 3209 | |
| 3210 | setResult( |
| 3211 | addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild)); |
| 3212 | return true; |
| 3213 | } |
| 3214 | |
| 3215 | case DataViewSetInt8: |
| 3216 | case DataViewSetUint8: |
| 3217 | case DataViewSetInt16: |
| 3218 | case DataViewSetUint16: |
| 3219 | case DataViewSetInt32: |
| 3220 | case DataViewSetUint32: |
| 3221 | case DataViewSetFloat32: |
| 3222 | case DataViewSetFloat64: { |
| 3223 | if (!is64Bit()) |
| 3224 | return false; |
| 3225 | |
| 3226 | if (argumentCountIncludingThis < 3) |
| 3227 | return false; |
| 3228 | |
| 3229 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3230 | return false; |
| 3231 | |
| 3232 | insertChecks(); |
| 3233 | |
| 3234 | uint8_t byteSize; |
| 3235 | bool isFloatingPoint = false; |
| 3236 | bool isSigned = false; |
| 3237 | switch (intrinsic) { |
| 3238 | case DataViewSetInt8: |
| 3239 | isSigned = true; |
| 3240 | FALLTHROUGH; |
| 3241 | case DataViewSetUint8: |
| 3242 | byteSize = 1; |
| 3243 | break; |
| 3244 | |
| 3245 | case DataViewSetInt16: |
| 3246 | isSigned = true; |
| 3247 | FALLTHROUGH; |
| 3248 | case DataViewSetUint16: |
| 3249 | byteSize = 2; |
| 3250 | break; |
| 3251 | |
| 3252 | case DataViewSetInt32: |
| 3253 | isSigned = true; |
| 3254 | FALLTHROUGH; |
| 3255 | case DataViewSetUint32: |
| 3256 | byteSize = 4; |
| 3257 | break; |
| 3258 | |
| 3259 | case DataViewSetFloat32: |
| 3260 | isFloatingPoint = true; |
| 3261 | byteSize = 4; |
| 3262 | break; |
| 3263 | case DataViewSetFloat64: |
| 3264 | isFloatingPoint = true; |
| 3265 | byteSize = 8; |
| 3266 | break; |
| 3267 | default: |
| 3268 | RELEASE_ASSERT_NOT_REACHED(); |
| 3269 | } |
| 3270 | |
| 3271 | TriState isLittleEndian = MixedTriState; |
| 3272 | Node* littleEndianChild = nullptr; |
| 3273 | if (byteSize > 1) { |
| 3274 | if (argumentCountIncludingThis < 4) |
| 3275 | isLittleEndian = FalseTriState; |
| 3276 | else { |
| 3277 | littleEndianChild = get(virtualRegisterForArgument(3, registerOffset)); |
| 3278 | if (littleEndianChild->hasConstant()) { |
| 3279 | JSValue constant = littleEndianChild->constant()->value(); |
| 3280 | isLittleEndian = constant.pureToBoolean(); |
| 3281 | if (isLittleEndian != MixedTriState) |
| 3282 | littleEndianChild = nullptr; |
| 3283 | } else |
| 3284 | isLittleEndian = MixedTriState; |
| 3285 | } |
| 3286 | } |
| 3287 | |
| 3288 | DataViewData data { }; |
| 3289 | data.isLittleEndian = isLittleEndian; |
| 3290 | data.isSigned = isSigned; |
| 3291 | data.byteSize = byteSize; |
| 3292 | data.isFloatingPoint = isFloatingPoint; |
| 3293 | |
| 3294 | addVarArgChild(get(virtualRegisterForArgument(0, registerOffset))); |
| 3295 | addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); |
| 3296 | addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); |
| 3297 | addVarArgChild(littleEndianChild); |
| 3298 | |
| 3299 | addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo()); |
| 3300 | setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined))); |
| 3301 | return true; |
| 3302 | } |
| 3303 | |
| 3304 | case HasOwnPropertyIntrinsic: { |
| 3305 | if (argumentCountIncludingThis != 2) |
| 3306 | return false; |
| 3307 | |
| 3308 | // This can be racy, but that's fine. We know that once we observe that the cache has been
| 3309 | // created, it will never be destroyed until the VM is destroyed. It's unlikely that
| 3310 | // we'd ever get to the point where we inline this as an intrinsic without the
| 3311 | // cache being created; however, it's possible if we always throw exceptions inside
| 3312 | // hasOwnProperty.
| 3313 | if (!m_vm->hasOwnPropertyCache()) |
| 3314 | return false; |
| 3315 | |
| 3316 | insertChecks(); |
| 3317 | Node* object = get(virtualRegisterForArgument(0, registerOffset)); |
| 3318 | Node* key = get(virtualRegisterForArgument(1, registerOffset)); |
| 3319 | Node* resultNode = addToGraph(HasOwnProperty, object, key); |
| 3320 | setResult(resultNode); |
| 3321 | return true; |
| 3322 | } |
| 3323 | |
| 3324 | case StringPrototypeSliceIntrinsic: { |
| 3325 | if (argumentCountIncludingThis < 2) |
| 3326 | return false; |
| 3327 | |
| 3328 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3329 | return false; |
| 3330 | |
| 3331 | insertChecks(); |
| 3332 | Node* thisString = get(virtualRegisterForArgument(0, registerOffset)); |
| 3333 | Node* start = get(virtualRegisterForArgument(1, registerOffset)); |
| 3334 | Node* end = nullptr; |
| 3335 | if (argumentCountIncludingThis > 2) |
| 3336 | end = get(virtualRegisterForArgument(2, registerOffset)); |
| 3337 | Node* resultNode = addToGraph(StringSlice, thisString, start, end); |
| 3338 | setResult(resultNode); |
| 3339 | return true; |
| 3340 | } |
| 3341 | |
| 3342 | case StringPrototypeToLowerCaseIntrinsic: { |
| 3343 | if (argumentCountIncludingThis != 1) |
| 3344 | return false; |
| 3345 | |
| 3346 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3347 | return false; |
| 3348 | |
| 3349 | insertChecks(); |
| 3350 | Node* thisString = get(virtualRegisterForArgument(0, registerOffset)); |
| 3351 | Node* resultNode = addToGraph(ToLowerCase, thisString); |
| 3352 | setResult(resultNode); |
| 3353 | return true; |
| 3354 | } |
| 3355 | |
| 3356 | case NumberPrototypeToStringIntrinsic: { |
| 3357 | if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2) |
| 3358 | return false; |
| 3359 | |
| 3360 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3361 | return false; |
| 3362 | |
| 3363 | insertChecks(); |
| 3364 | Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset)); |
| 3365 | if (argumentCountIncludingThis == 1) { |
| 3366 | Node* resultNode = addToGraph(ToString, thisNumber); |
| 3367 | setResult(resultNode); |
| 3368 | } else { |
| 3369 | Node* radix = get(virtualRegisterForArgument(1, registerOffset)); |
| 3370 | Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix); |
| 3371 | setResult(resultNode); |
| 3372 | } |
| 3373 | return true; |
| 3374 | } |
| 3375 | |
| 3376 | case NumberIsIntegerIntrinsic: { |
| 3377 | if (argumentCountIncludingThis < 2) |
| 3378 | return false; |
| 3379 | |
| 3380 | insertChecks(); |
| 3381 | Node* input = get(virtualRegisterForArgument(1, registerOffset)); |
| 3382 | Node* resultNode = addToGraph(NumberIsInteger, input); |
| 3383 | setResult(resultNode); |
| 3384 | return true; |
| 3385 | } |
| 3386 | |
| 3387 | case CPUMfenceIntrinsic: |
| 3388 | case CPURdtscIntrinsic: |
| 3389 | case CPUCpuidIntrinsic: |
| 3390 | case CPUPauseIntrinsic: { |
| 3391 | #if CPU(X86_64) |
| 3392 | if (!m_graph.m_plan.isFTL()) |
| 3393 | return false; |
| 3394 | insertChecks(); |
| 3395 | setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo())); |
| 3396 | return true; |
| 3397 | #else |
| 3398 | return false; |
| 3399 | #endif |
| 3400 | } |
| 3401 | |
| 3402 | default: |
| 3403 | return false; |
| 3404 | } |
| 3405 | }; |
| 3406 | |
| 3407 | if (inlineIntrinsic()) { |
| 3408 | RELEASE_ASSERT(didSetResult); |
| 3409 | return true; |
| 3410 | } |
| 3411 | |
| 3412 | return false; |
| 3413 | } |
| 3414 | |
| 3415 | template<typename ChecksFunctor> |
| 3416 | bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) |
| 3417 | { |
| 3418 | if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount)) |
| 3419 | return false; |
| 3420 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) |
| 3421 | return false; |
| 3422 | |
| 3423 | // FIXME: Currently, we only support functions that take up to 2 arguments.
| 3424 | // Eventually, we should extend this. But possibly, 2 or 3 can cover typical use cases.
| 3425 | // https://bugs.webkit.org/show_bug.cgi?id=164346
| 3426 | ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support arbitrary-length argument lists.");
| 3427 | |
| 3428 | insertChecks(); |
| 3429 | addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction); |
| 3430 | return true; |
| 3431 | } |
| 3432 | |
| 3433 | |
| 3434 | template<typename ChecksFunctor> |
| 3435 | bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) |
| 3436 | { |
| 3437 | switch (variant.intrinsic()) { |
| 3438 | case TypedArrayByteLengthIntrinsic: { |
| 3439 | insertChecks(); |
| 3440 | |
| 3441 | TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType; |
| 3442 | Array::Type arrayType = toArrayType(type); |
| 3443 | size_t logSize = logElementSize(type); |
| 3444 | |
| 3445 | variant.structureSet().forEach([&] (Structure* structure) { |
| 3446 | TypedArrayType curType = structure->classInfo()->typedArrayStorageType; |
| 3447 | ASSERT(logSize == logElementSize(curType)); |
| 3448 | arrayType = refineTypedArrayType(arrayType, curType); |
| 3449 | ASSERT(arrayType != Array::Generic); |
| 3450 | }); |
| 3451 | |
| 3452 | Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode); |
| 3453 | |
| 3454 | if (!logSize) { |
| 3455 | set(result, lengthNode); |
| 3456 | return true; |
| 3457 | } |
| 3458 | |
| 3459 | // We can use a BitLShift here because typed arrays will never have a byteLength |
| 3460 | // that overflows int32. |
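| | // E.g. a Float64Array's byteLength is length << 3; lengths are capped so that the
| | // resulting byteLength still fits in int32, hence the shift cannot overflow.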
| 3461 | Node* shiftNode = jsConstant(jsNumber(logSize)); |
| 3462 | set(result, addToGraph(BitLShift, lengthNode, shiftNode)); |
| 3463 | |
| 3464 | return true; |
| 3465 | } |
| 3466 | |
| 3467 | case TypedArrayLengthIntrinsic: { |
| 3468 | insertChecks(); |
| 3469 | |
| 3470 | TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType; |
| 3471 | Array::Type arrayType = toArrayType(type); |
| 3472 | |
| 3473 | variant.structureSet().forEach([&] (Structure* structure) { |
| 3474 | TypedArrayType curType = structure->classInfo()->typedArrayStorageType; |
| 3475 | arrayType = refineTypedArrayType(arrayType, curType); |
| 3476 | ASSERT(arrayType != Array::Generic); |
| 3477 | }); |
| 3478 | |
| 3479 | set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); |
| 3480 | |
| 3481 | return true; |
| 3482 | |
| 3483 | } |
| 3484 | |
| 3485 | case TypedArrayByteOffsetIntrinsic: { |
| 3486 | insertChecks(); |
| 3487 | |
| 3488 | TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType; |
| 3489 | Array::Type arrayType = toArrayType(type); |
| 3490 | |
| 3491 | variant.structureSet().forEach([&] (Structure* structure) { |
| 3492 | TypedArrayType curType = structure->classInfo()->typedArrayStorageType; |
| 3493 | arrayType = refineTypedArrayType(arrayType, curType); |
| 3494 | ASSERT(arrayType != Array::Generic); |
| 3495 | }); |
| 3496 | |
| 3497 | set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); |
| 3498 | |
| 3499 | return true; |
| 3500 | } |
| 3501 | |
| 3502 | case UnderscoreProtoIntrinsic: { |
| 3503 | insertChecks(); |
| 3504 | |
| 3505 | bool canFold = !variant.structureSet().isEmpty(); |
| 3506 | JSValue prototype; |
| 3507 | variant.structureSet().forEach([&] (Structure* structure) { |
| 3508 | auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype; |
| 3509 | MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype; |
| 3510 | if (getPrototypeMethod != defaultGetPrototype) { |
| 3511 | canFold = false; |
| 3512 | return; |
| 3513 | } |
| 3514 | |
| 3515 | if (structure->hasPolyProto()) { |
| 3516 | canFold = false; |
| 3517 | return; |
| 3518 | } |
| 3519 | if (!prototype) |
| 3520 | prototype = structure->storedPrototype(); |
| 3521 | else if (prototype != structure->storedPrototype()) |
| 3522 | canFold = false; |
| 3523 | }); |
| 3524 | |
| 3525 | // OK, exactly one prototype was found, so we perform constant folding here.
| 3526 | // This information is important for super constructor calls to get a constant new.target.
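| | // E.g. `o.__proto__` where every profiled structure stores the same prototype, uses the
| | // default getPrototype hook, and has no poly proto folds to that prototype as a weak
| | // constant, skipping the GetPrototypeOf node entirely.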
| 3527 | if (prototype && canFold) { |
| 3528 | set(result, weakJSConstant(prototype)); |
| 3529 | return true; |
| 3530 | } |
| 3531 | |
| 3532 | set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode)); |
| 3533 | return true; |
| 3534 | } |
| 3535 | |
| 3536 | default: |
| 3537 | return false; |
| 3538 | } |
| 3539 | RELEASE_ASSERT_NOT_REACHED(); |
| 3540 | } |
| 3541 | |
| 3542 | static void blessCallDOMGetter(Node* node) |
| 3543 | { |
| 3544 | DOMJIT::CallDOMGetterSnippet* snippet = node->callDOMGetterData()->snippet; |
| 3545 | if (snippet && !snippet->effect.mustGenerate()) |
| 3546 | node->clearFlags(NodeMustGenerate); |
| 3547 | } |
| 3548 | |
| 3549 | bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction) |
| 3550 | { |
| 3551 | if (!variant.domAttribute()) |
| 3552 | return false; |
| 3553 | |
| 3554 | auto domAttribute = variant.domAttribute().value(); |
| 3555 | |
| 3556 | // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
| 3557 | // since replacing a CustomGetterSetter always incurs a Structure transition.
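| | // E.g. redefining such an attribute via Object.defineProperty on the relevant prototype
| | // swaps the CustomGetterSetter and transitions that Structure, firing the watchpoints
| | // registered just below. (The exact attribute is illustrative; any DOMJIT-backed getter
| | // behaves this way.)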
| 3558 | if (!check(variant.conditionSet())) |
| 3559 | return false; |
| 3560 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode); |
| 3561 | |
| 3562 | // We do not need to emit a CheckCell here. When the custom accessor is replaced with a different one, a Structure transition occurs.
| 3563 | addToGraph(CheckSubClass, OpInfo(domAttribute.classInfo), thisNode); |
| 3564 | |
| 3565 | bool wasSeenInJIT = true; |
| 3566 | addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode); |
| 3567 | |
| 3568 | CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add(); |
| 3569 | callDOMGetterData->customAccessorGetter = variant.customAccessorGetter(); |
| 3570 | ASSERT(callDOMGetterData->customAccessorGetter); |
| 3571 | |
| 3572 | if (const auto* domJIT = domAttribute.domJIT) { |
| 3573 | callDOMGetterData->domJIT = domJIT; |
| 3574 | Ref<DOMJIT::CallDOMGetterSnippet> snippet = domJIT->compiler()(); |
| 3575 | callDOMGetterData->snippet = snippet.ptr(); |
| 3576 | m_graph.m_domJITSnippets.append(WTFMove(snippet)); |
| 3577 | } |
| 3578 | DOMJIT::CallDOMGetterSnippet* callDOMGetterSnippet = callDOMGetterData->snippet; |
| 3579 | callDOMGetterData->identifierNumber = identifierNumber; |
| 3580 | |
| 3581 | Node* callDOMGetterNode = nullptr; |
| 3582 | // GlobalObject of thisNode is always used to create a DOMWrapper. |
| 3583 | if (callDOMGetterSnippet && callDOMGetterSnippet->requireGlobalObject) { |
| 3584 | Node* globalObject = addToGraph(GetGlobalObject, thisNode); |
| 3585 | callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode, globalObject); |
| 3586 | } else |
| 3587 | callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode); |
| 3588 | blessCallDOMGetter(callDOMGetterNode); |
| 3589 | set(result, callDOMGetterNode); |
| 3590 | return true; |
| 3591 | } |
| 3592 | |
| 3593 | bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById) |
| 3594 | { |
| 3595 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) |
| 3596 | return false; |
| 3597 | addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse)); |
| 3598 | |
| 3599 | addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getById)), base); |
| 3600 | |
| 3601 | // Ideally we wouldn't have to do this Phantom. But: |
| 3602 | // |
| 3603 | // For the constant case: we must do it because otherwise we would have no way of knowing |
| 3604 | // that the scope is live at OSR here. |
| 3605 | // |
| 3606 | // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation |
| 3607 | // won't be able to handle an Undefined scope. |
| 3608 | addToGraph(Phantom, base); |
| 3609 | |
| 3610 | // Constant folding in the bytecode parser is important for performance. This load may not
| 3611 | // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
| 3612 | // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
| 3613 | // would recompile. But if we can fold it here, we avoid the exit.
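| | // Sketch of the payoff: for `import { f } from "mod"`, a namespace load of `f` that we can
| | // fold here becomes a weak constant below, instead of a GetClosureVar whose prediction we
| | // would lack until the code has run.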
| 3614 | m_graph.freeze(getById.moduleEnvironment()); |
| 3615 | if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) { |
| 3616 | set(result, weakJSConstant(value)); |
| 3617 | return true; |
| 3618 | } |
| 3619 | set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment()))); |
| 3620 | return true; |
| 3621 | } |
| 3622 | |
| 3623 | template<typename ChecksFunctor> |
| 3624 | bool ByteCodeParser::handleTypedArrayConstructor( |
| 3625 | VirtualRegister result, InternalFunction* function, int registerOffset, |
| 3626 | int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks) |
| 3627 | { |
| 3628 | if (!isTypedView(type)) |
| 3629 | return false; |
| 3630 | |
| 3631 | if (function->classInfo() != constructorClassInfoForType(type)) |
| 3632 | return false; |
| 3633 | |
| 3634 | if (function->globalObject(*m_vm) != m_inlineStackTop->m_codeBlock->globalObject()) |
| 3635 | return false; |
| 3636 | |
| 3637 | // We only have an intrinsic for the case where you say: |
| 3638 | // |
| 3639 | // new FooArray(blah); |
| 3640 | // |
| 3641 | // Of course, 'blah' could be any of the following: |
| 3642 | // |
| 3643 | // - Integer, indicating that you want to allocate an array of that length. |
| 3644 | // This is the thing we're hoping for, and what we can actually do meaningful |
| 3645 | // optimizations for. |
| 3646 | // |
| 3647 | // - Array buffer, indicating that you want to create a view onto that _entire_ |
| 3648 | // buffer. |
| 3649 | // |
| 3650 | // - Non-buffer object, indicating that you want to create a copy of that |
| 3651 | // object by pretending that it quacks like an array. |
| 3652 | // |
| 3653 | // - Anything else, indicating that you want to have an exception thrown at |
| 3654 | // you. |
| 3655 | // |
| 3656 | // The intrinsic, NewTypedArray, will behave as if it could do any of these |
| 3657 | // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is |
| 3658 | // predicted Int32, then we lock it in as a normal typed array allocation. |
| 3659 | // Otherwise, NewTypedArray turns into a totally opaque function call that |
| 3660 | // may clobber the world - by virtue of it accessing properties on what could |
| 3661 | // be an object. |
| 3662 | // |
| 3663 | // Note that although the generic form of NewTypedArray sounds sort of awful, |
| 3664 | // it is actually quite likely to be more efficient than a fully generic |
| 3665 | // Construct. So, we might want to think about making NewTypedArray variadic, |
| 3666 | // or else making Construct not super slow. |
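| | //
| | // Concretely (a sketch of the cases above):
| | //
| | //     new Uint8Array(64);            // length: the case we can optimize meaningfully
| | //     new Uint8Array(buffer);        // view onto that entire ArrayBuffer
| | //     new Uint8Array({ length: 2 }); // array-like copy; property access may run user code
| | //     new Uint8Array(Symbol());      // throws a TypeError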
| 3667 | |
| 3668 | if (argumentCountIncludingThis != 2) |
| 3669 | return false; |
| 3670 | |
| 3671 | if (!function->globalObject(*m_vm)->typedArrayStructureConcurrently(type)) |
| 3672 | return false; |
| 3673 | |
| 3674 | insertChecks(); |
| 3675 | set(result, |
| 3676 | addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); |
| 3677 | return true; |
| 3678 | } |
| 3679 | |
| 3680 | template<typename ChecksFunctor> |
| 3681 | bool ByteCodeParser::handleConstantInternalFunction( |
| 3682 | Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset, |
| 3683 | int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks) |
| 3684 | { |
    VERBOSE_LOG("    Handling constant internal function ", JSValue(function), "\n");
| 3686 | |
| 3687 | // It so happens that the code below assumes that the result operand is valid. It's extremely |
| 3688 | // unlikely that the result operand would be invalid - you'd have to call this via a setter call. |
| 3689 | if (!result.isValid()) |
| 3690 | return false; |
| 3691 | |
| 3692 | if (kind == CodeForConstruct) { |
| 3693 | Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset)); |
| 3694 | // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we |
| 3695 | // don't know what the prototype of the constructed object will be. |
| 3696 | // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700 |
| 3697 | if (newTargetNode != callTargetNode) |
| 3698 | return false; |
| 3699 | } |
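
    // For illustration (hypothetical user code), the rejected case looks like:
    //
    //   class D extends Array { }
    //   new D(); // inside super(), callee is Array but new.target is D
    //
    // where the result's prototype comes from new.target, not from the callee.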
| 3700 | |
| 3701 | if (function->classInfo() == ArrayConstructor::info()) { |
| 3702 | if (function->globalObject(*m_vm) != m_inlineStackTop->m_codeBlock->globalObject()) |
| 3703 | return false; |
| 3704 | |
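        // For illustration (hypothetical user code): Array(100) takes the
        // single-argument path below and becomes NewArrayWithSize, while
        // Array(1, 2, 3) falls through to the variadic NewArray path.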
| 3705 | insertChecks(); |
| 3706 | if (argumentCountIncludingThis == 2) { |
| 3707 | set(result, |
| 3708 | addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); |
| 3709 | return true; |
| 3710 | } |
| 3711 | |
| 3712 | for (int i = 1; i < argumentCountIncludingThis; ++i) |
| 3713 | addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); |
| 3714 | set(result, |
| 3715 | addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1))); |
| 3716 | return true; |
| 3717 | } |
| 3718 | |
| 3719 | if (function->classInfo() == NumberConstructor::info()) { |
| 3720 | if (kind == CodeForConstruct) |
| 3721 | return false; |
| 3722 | |
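        // For illustration (hypothetical user code): Number() folds to the
        // constant 0, while Number("42") becomes a ToNumber node. The construct
        // case, new Number(...), was rejected above.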
| 3723 | insertChecks(); |
| 3724 | if (argumentCountIncludingThis <= 1) |
| 3725 | set(result, jsConstant(jsNumber(0))); |
| 3726 | else |
| 3727 | set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); |
| 3728 | |
| 3729 | return true; |
| 3730 | } |
| 3731 | |
| 3732 | if (function->classInfo() == StringConstructor::info()) { |
| 3733 | insertChecks(); |
| 3734 | |
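        // For illustration (hypothetical user code): String(x) becomes a
        // CallStringConstructor node, and new String(x) additionally wraps the
        // primitive result in a NewStringObject.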
| 3735 | Node* resultNode; |
| 3736 | |
| 3737 | if (argumentCountIncludingThis <= 1) |
| 3738 | resultNode = jsConstant(m_vm->smallStrings.emptyString()); |
| 3739 | else |
| 3740 | resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); |
| 3741 | |
| 3742 | if (kind == CodeForConstruct) |
| 3743 | resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode); |
| 3744 | |
| 3745 | set(result, resultNode); |
| 3746 | return true; |
| 3747 | } |
| 3748 | |
| 3749 | if (function->classInfo() == SymbolConstructor::info() && kind == CodeForCall) { |
| 3750 | insertChecks(); |
| 3751 | |
| 3752 | Node* resultNode; |
| 3753 | |
| 3754 | if (argumentCountIncludingThis <= 1) |
| 3755 | resultNode = addToGraph(NewSymbol); |
| 3756 | else |
| 3757 | resultNode = addToGraph(NewSymbol, addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)))); |
| 3758 | |
| 3759 | set(result, resultNode); |
| 3760 | return true; |
| 3761 | } |
| 3762 | |
| 3763 | // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591 |
| 3764 | if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) { |
| 3765 | insertChecks(); |
| 3766 | |
| 3767 | Node* resultNode; |
| 3768 | if (argumentCountIncludingThis <= 1) |
| 3769 | resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor()))); |
| 3770 | else |
| 3771 | resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))); |
| 3772 | set(result, resultNode); |
| 3773 | return true; |
| 3774 | } |
| 3775 | |
| 3776 | for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) { |
| 3777 | bool handled = handleTypedArrayConstructor( |
| 3778 | result, function, registerOffset, argumentCountIncludingThis, |
| 3779 | indexToTypedArrayType(typeIndex), insertChecks); |
| 3780 | if (handled) |
| 3781 | return true; |
| 3782 | } |
| 3783 | |
| 3784 | return false; |
| 3785 | } |
| 3786 | |
| 3787 | Node* ByteCodeParser::handleGetByOffset( |
| 3788 | SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op) |
| 3789 | { |
| 3790 | Node* propertyStorage; |
| 3791 | if (isInlineOffset(offset)) |
| 3792 | propertyStorage = base; |
| 3793 | else |
| 3794 | propertyStorage = addToGraph(GetButterfly, base); |
| 3795 | |
| 3796 | StorageAccessData* data = m_graph.m_storageAccessData.add(); |
| 3797 | data->offset = offset; |
| 3798 | data->identifierNumber = identifierNumber; |
| 3799 | |
| 3800 | Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base); |
| 3801 | |
| 3802 | return getByOffset; |
| 3803 | } |
| 3804 | |
| 3805 | Node* ByteCodeParser::handlePutByOffset( |
| 3806 | Node* base, unsigned identifier, PropertyOffset offset, |
| 3807 | Node* value) |
| 3808 | { |
| 3809 | Node* propertyStorage; |
| 3810 | if (isInlineOffset(offset)) |
| 3811 | propertyStorage = base; |
| 3812 | else |
| 3813 | propertyStorage = addToGraph(GetButterfly, base); |
| 3814 | |
| 3815 | StorageAccessData* data = m_graph.m_storageAccessData.add(); |
| 3816 | data->offset = offset; |
| 3817 | data->identifierNumber = identifier; |
| 3818 | |
| 3819 | Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value); |
| 3820 | |
| 3821 | return result; |
| 3822 | } |
| 3823 | |
| 3824 | bool ByteCodeParser::check(const ObjectPropertyCondition& condition) |
| 3825 | { |
| 3826 | if (!condition) |
| 3827 | return false; |
| 3828 | |
| 3829 | if (m_graph.watchCondition(condition)) |
| 3830 | return true; |
| 3831 | |
| 3832 | Structure* structure = condition.object()->structure(*m_vm); |
| 3833 | if (!condition.structureEnsuresValidity(structure)) |
| 3834 | return false; |
| 3835 | |
| 3836 | addToGraph( |
| 3837 | CheckStructure, |
| 3838 | OpInfo(m_graph.addStructureSet(structure)), |
| 3839 | weakJSConstant(condition.object())); |
| 3840 | return true; |
| 3841 | } |
| 3842 | |
| 3843 | GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method) |
| 3844 | { |
| 3845 | if (method.kind() == GetByOffsetMethod::LoadFromPrototype |
| 3846 | && method.prototype()->structure()->dfgShouldWatch()) { |
| 3847 | if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset())) |
| 3848 | return GetByOffsetMethod::constant(m_graph.freeze(constant)); |
| 3849 | } |
| 3850 | |
| 3851 | return method; |
| 3852 | } |
| 3853 | |
| 3854 | bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode) |
| 3855 | { |
| 3856 | ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope); |
| 3857 | |
| 3858 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); |
| 3859 | if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated()) |
| 3860 | return true; |
| 3861 | |
| 3862 | switch (type) { |
| 3863 | case GlobalProperty: |
| 3864 | case GlobalVar: |
| 3865 | case GlobalLexicalVar: |
| 3866 | case ClosureVar: |
| 3867 | case LocalClosureVar: |
| 3868 | case ModuleVar: |
| 3869 | return false; |
| 3870 | |
| 3871 | case UnresolvedProperty: |
| 3872 | case UnresolvedPropertyWithVarInjectionChecks: { |
        // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
        // haven't exited from this access before, to let the baseline JIT try to better
        // cache the access. If we've already exited from this operation, it's unlikely that
        // the baseline will come up with a better ResolveType and instead we will compile
        // this as a dynamic scope access.
| 3878 | |
| 3879 | // We only track our heuristic through resolve_scope since resolve_scope will |
| 3880 | // dominate unresolved gets/puts on that scope. |
| 3881 | if (opcode != op_resolve_scope) |
| 3882 | return true; |
| 3883 | |
| 3884 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, InadequateCoverage)) { |
| 3885 | // We've already exited so give up on getting better ResolveType information. |
| 3886 | return true; |
| 3887 | } |
| 3888 | |
| 3889 | // We have not exited yet, so let's have the baseline get better ResolveType information for us. |
| 3890 | // This type of code is often seen when we tier up in a loop but haven't executed the part |
| 3891 | // of a function that comes after the loop. |
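        //
        // For illustration (hypothetical user code, with made-up names):
        //
        //   function f() {
        //       for (let i = 0; i < 1e6; ++i) { } // hot loop triggers tier-up
        //       return lazilyDefinedGlobal; // never executed yet, still Unresolved
        //   }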
| 3892 | return false; |
| 3893 | } |
| 3894 | |
| 3895 | case Dynamic: |
| 3896 | return true; |
| 3897 | |
| 3898 | case GlobalPropertyWithVarInjectionChecks: |
| 3899 | case GlobalVarWithVarInjectionChecks: |
| 3900 | case GlobalLexicalVarWithVarInjectionChecks: |
| 3901 | case ClosureVarWithVarInjectionChecks: |
| 3902 | return false; |
| 3903 | } |
| 3904 | |
| 3905 | ASSERT_NOT_REACHED(); |
| 3906 | return false; |
| 3907 | } |
| 3908 | |
| 3909 | GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition) |
| 3910 | { |
    VERBOSE_LOG("Planning a load: ", condition, "\n");
| 3912 | |
| 3913 | // We might promote this to Equivalence, and a later DFG pass might also do such promotion |
| 3914 | // even if we fail, but for simplicity this cannot be asked to load an equivalence condition. |
| 3915 | // None of the clients of this method will request a load of an Equivalence condition anyway, |
| 3916 | // and supporting it would complicate the heuristics below. |
| 3917 | RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence); |
| 3918 | |
| 3919 | // Here's the ranking of how to handle this, from most preferred to least preferred: |
| 3920 | // |
| 3921 | // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value. |
| 3922 | // No other code is emitted, and the structure of the base object is never registered. |
| 3923 | // Hence this results in zero code and we won't jettison this compilation if the object |
| 3924 | // transitions, even if the structure is watchable right now. |
| 3925 | // |
| 3926 | // 2) Need to emit a load, and the current structure of the base is going to be watched by the |
| 3927 | // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the |
| 3928 | // condition, since the act of turning the base into a constant in IR will cause the DFG to |
| 3929 | // watch the structure anyway and doing so would subsume watching the condition. |
| 3930 | // |
| 3931 | // 3) Need to emit a load, and the current structure of the base is watchable but not by the |
| 3932 | // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch |
| 3933 | // the condition, and emit a load. |
| 3934 | // |
| 3935 | // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a |
| 3936 | // structure check, and emit a load. |
| 3937 | // |
| 3938 | // 5) The condition does not hold. Give up and return null. |
| 3939 | |
| 3940 | // First, try to promote Presence to Equivalence. We do this before doing anything else |
| 3941 | // because it's the most profitable. Also, there are cases where the presence is watchable but |
| 3942 | // we don't want to watch it unless it became an equivalence (see the relationship between |
| 3943 | // (1), (2), and (3) above). |
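    //
    // For illustration (hypothetical user code): a load like
    //
    //   o.foo   // where o is a constant base and 'foo' is watchably stable
    //
    // ideally takes path (1) above and folds to a constant with zero emitted code.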
| 3944 | ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier(*m_vm); |
| 3945 | if (m_graph.watchCondition(equivalenceCondition)) |
| 3946 | return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue())); |
| 3947 | |
| 3948 | // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once |
| 3949 | // we do this, the frozen value will have its own idea of what the structure is. Use that from |
| 3950 | // now on just because it's less confusing. |
| 3951 | FrozenValue* base = m_graph.freeze(condition.object()); |
| 3952 | Structure* structure = base->structure(); |
| 3953 | |
| 3954 | // Check if the structure that we've registered makes the condition hold. If not, just give |
| 3955 | // up. This is case (5) above. |
| 3956 | if (!condition.structureEnsuresValidity(structure)) |
| 3957 | return GetByOffsetMethod(); |
| 3958 | |
| 3959 | // If the structure is watched by the DFG already, then just use this fact to emit the load. |
| 3960 | // This is case (2) above. |
| 3961 | if (structure->dfgShouldWatch()) |
| 3962 | return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset())); |
| 3963 | |
| 3964 | // If we can watch the condition right now, then we can emit the load after watching it. This |
| 3965 | // is case (3) above. |
| 3966 | if (m_graph.watchCondition(condition)) |
| 3967 | return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset())); |
| 3968 | |
| 3969 | // We can't watch anything but we know that the current structure satisfies the condition. So, |
| 3970 | // check for that structure and then emit the load. |
| 3971 | addToGraph( |
| 3972 | CheckStructure, |
| 3973 | OpInfo(m_graph.addStructureSet(structure)), |
| 3974 | addToGraph(JSConstant, OpInfo(base))); |
| 3975 | return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset())); |
| 3976 | } |
| 3977 | |
| 3978 | Node* ByteCodeParser::load( |
| 3979 | SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method, |
| 3980 | NodeType op) |
| 3981 | { |
| 3982 | switch (method.kind()) { |
| 3983 | case GetByOffsetMethod::Invalid: |
| 3984 | return nullptr; |
| 3985 | case GetByOffsetMethod::Constant: |
| 3986 | return addToGraph(JSConstant, OpInfo(method.constant())); |
| 3987 | case GetByOffsetMethod::LoadFromPrototype: { |
| 3988 | Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype())); |
| 3989 | return handleGetByOffset( |
| 3990 | prediction, baseNode, identifierNumber, method.offset(), op); |
| 3991 | } |
| 3992 | case GetByOffsetMethod::Load: |
| 3993 | // Will never see this from planLoad(). |
| 3994 | RELEASE_ASSERT_NOT_REACHED(); |
| 3995 | return nullptr; |
| 3996 | } |
| 3997 | |
| 3998 | RELEASE_ASSERT_NOT_REACHED(); |
| 3999 | return nullptr; |
| 4000 | } |
| 4001 | |
| 4002 | Node* ByteCodeParser::load( |
| 4003 | SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op) |
| 4004 | { |
| 4005 | GetByOffsetMethod method = planLoad(condition); |
| 4006 | return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op); |
| 4007 | } |
| 4008 | |
| 4009 | bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet) |
| 4010 | { |
| 4011 | for (const ObjectPropertyCondition& condition : conditionSet) { |
| 4012 | if (!check(condition)) |
| 4013 | return false; |
| 4014 | } |
| 4015 | return true; |
| 4016 | } |
| 4017 | |
| 4018 | GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet) |
| 4019 | { |
    VERBOSE_LOG("conditionSet = ", conditionSet, "\n");
| 4021 | |
| 4022 | GetByOffsetMethod result; |
| 4023 | for (const ObjectPropertyCondition& condition : conditionSet) { |
| 4024 | switch (condition.kind()) { |
| 4025 | case PropertyCondition::Presence: |
| 4026 | RELEASE_ASSERT(!result); // Should only see exactly one of these. |
| 4027 | result = planLoad(condition); |
| 4028 | if (!result) |
| 4029 | return GetByOffsetMethod(); |
| 4030 | break; |
| 4031 | default: |
| 4032 | if (!check(condition)) |
| 4033 | return GetByOffsetMethod(); |
| 4034 | break; |
| 4035 | } |
| 4036 | } |
| 4037 | if (!result) { |
        // We have an unset property.
| 4039 | ASSERT(!conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence)); |
| 4040 | return GetByOffsetMethod::constant(m_constantUndefined); |
| 4041 | } |
| 4042 | return result; |
| 4043 | } |
| 4044 | |
| 4045 | Node* ByteCodeParser::load( |
| 4046 | SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op) |
| 4047 | { |
| 4048 | GetByOffsetMethod method = planLoad(conditionSet); |
| 4049 | return load( |
| 4050 | prediction, |
| 4051 | m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()), |
| 4052 | method, op); |
| 4053 | } |
| 4054 | |
| 4055 | ObjectPropertyCondition ByteCodeParser::presenceLike( |
| 4056 | JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set) |
| 4057 | { |
| 4058 | if (set.isEmpty()) |
| 4059 | return ObjectPropertyCondition(); |
| 4060 | unsigned attributes; |
| 4061 | PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes); |
| 4062 | if (firstOffset != offset) |
| 4063 | return ObjectPropertyCondition(); |
| 4064 | for (unsigned i = 1; i < set.size(); ++i) { |
| 4065 | unsigned otherAttributes; |
| 4066 | PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes); |
| 4067 | if (otherOffset != offset || otherAttributes != attributes) |
| 4068 | return ObjectPropertyCondition(); |
| 4069 | } |
| 4070 | return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes); |
| 4071 | } |
| 4072 | |
| 4073 | bool ByteCodeParser::checkPresenceLike( |
| 4074 | JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set) |
| 4075 | { |
| 4076 | return check(presenceLike(knownBase, uid, offset, set)); |
| 4077 | } |
| 4078 | |
| 4079 | void ByteCodeParser::checkPresenceLike( |
| 4080 | Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set) |
| 4081 | { |
| 4082 | if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) { |
| 4083 | if (checkPresenceLike(knownBase, uid, offset, set)) |
| 4084 | return; |
| 4085 | } |
| 4086 | |
| 4087 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base); |
| 4088 | } |
| 4089 | |
| 4090 | template<typename VariantType> |
| 4091 | Node* ByteCodeParser::load( |
| 4092 | SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant) |
| 4093 | { |
| 4094 | // Make sure backwards propagation knows that we've used base. |
| 4095 | addToGraph(Phantom, base); |
| 4096 | |
| 4097 | bool needStructureCheck = true; |
| 4098 | |
| 4099 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; |
| 4100 | |
| 4101 | if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) { |
| 4102 | // Try to optimize away the structure check. Note that it's not worth doing anything about this |
| 4103 | // if the base's structure is watched. |
| 4104 | Structure* structure = base->constant()->structure(); |
| 4105 | if (!structure->dfgShouldWatch()) { |
| 4106 | if (!variant.conditionSet().isEmpty()) { |
| 4107 | // This means that we're loading from a prototype or we have a property miss. We expect |
| 4108 | // the base not to have the property. We can only use ObjectPropertyCondition if all of |
| 4109 | // the structures in the variant.structureSet() agree on the prototype (it would be |
| 4110 | // hilariously rare if they didn't). Note that we are relying on structureSet() having |
| 4111 | // at least one element. That will always be true here because of how GetByIdStatus/PutByIdStatus work. |
| 4112 | |
| 4113 | // FIXME: right now, if we have an OPCS, we have mono proto. However, this will |
| 4114 | // need to be changed in the future once we have a hybrid data structure for |
| 4115 | // poly proto: |
| 4116 | // https://bugs.webkit.org/show_bug.cgi?id=177339 |
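                // For illustration (hypothetical user code): for an access like
                //
                //   base.foo   // where 'foo' is not an own property of base
                //
                // we can replace the structure check with a watchable condition
                // that 'foo' stays absent from base itself.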
| 4117 | JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject(); |
| 4118 | bool allAgree = true; |
| 4119 | for (unsigned i = 1; i < variant.structureSet().size(); ++i) { |
| 4120 | if (variant.structureSet()[i]->storedPrototypeObject() != prototype) { |
| 4121 | allAgree = false; |
| 4122 | break; |
| 4123 | } |
| 4124 | } |
| 4125 | if (allAgree) { |
| 4126 | ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier( |
| 4127 | knownBase, uid, prototype); |
| 4128 | if (check(condition)) |
| 4129 | needStructureCheck = false; |
| 4130 | } |
| 4131 | } else { |
| 4132 | // This means we're loading directly from base. We can avoid all of the code that follows |
| 4133 | // if we can prove that the property is a constant. Otherwise, we try to prove that the |
| 4134 | // property is watchably present, in which case we get rid of the structure check. |
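                //
                // For illustration (hypothetical user code): reading
                //
                //   base.foo   // 'foo' is an own property of the constant base
                //
                // folds to a constant if the property's value is watchably stable.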
| 4135 | |
| 4136 | ObjectPropertyCondition presenceCondition = |
| 4137 | presenceLike(knownBase, uid, variant.offset(), variant.structureSet()); |
| 4138 | if (presenceCondition) { |
| 4139 | ObjectPropertyCondition equivalenceCondition = |
| 4140 | presenceCondition.attemptToMakeEquivalenceWithoutBarrier(*m_vm); |
| 4141 | if (m_graph.watchCondition(equivalenceCondition)) |
| 4142 | return weakJSConstant(equivalenceCondition.requiredValue()); |
| 4143 | |
| 4144 | if (check(presenceCondition)) |
| 4145 | needStructureCheck = false; |
| 4146 | } |
| 4147 | } |
| 4148 | } |
| 4149 | } |
| 4150 | |
| 4151 | if (needStructureCheck) |
| 4152 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base); |
| 4153 | |
| 4154 | if (variant.isPropertyUnset()) { |
| 4155 | if (m_graph.watchConditions(variant.conditionSet())) |
| 4156 | return jsConstant(jsUndefined()); |
| 4157 | return nullptr; |
| 4158 | } |
| 4159 | |
| 4160 | SpeculatedType loadPrediction; |
| 4161 | NodeType loadOp; |
| 4162 | if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) { |
| 4163 | loadPrediction = SpecCellOther; |
| 4164 | loadOp = GetGetterSetterByOffset; |
| 4165 | } else { |
| 4166 | loadPrediction = prediction; |
| 4167 | loadOp = GetByOffset; |
| 4168 | } |
| 4169 | |
| 4170 | Node* loadedValue; |
| 4171 | if (!variant.conditionSet().isEmpty()) |
| 4172 | loadedValue = load(loadPrediction, variant.conditionSet(), loadOp); |
| 4173 | else { |
| 4174 | if (needStructureCheck && base->hasConstant()) { |
| 4175 | // We did emit a structure check. That means that we have an opportunity to do constant folding |
| 4176 | // here, since we didn't do it above. |
| 4177 | JSValue constant = m_graph.tryGetConstantProperty( |
| 4178 | base->asJSValue(), *m_graph.addStructureSet(variant.structureSet()), variant.offset()); |
| 4179 | if (constant) |
| 4180 | return weakJSConstant(constant); |
| 4181 | } |
| 4182 | |
| 4183 | loadedValue = handleGetByOffset( |
| 4184 | loadPrediction, base, identifierNumber, variant.offset(), loadOp); |
| 4185 | } |
| 4186 | |
| 4187 | return loadedValue; |
| 4188 | } |
| 4189 | |
| 4190 | Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value) |
| 4191 | { |
| 4192 | RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace); |
| 4193 | |
| 4194 | checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure()); |
| 4195 | return handlePutByOffset(base, identifier, variant.offset(), value); |
| 4196 | } |
| 4197 | |
| 4198 | void ByteCodeParser::handleGetById( |
| 4199 | VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber, |
| 4200 | GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize) |
| 4201 | { |
| 4202 | // Attempt to reduce the set of things in the GetByIdStatus. |
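    // For illustration (hypothetical user code): if the base is a NewObject
    // allocated earlier in this block, e.g.
    //
    //   var o = {}; o.f;
    //
    // and nothing in between can have changed o's structure, the status can be
    // narrowed to the allocation's structure.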
| 4203 | if (base->op() == NewObject) { |
| 4204 | bool ok = true; |
| 4205 | for (unsigned i = m_currentBlock->size(); i--;) { |
| 4206 | Node* node = m_currentBlock->at(i); |
| 4207 | if (node == base) |
| 4208 | break; |
| 4209 | if (writesOverlap(m_graph, node, JSCell_structureID)) { |
| 4210 | ok = false; |
| 4211 | break; |
| 4212 | } |
| 4213 | } |
| 4214 | if (ok) |
| 4215 | getByIdStatus.filter(base->structure().get()); |
| 4216 | } |
| 4217 | |
| 4218 | NodeType getById; |
| 4219 | if (type == AccessType::Get) |
| 4220 | getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById; |
| 4221 | else if (type == AccessType::TryGet) |
| 4222 | getById = TryGetById; |
| 4223 | else |
| 4224 | getById = getByIdStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect; |
| 4225 | |
| 4226 | if (getById != TryGetById && getByIdStatus.isModuleNamespace()) { |
| 4227 | if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) { |
| 4228 | if (UNLIKELY(m_graph.compilation())) |
| 4229 | m_graph.compilation()->noticeInlinedGetById(); |
| 4230 | return; |
| 4231 | } |
| 4232 | } |
| 4233 | |
    // Special path for custom accessors, since a custom accessor's offset does not carry any meaning.
    // So, this is completely different from the Simple case. But we have a chance to optimize it when we use DOMJIT.
| 4236 | if (Options::useDOMJIT() && getByIdStatus.isCustom()) { |
| 4237 | ASSERT(getByIdStatus.numVariants() == 1); |
| 4238 | ASSERT(!getByIdStatus.makesCalls()); |
| 4239 | GetByIdVariant variant = getByIdStatus[0]; |
| 4240 | ASSERT(variant.domAttribute()); |
| 4241 | if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) { |
| 4242 | if (UNLIKELY(m_graph.compilation())) |
| 4243 | m_graph.compilation()->noticeInlinedGetById(); |
| 4244 | return; |
| 4245 | } |
| 4246 | } |
| 4247 | |
| 4248 | ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !getByIdStatus.makesCalls()); |
| 4249 | if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) { |
| 4250 | set(destination, |
| 4251 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); |
| 4252 | return; |
| 4253 | } |
| 4254 | |
| 4255 | // FIXME: If we use the GetByIdStatus for anything then we should record it and insert a node |
| 4256 | // after everything else (like the GetByOffset or whatever) that will filter the recorded |
| 4257 | // GetByIdStatus. That means that the constant folder also needs to do the same! |
| 4258 | |
| 4259 | if (getByIdStatus.numVariants() > 1) { |
| 4260 | if (getByIdStatus.makesCalls() || !m_graph.m_plan.isFTL() |
| 4261 | || !Options::usePolymorphicAccessInlining() |
| 4262 | || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) { |
| 4263 | set(destination, |
| 4264 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); |
| 4265 | return; |
| 4266 | } |
| 4267 | |
| 4268 | addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base); |
| 4269 | |
| 4270 | Vector<MultiGetByOffsetCase, 2> cases; |
| 4271 | |
        // 1) Emit prototype structure checks for all chains. This might not be optimal
        //    if there is some rarely executed case in the chain that requires a lot
        //    of checks and those checks are not watchpointable.
| 4275 | for (const GetByIdVariant& variant : getByIdStatus.variants()) { |
| 4276 | if (variant.intrinsic() != NoIntrinsic) { |
| 4277 | set(destination, |
| 4278 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); |
| 4279 | return; |
| 4280 | } |
| 4281 | |
| 4282 | if (variant.conditionSet().isEmpty()) { |
| 4283 | cases.append( |
| 4284 | MultiGetByOffsetCase( |
| 4285 | *m_graph.addStructureSet(variant.structureSet()), |
| 4286 | GetByOffsetMethod::load(variant.offset()))); |
| 4287 | continue; |
| 4288 | } |
| 4289 | |
| 4290 | GetByOffsetMethod method = planLoad(variant.conditionSet()); |
| 4291 | if (!method) { |
| 4292 | set(destination, |
| 4293 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); |
| 4294 | return; |
| 4295 | } |
| 4296 | |
| 4297 | cases.append(MultiGetByOffsetCase(*m_graph.addStructureSet(variant.structureSet()), method)); |
| 4298 | } |
| 4299 | |
| 4300 | if (UNLIKELY(m_graph.compilation())) |
| 4301 | m_graph.compilation()->noticeInlinedGetById(); |
| 4302 | |
| 4303 | // 2) Emit a MultiGetByOffset |
| 4304 | MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); |
| 4305 | data->cases = cases; |
| 4306 | data->identifierNumber = identifierNumber; |
| 4307 | set(destination, |
| 4308 | addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); |
| 4309 | return; |
| 4310 | } |
| 4311 | |
| 4312 | addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base); |
| 4313 | |
| 4314 | ASSERT(getByIdStatus.numVariants() == 1); |
| 4315 | GetByIdVariant variant = getByIdStatus[0]; |
| 4316 | |
| 4317 | Node* loadedValue = load(prediction, base, identifierNumber, variant); |
| 4318 | if (!loadedValue) { |
| 4319 | set(destination, |
| 4320 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); |
| 4321 | return; |
| 4322 | } |
| 4323 | |
| 4324 | if (UNLIKELY(m_graph.compilation())) |
| 4325 | m_graph.compilation()->noticeInlinedGetById(); |
| 4326 | |
| 4327 | ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus()); |
| 4328 | if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) { |
| 4329 | set(destination, loadedValue); |
| 4330 | return; |
| 4331 | } |
| 4332 | |
| 4333 | Node* getter = addToGraph(GetGetter, loadedValue); |
| 4334 | |
| 4335 | if (handleIntrinsicGetter(destination, prediction, variant, base, |
| 4336 | [&] () { |
| 4337 | addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter); |
| 4338 | })) { |
| 4339 | addToGraph(Phantom, base); |
| 4340 | return; |
| 4341 | } |
| 4342 | |
| 4343 | ASSERT(variant.intrinsic() == NoIntrinsic); |
| 4344 | |
| 4345 | // Make a call. We don't try to get fancy with using the smallest operand number because |
| 4346 | // the stack layout phase should compress the stack anyway. |
| 4347 | |
| 4348 | unsigned numberOfParameters = 0; |
| 4349 | numberOfParameters++; // The 'this' argument. |
| 4350 | numberOfParameters++; // True return PC. |
| 4351 | |
| 4352 | // Start with a register offset that corresponds to the last in-use register. |
| 4353 | int registerOffset = virtualRegisterForLocal( |
| 4354 | m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset(); |
| 4355 | registerOffset -= numberOfParameters; |
| 4356 | registerOffset -= CallFrame::headerSizeInRegisters; |
| 4357 | |
| 4358 | // Get the alignment right. |
| 4359 | registerOffset = -WTF::roundUpToMultipleOf( |
| 4360 | stackAlignmentRegisters(), |
| 4361 | -registerOffset); |
| 4362 | |
| 4363 | ensureLocals( |
| 4364 | m_inlineStackTop->remapOperand( |
| 4365 | VirtualRegister(registerOffset)).toLocal()); |
| 4366 | |
| 4367 | // Issue SetLocals. This has two effects: |
| 4368 | // 1) That's how handleCall() sees the arguments. |
| 4369 | // 2) If we inline then this ensures that the arguments are flushed so that if you use |
| 4370 | // the dreaded arguments object on the getter, the right things happen. Well, sort of - |
| 4371 | // since we only really care about 'this' in this case. But we're not going to take that |
| 4372 | // shortcut. |
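    //
    // For illustration (hypothetical shape of the transformation): for a getter
    // access o.x we lay out a small call frame roughly as if the user had written
    //
    //   getterForX.call(o)   // 'getterForX' is a made-up name
    //
    // and then let handleCall() decide whether to inline the getter's body.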
| 4373 | int nextRegister = registerOffset + CallFrame::headerSizeInRegisters; |
| 4374 | set(VirtualRegister(nextRegister++), base, ImmediateNakedSet); |
| 4375 | |
| 4376 | // We've set some locals, but they are not user-visible. It's still OK to exit from here. |
| 4377 | m_exitOK = true; |
| 4378 | addToGraph(ExitOK); |
| 4379 | |
| 4380 | handleCall( |
| 4381 | destination, Call, InlineCallFrame::GetterCall, instructionSize, |
| 4382 | getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction); |
| 4383 | } |
| 4384 | |
| 4385 | void ByteCodeParser::emitPutById( |
| 4386 | Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect) |
| 4387 | { |
| 4388 | if (isDirect) |
| 4389 | addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value); |
| 4390 | else |
| 4391 | addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value); |
| 4392 | } |
| 4393 | |
| 4394 | void ByteCodeParser::handlePutById( |
| 4395 | Node* base, unsigned identifierNumber, Node* value, |
| 4396 | const PutByIdStatus& putByIdStatus, bool isDirect, unsigned instructionSize) |
| 4397 | { |
| 4398 | if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) { |
| 4399 | if (!putByIdStatus.isSet()) |
| 4400 | addToGraph(ForceOSRExit); |
| 4401 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); |
| 4402 | return; |
| 4403 | } |
| 4404 | |
| 4405 | if (putByIdStatus.numVariants() > 1) { |
| 4406 | if (!m_graph.m_plan.isFTL() || putByIdStatus.makesCalls() |
| 4407 | || !Options::usePolymorphicAccessInlining() |
| 4408 | || putByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) { |
| 4409 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); |
| 4410 | return; |
| 4411 | } |
| 4412 | |
| 4413 | if (!isDirect) { |
| 4414 | for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) { |
| 4415 | if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition) |
| 4416 | continue; |
| 4417 | if (!check(putByIdStatus[variantIndex].conditionSet())) { |
| 4418 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); |
| 4419 | return; |
| 4420 | } |
| 4421 | } |
| 4422 | } |
| 4423 | |
| 4424 | if (UNLIKELY(m_graph.compilation())) |
| 4425 | m_graph.compilation()->noticeInlinedPutById(); |
| 4426 | |
| 4427 | addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base); |
| 4428 | |
| 4429 | for (const PutByIdVariant& variant : putByIdStatus.variants()) { |
| 4430 | for (Structure* structure : variant.oldStructure()) |
| 4431 | m_graph.registerStructure(structure); |
| 4432 | if (variant.kind() == PutByIdVariant::Transition) |
| 4433 | m_graph.registerStructure(variant.newStructure()); |
| 4434 | } |
| 4435 | |
| 4436 | MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add(); |
| 4437 | data->variants = putByIdStatus.variants(); |
| 4438 | data->identifierNumber = identifierNumber; |
| 4439 | addToGraph(MultiPutByOffset, OpInfo(data), base, value); |
| 4440 | return; |
| 4441 | } |
| 4442 | |
| 4443 | ASSERT(putByIdStatus.numVariants() == 1); |
| 4444 | const PutByIdVariant& variant = putByIdStatus[0]; |
| 4445 | |
| 4446 | switch (variant.kind()) { |
| 4447 | case PutByIdVariant::Replace: { |
| 4448 | addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base); |
| 4449 | |
| 4450 | store(base, identifierNumber, variant, value); |
| 4451 | if (UNLIKELY(m_graph.compilation())) |
| 4452 | m_graph.compilation()->noticeInlinedPutById(); |
| 4453 | return; |
| 4454 | } |
| 4455 | |
| 4456 | case PutByIdVariant::Transition: { |
| 4457 | addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base); |
| 4458 | |
| 4459 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base); |
| 4460 | if (!check(variant.conditionSet())) { |
| 4461 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); |
| 4462 | return; |
| 4463 | } |
| 4464 | |
| 4465 | ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated()); |
| 4466 | |
| 4467 | Node* propertyStorage; |
| 4468 | Transition* transition = m_graph.m_transitions.add( |
| 4469 | m_graph.registerStructure(variant.oldStructureForTransition()), m_graph.registerStructure(variant.newStructure())); |
| 4470 | |
| 4471 | if (variant.reallocatesStorage()) { |
| 4472 | |
| 4473 | // If we're growing the property storage then it must be because we're |
| 4474 | // storing into the out-of-line storage. |
| 4475 | ASSERT(!isInlineOffset(variant.offset())); |
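
            // For illustration (hypothetical user code): a store like
            //
            //   o.p7 = 42;   // first property that no longer fits inline
            //
            // lands here and either allocates fresh out-of-line storage or grows
            // the existing butterfly below.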
| 4476 | |
| 4477 | if (!variant.oldStructureForTransition()->outOfLineCapacity()) { |
| 4478 | propertyStorage = addToGraph( |
| 4479 | AllocatePropertyStorage, OpInfo(transition), base); |
| 4480 | } else { |
| 4481 | propertyStorage = addToGraph( |
| 4482 | ReallocatePropertyStorage, OpInfo(transition), |
| 4483 | base, addToGraph(GetButterfly, base)); |
| 4484 | } |
| 4485 | } else { |
| 4486 | if (isInlineOffset(variant.offset())) |
| 4487 | propertyStorage = base; |
| 4488 | else |
| 4489 | propertyStorage = addToGraph(GetButterfly, base); |
| 4490 | } |
| 4491 | |
| 4492 | StorageAccessData* data = m_graph.m_storageAccessData.add(); |
| 4493 | data->offset = variant.offset(); |
| 4494 | data->identifierNumber = identifierNumber; |
| 4495 | |
| 4496 | // NOTE: We could GC at this point because someone could insert an operation that GCs. |
| 4497 | // That's fine because: |
| 4498 | // - Things already in the structure will get scanned because we haven't messed with |
| 4499 | // the object yet. |
        // - The value we are about to put is going to be kept live by OSR exit handling. So
| 4501 | // if the GC does a conservative scan here it will see the new value. |
| 4502 | |
| 4503 | addToGraph( |
| 4504 | PutByOffset, |
| 4505 | OpInfo(data), |
| 4506 | propertyStorage, |
| 4507 | base, |
| 4508 | value); |
| 4509 | |
| 4510 | if (variant.reallocatesStorage()) |
| 4511 | addToGraph(NukeStructureAndSetButterfly, base, propertyStorage); |
| 4512 | |
| 4513 | // FIXME: PutStructure goes last until we fix either |
| 4514 | // https://bugs.webkit.org/show_bug.cgi?id=142921 or |
| 4515 | // https://bugs.webkit.org/show_bug.cgi?id=142924. |
| 4516 | addToGraph(PutStructure, OpInfo(transition), base); |
| 4517 | |
| 4518 | if (UNLIKELY(m_graph.compilation())) |
| 4519 | m_graph.compilation()->noticeInlinedPutById(); |
| 4520 | return; |
| 4521 | } |
| 4522 | |
| 4523 | case PutByIdVariant::Setter: { |
| 4524 | addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base); |
| 4525 | |
| 4526 | Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant); |
| 4527 | if (!loadedValue) { |
| 4528 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); |
| 4529 | return; |
| 4530 | } |
| 4531 | |
| 4532 | Node* setter = addToGraph(GetSetter, loadedValue); |
| 4533 | |
| 4534 | // Make a call. We don't try to get fancy with using the smallest operand number because |
| 4535 | // the stack layout phase should compress the stack anyway. |
| 4536 | |
| 4537 | unsigned numberOfParameters = 0; |
| 4538 | numberOfParameters++; // The 'this' argument. |
| 4539 | numberOfParameters++; // The new value. |
| 4540 | numberOfParameters++; // True return PC. |
| 4541 | |
| 4542 | // Start with a register offset that corresponds to the last in-use register. |
| 4543 | int registerOffset = virtualRegisterForLocal( |
| 4544 | m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset(); |
| 4545 | registerOffset -= numberOfParameters; |
| 4546 | registerOffset -= CallFrame::headerSizeInRegisters; |
| 4547 | |
| 4548 | // Get the alignment right. |
| 4549 | registerOffset = -WTF::roundUpToMultipleOf( |
| 4550 | stackAlignmentRegisters(), |
| 4551 | -registerOffset); |
| 4552 | |
| 4553 | ensureLocals( |
| 4554 | m_inlineStackTop->remapOperand( |
| 4555 | VirtualRegister(registerOffset)).toLocal()); |
| 4556 | |
| 4557 | int nextRegister = registerOffset + CallFrame::headerSizeInRegisters; |
| 4558 | set(VirtualRegister(nextRegister++), base, ImmediateNakedSet); |
| 4559 | set(VirtualRegister(nextRegister++), value, ImmediateNakedSet); |
| 4560 | |
| 4561 | // We've set some locals, but they are not user-visible. It's still OK to exit from here. |
| 4562 | m_exitOK = true; |
| 4563 | addToGraph(ExitOK); |
| 4564 | |
| 4565 | handleCall( |
| 4566 | VirtualRegister(), Call, InlineCallFrame::SetterCall, |
| 4567 | instructionSize, setter, numberOfParameters - 1, registerOffset, |
| 4568 | *variant.callLinkStatus(), SpecOther); |
| 4569 | return; |
| 4570 | } |
| 4571 | |
| 4572 | default: { |
| 4573 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); |
| 4574 | return; |
| 4575 | } } |
| 4576 | } |
| 4577 | |
| 4578 | void ByteCodeParser::prepareToParseBlock() |
| 4579 | { |
| 4580 | clearCaches(); |
| 4581 | ASSERT(m_setLocalQueue.isEmpty()); |
| 4582 | } |
| 4583 | |
| 4584 | void ByteCodeParser::clearCaches() |
| 4585 | { |
| 4586 | m_constants.shrink(0); |
| 4587 | } |
| 4588 | |
| 4589 | template<typename Op> |
| 4590 | void ByteCodeParser::parseGetById(const Instruction* currentInstruction) |
| 4591 | { |
| 4592 | auto bytecode = currentInstruction->as<Op>(); |
| 4593 | SpeculatedType prediction = getPrediction(); |
| 4594 | |
| 4595 | Node* base = get(bytecode.m_base); |
| 4596 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 4597 | |
| 4598 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; |
| 4599 | GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( |
| 4600 | m_inlineStackTop->m_profiledBlock, |
| 4601 | m_inlineStackTop->m_baselineMap, m_icContextStack, |
| 4602 | currentCodeOrigin(), uid); |
| 4603 | |
| 4604 | AccessType type = AccessType::Get; |
| 4605 | unsigned opcodeLength = currentInstruction->size(); |
| 4606 | if (Op::opcodeID == op_try_get_by_id) |
| 4607 | type = AccessType::TryGet; |
| 4608 | else if (Op::opcodeID == op_get_by_id_direct) |
| 4609 | type = AccessType::GetDirect; |
| 4610 | |
| 4611 | handleGetById( |
| 4612 | bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); |
}
| 4615 | |
| 4616 | static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutInfo) |
| 4617 | { |
| 4618 | static_assert(sizeof(identifierNumber) == 4, |
        "We cannot fit identifierNumber into the high bits of m_opInfo");
| 4620 | return static_cast<uint64_t>(identifierNumber) | (static_cast<uint64_t>(getPutInfo) << 32); |
| 4621 | } |
| 4622 | |
| 4623 | // The idiom: |
| 4624 | // if (true) { ...; goto label; } else label: continue |
| 4625 | // Allows using NEXT_OPCODE as a statement, even in unbraced if+else, while containing a `continue`. |
| 4626 | // The more common idiom: |
| 4627 | // do { ...; } while (false) |
| 4628 | // Doesn't allow using `continue`. |
| 4629 | #define NEXT_OPCODE(name) \ |
| 4630 | if (true) { \ |
| 4631 | m_currentIndex += currentInstruction->size(); \ |
| 4632 | goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \ |
| 4633 | } else \ |
| 4634 | WTF_CONCAT(NEXT_OPCODE_, __LINE__): \ |
| 4635 | continue |
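
// For illustration only (not compiled): NEXT_OPCODE(op_foo) on source line 42
// would expand roughly to:
//
//     if (true) {
//         m_currentIndex += currentInstruction->size();
//         goto NEXT_OPCODE_42;
//     } else
//         NEXT_OPCODE_42:
//     continue;
//
// a single statement whose `continue` still targets the enclosing dispatch loop.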
| 4636 | |
| 4637 | #define LAST_OPCODE_LINKED(name) do { \ |
| 4638 | m_currentIndex += currentInstruction->size(); \ |
| 4639 | m_exitOK = false; \ |
| 4640 | return; \ |
| 4641 | } while (false) |
| 4642 | |
| 4643 | #define LAST_OPCODE(name) \ |
| 4644 | do { \ |
| 4645 | if (m_currentBlock->terminal()) { \ |
| 4646 | switch (m_currentBlock->terminal()->op()) { \ |
| 4647 | case Jump: \ |
| 4648 | case Branch: \ |
| 4649 | case Switch: \ |
| 4650 | ASSERT(!m_currentBlock->isLinked); \ |
| 4651 | m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); \ |
| 4652 | break;\ |
| 4653 | default: break; \ |
| 4654 | } \ |
| 4655 | } \ |
| 4656 | LAST_OPCODE_LINKED(name); \ |
| 4657 | } while (false) |
| 4658 | |
| 4659 | void ByteCodeParser::parseBlock(unsigned limit) |
| 4660 | { |
| 4661 | auto& instructions = m_inlineStackTop->m_codeBlock->instructions(); |
| 4662 | unsigned blockBegin = m_currentIndex; |
| 4663 | |
| 4664 | // If we are the first basic block, introduce markers for arguments. This allows |
| 4665 | // us to track if a use of an argument may use the actual argument passed, as |
| 4666 | // opposed to using a value we set explicitly. |
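    //
    // For illustration (hypothetical user code): in
    //
    //   function f(x) { x = 1; return x; }
    //
    // the SetArgument marker for x lets us distinguish uses of the value that was
    // actually passed from uses of the value we stored explicitly.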
| 4667 | if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) { |
| 4668 | auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector()); |
| 4669 | RELEASE_ASSERT(addResult.isNewEntry); |
| 4670 | ArgumentsVector& entrypointArguments = addResult.iterator->value; |
| 4671 | entrypointArguments.resize(m_numArguments); |
| 4672 | |
| 4673 | // We will emit SetArgument nodes. They don't exit, but we're at the top of an op_enter so |
| 4674 | // exitOK = true. |
| 4675 | m_exitOK = true; |
| 4676 | for (unsigned argument = 0; argument < m_numArguments; ++argument) { |
| 4677 | VariableAccessData* variable = newVariableAccessData( |
| 4678 | virtualRegisterForArgument(argument)); |
| 4679 | variable->mergeStructureCheckHoistingFailed( |
| 4680 | m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)); |
| 4681 | variable->mergeCheckArrayHoistingFailed( |
| 4682 | m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); |
| 4683 | |
| 4684 | Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); |
| 4685 | entrypointArguments[argument] = setArgument; |
| 4686 | m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument); |
| 4687 | } |
| 4688 | } |
| 4689 | |
| 4690 | CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; |
| 4691 | |
| 4692 | auto jumpTarget = [&](int target) { |
| 4693 | if (target) |
| 4694 | return target; |
| 4695 | return codeBlock->outOfLineJumpOffset(m_currentInstruction); |
| 4696 | }; |
| 4697 | |
| 4698 | while (true) { |
| 4699 | // We're staring at a new bytecode instruction. So we once again have a place that we can exit |
| 4700 | // to. |
| 4701 | m_exitOK = true; |
| 4702 | |
| 4703 | processSetLocalQueue(); |
| 4704 | |
| 4705 | // Don't extend over jump destinations. |
| 4706 | if (m_currentIndex == limit) { |
| 4707 | // Ordinarily we want to plant a jump. But refuse to do this if the block is |
| 4708 | // empty. This is a special case for inlining, which might otherwise create |
| 4709 | // some empty blocks in some cases. When parseBlock() returns with an empty |
| 4710 | // block, it will get repurposed instead of creating a new one. Note that this |
| 4711 | // logic relies on every bytecode resulting in one or more nodes, which would |
| 4712 | // be true anyway except for op_loop_hint, which emits a Phantom to force this |
| 4713 | // to be true. |
| 4714 | |
| 4715 | if (!m_currentBlock->isEmpty()) |
| 4716 | addJumpTo(m_currentIndex); |
| 4717 | return; |
| 4718 | } |
| 4719 | |
| 4720 | // Switch on the current bytecode opcode. |
| 4721 | const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr(); |
| 4722 | m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. |
| 4723 | OpcodeID opcodeID = currentInstruction->opcodeID(); |
| 4724 | |
        VERBOSE_LOG("    parsing ", currentCodeOrigin(), ": ", opcodeID, "\n");
| 4726 | |
| 4727 | if (UNLIKELY(m_graph.compilation())) { |
| 4728 | addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor( |
| 4729 | Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin())))); |
| 4730 | } |
| 4731 | |
| 4732 | switch (opcodeID) { |
| 4733 | |
| 4734 | // === Function entry opcodes === |
| 4735 | |
| 4736 | case op_enter: { |
| 4737 | Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined)); |
| 4738 | // Initialize all locals to undefined. |
| 4739 | for (int i = 0; i < m_inlineStackTop->m_codeBlock->numVars(); ++i) |
| 4740 | set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet); |
| 4741 | |
| 4742 | NEXT_OPCODE(op_enter); |
| 4743 | } |
| 4744 | |
| 4745 | case op_to_this: { |
| 4746 | Node* op1 = getThis(); |
| 4747 | auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock); |
| 4748 | Structure* cachedStructure = metadata.m_cachedStructure.get(); |
| 4749 | if (metadata.m_toThisStatus != ToThisOK |
| 4750 | || !cachedStructure |
| 4751 | || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis |
| 4752 | || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) |
| 4753 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) |
| 4754 | || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) { |
| 4755 | setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1)); |
| 4756 | } else { |
| 4757 | addToGraph( |
| 4758 | CheckStructure, |
| 4759 | OpInfo(m_graph.addStructureSet(cachedStructure)), |
| 4760 | op1); |
| 4761 | } |
| 4762 | NEXT_OPCODE(op_to_this); |
| 4763 | } |
| 4764 | |
| 4765 | case op_create_this: { |
| 4766 | auto bytecode = currentInstruction->as<OpCreateThis>(); |
| 4767 | Node* callee = get(VirtualRegister(bytecode.m_callee)); |
| 4768 | |
| 4769 | JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm); |
| 4770 | if (!function) { |
| 4771 | JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet(); |
| 4772 | if (cachedFunction |
| 4773 | && cachedFunction != JSCell::seenMultipleCalleeObjects() |
| 4774 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { |
| 4775 | ASSERT(cachedFunction->inherits<JSFunction>(*m_vm)); |
| 4776 | |
| 4777 | FrozenValue* frozen = m_graph.freeze(cachedFunction); |
| 4778 | addToGraph(CheckCell, OpInfo(frozen), callee); |
| 4779 | |
| 4780 | function = static_cast<JSFunction*>(cachedFunction); |
| 4781 | } |
| 4782 | } |
| 4783 | |
| 4784 | bool alreadyEmitted = false; |
| 4785 | if (function) { |
| 4786 | if (FunctionRareData* rareData = function->rareData()) { |
| 4787 | if (rareData->allocationProfileWatchpointSet().isStillValid()) { |
| 4788 | Structure* structure = rareData->objectAllocationStructure(); |
| 4789 | JSObject* prototype = rareData->objectAllocationPrototype(); |
| 4790 | if (structure |
| 4791 | && (structure->hasMonoProto() || prototype) |
| 4792 | && rareData->allocationProfileWatchpointSet().isStillValid()) { |
| 4793 | |
| 4794 | m_graph.freeze(rareData); |
| 4795 | m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet()); |
| 4796 | |
| 4797 | // The callee is still live up to this point. |
| 4798 | addToGraph(Phantom, callee); |
| 4799 | Node* object = addToGraph(NewObject, OpInfo(m_graph.registerStructure(structure))); |
| 4800 | if (structure->hasPolyProto()) { |
| 4801 | StorageAccessData* data = m_graph.m_storageAccessData.add(); |
| 4802 | data->offset = knownPolyProtoOffset; |
| 4803 | data->identifierNumber = m_graph.identifiers().ensure(m_graph.m_vm.propertyNames->builtinNames().polyProtoName().impl()); |
| 4804 | ASSERT(isInlineOffset(knownPolyProtoOffset)); |
| 4805 | addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype)); |
| 4806 | } |
| 4807 | set(VirtualRegister(bytecode.m_dst), object); |
| 4808 | alreadyEmitted = true; |
| 4809 | } |
| 4810 | } |
| 4811 | } |
| 4812 | } |
| 4813 | if (!alreadyEmitted) { |
| 4814 | set(VirtualRegister(bytecode.m_dst), |
| 4815 | addToGraph(CreateThis, OpInfo(bytecode.m_inlineCapacity), callee)); |
| 4816 | } |
| 4817 | NEXT_OPCODE(op_create_this); |
| 4818 | } |
| 4819 | |
| 4820 | case op_new_object: { |
| 4821 | auto bytecode = currentInstruction->as<OpNewObject>(); |
| 4822 | set(bytecode.m_dst, |
| 4823 | addToGraph(NewObject, |
| 4824 | OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).m_objectAllocationProfile.structure())))); |
| 4825 | NEXT_OPCODE(op_new_object); |
| 4826 | } |
| 4827 | |
| 4828 | case op_new_array: { |
| 4829 | auto bytecode = currentInstruction->as<OpNewArray>(); |
| 4830 | int startOperand = bytecode.m_argv.offset(); |
| 4831 | int numOperands = bytecode.m_argc; |
| 4832 | ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile; |
| 4833 | for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) |
| 4834 | addVarArgChild(get(VirtualRegister(operandIdx))); |
| 4835 | unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands); |
| 4836 | set(bytecode.m_dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint))); |
| 4837 | NEXT_OPCODE(op_new_array); |
| 4838 | } |
| 4839 | |
| 4840 | case op_new_array_with_spread: { |
| 4841 | auto bytecode = currentInstruction->as<OpNewArrayWithSpread>(); |
| 4842 | int startOperand = bytecode.m_argv.offset(); |
| 4843 | int numOperands = bytecode.m_argc; |
| 4844 | const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.m_bitVector); |
| 4845 | for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) |
| 4846 | addVarArgChild(get(VirtualRegister(operandIdx))); |
| 4847 | |
| 4848 | BitVector* copy = m_graph.m_bitVectors.add(bitVector); |
| 4849 | ASSERT(*copy == bitVector); |
| 4850 | |
| 4851 | set(bytecode.m_dst, |
| 4852 | addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy))); |
| 4853 | NEXT_OPCODE(op_new_array_with_spread); |
| 4854 | } |
| 4855 | |
| 4856 | case op_spread: { |
| 4857 | auto bytecode = currentInstruction->as<OpSpread>(); |
| 4858 | set(bytecode.m_dst, |
| 4859 | addToGraph(Spread, get(bytecode.m_argument))); |
| 4860 | NEXT_OPCODE(op_spread); |
| 4861 | } |
| 4862 | |
| 4863 | case op_new_array_with_size: { |
| 4864 | auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); |
| 4865 | ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile; |
| 4866 | set(bytecode.m_dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.m_length))); |
| 4867 | NEXT_OPCODE(op_new_array_with_size); |
| 4868 | } |
| 4869 | |
| 4870 | case op_new_array_buffer: { |
| 4871 | auto bytecode = currentInstruction->as<OpNewArrayBuffer>(); |
| 4872 | // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new information because we |
| 4873 | // cannot allocate from compilation threads. |
| 4874 | WTF::loadLoadFence(); |
| 4875 | FrozenValue* frozen = get(VirtualRegister(bytecode.m_immutableButterfly))->constant(); |
| 4876 | WTF::loadLoadFence(); |
| 4877 | JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>(); |
| 4878 | NewArrayBufferData data { }; |
| 4879 | data.indexingMode = immutableButterfly->indexingMode(); |
| 4880 | data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength(); |
| 4881 | |
| 4882 | set(VirtualRegister(bytecode.m_dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord))); |
| 4883 | NEXT_OPCODE(op_new_array_buffer); |
| 4884 | } |
| 4885 | |
| 4886 | case op_new_regexp: { |
| 4887 | auto bytecode = currentInstruction->as<OpNewRegexp>(); |
| 4888 | ASSERT(bytecode.m_regexp.isConstant()); |
| 4889 | FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset())); |
| 4890 | set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0)))); |
| 4891 | NEXT_OPCODE(op_new_regexp); |
| 4892 | } |
| 4893 | |
| 4894 | case op_get_rest_length: { |
| 4895 | auto bytecode = currentInstruction->as<OpGetRestLength>(); |
| 4896 | InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); |
| 4897 | Node* length; |
| 4898 | if (inlineCallFrame && !inlineCallFrame->isVarargs()) { |
| 4899 | unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1; |
| 4900 | JSValue restLength; |
| 4901 | if (argumentsLength <= bytecode.m_numParametersToSkip) |
| 4902 | restLength = jsNumber(0); |
| 4903 | else |
| 4904 | restLength = jsNumber(argumentsLength - bytecode.m_numParametersToSkip); |
| 4905 | |
| 4906 | length = jsConstant(restLength); |
| 4907 | } else |
| 4908 | length = addToGraph(GetRestLength, OpInfo(bytecode.m_numParametersToSkip)); |
| 4909 | set(bytecode.m_dst, length); |
| 4910 | NEXT_OPCODE(op_get_rest_length); |
| 4911 | } |
| 4912 | |
| 4913 | case op_create_rest: { |
| 4914 | auto bytecode = currentInstruction->as<OpCreateRest>(); |
| 4915 | noticeArgumentsUse(); |
| 4916 | Node* arrayLength = get(bytecode.m_arraySize); |
| 4917 | set(bytecode.m_dst, |
| 4918 | addToGraph(CreateRest, OpInfo(bytecode.m_numParametersToSkip), arrayLength)); |
| 4919 | NEXT_OPCODE(op_create_rest); |
| 4920 | } |
| 4921 | |
| 4922 | // === Bitwise operations === |
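// Each bitwise op below emits the Arith* form when profiling proves its operands produce
// numbers (or any-int), and otherwise falls back to the Value* form, which also handles
// non-number operands (notably BigInt) and therefore carries a result prediction.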
| 4923 | |
| 4924 | case op_bitnot: { |
| 4925 | auto bytecode = currentInstruction->as<OpBitnot>(); |
| 4926 | SpeculatedType prediction = getPrediction(); |
| 4927 | Node* op1 = get(bytecode.m_operand); |
| 4928 | if (op1->hasNumberOrAnyIntResult()) |
| 4929 | set(bytecode.m_dst, addToGraph(ArithBitNot, op1)); |
| 4930 | else |
| 4931 | set(bytecode.m_dst, addToGraph(ValueBitNot, OpInfo(), OpInfo(prediction), op1)); |
| 4932 | NEXT_OPCODE(op_bitnot); |
| 4933 | } |
| 4934 | |
| 4935 | case op_bitand: { |
| 4936 | auto bytecode = currentInstruction->as<OpBitand>(); |
| 4937 | SpeculatedType prediction = getPrediction(); |
| 4938 | Node* op1 = get(bytecode.m_lhs); |
| 4939 | Node* op2 = get(bytecode.m_rhs); |
| 4940 | if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult()) |
| 4941 | set(bytecode.m_dst, addToGraph(ArithBitAnd, op1, op2)); |
| 4942 | else |
| 4943 | set(bytecode.m_dst, addToGraph(ValueBitAnd, OpInfo(), OpInfo(prediction), op1, op2)); |
| 4944 | NEXT_OPCODE(op_bitand); |
| 4945 | } |
| 4946 | |
| 4947 | case op_bitor: { |
| 4948 | auto bytecode = currentInstruction->as<OpBitor>(); |
| 4949 | SpeculatedType prediction = getPrediction(); |
| 4950 | Node* op1 = get(bytecode.m_lhs); |
| 4951 | Node* op2 = get(bytecode.m_rhs); |
| 4952 | if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult()) |
| 4953 | set(bytecode.m_dst, addToGraph(ArithBitOr, op1, op2)); |
| 4954 | else |
| 4955 | set(bytecode.m_dst, addToGraph(ValueBitOr, OpInfo(), OpInfo(prediction), op1, op2)); |
| 4956 | NEXT_OPCODE(op_bitor); |
| 4957 | } |
| 4958 | |
| 4959 | case op_bitxor: { |
| 4960 | auto bytecode = currentInstruction->as<OpBitxor>(); |
| 4961 | SpeculatedType prediction = getPrediction(); |
| 4962 | Node* op1 = get(bytecode.m_lhs); |
| 4963 | Node* op2 = get(bytecode.m_rhs); |
| 4964 | if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult()) |
| 4965 | set(bytecode.m_dst, addToGraph(ArithBitXor, op1, op2)); |
| 4966 | else |
| 4967 | set(bytecode.m_dst, addToGraph(ValueBitXor, OpInfo(), OpInfo(prediction), op1, op2)); |
| 4968 | NEXT_OPCODE(op_bitxor); |
| 4969 | } |
| 4970 | |
| 4971 | case op_rshift: { |
| 4972 | auto bytecode = currentInstruction->as<OpRshift>(); |
| 4973 | Node* op1 = get(bytecode.m_lhs); |
| 4974 | Node* op2 = get(bytecode.m_rhs); |
| 4975 | set(bytecode.m_dst, addToGraph(BitRShift, op1, op2)); |
| 4976 | NEXT_OPCODE(op_rshift); |
| 4977 | } |
| 4978 | |
| 4979 | case op_lshift: { |
| 4980 | auto bytecode = currentInstruction->as<OpLshift>(); |
| 4981 | Node* op1 = get(bytecode.m_lhs); |
| 4982 | Node* op2 = get(bytecode.m_rhs); |
| 4983 | set(bytecode.m_dst, addToGraph(BitLShift, op1, op2)); |
| 4984 | NEXT_OPCODE(op_lshift); |
| 4985 | } |
| 4986 | |
| 4987 | case op_urshift: { |
| 4988 | auto bytecode = currentInstruction->as<OpUrshift>(); |
| 4989 | Node* op1 = get(bytecode.m_lhs); |
| 4990 | Node* op2 = get(bytecode.m_rhs); |
| 4991 | set(bytecode.m_dst, addToGraph(BitURShift, op1, op2)); |
| 4992 | NEXT_OPCODE(op_urshift); |
| 4993 | } |
| 4994 | |
| 4995 | case op_unsigned: { |
| 4996 | auto bytecode = currentInstruction->as<OpUnsigned>(); |
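// op_unsigned reinterprets an int32 as unsigned (the tail end of >>>). UInt32ToNumber
// yields a double when the value does not fit in int32; makeSafe consults baseline
// profiling so we only speculate int32 where overflow has not been seen.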
| 4997 | set(bytecode.m_dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.m_operand)))); |
| 4998 | NEXT_OPCODE(op_unsigned); |
| 4999 | } |
| 5000 | |
| 5001 | // === Increment/Decrement opcodes === |
| 5002 | |
| 5003 | case op_inc: { |
| 5004 | auto bytecode = currentInstruction->as<OpInc>(); |
| 5005 | Node* op = get(bytecode.m_srcDst); |
| 5006 | set(bytecode.m_srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); |
| 5007 | NEXT_OPCODE(op_inc); |
| 5008 | } |
| 5009 | |
| 5010 | case op_dec: { |
| 5011 | auto bytecode = currentInstruction->as<OpDec>(); |
| 5012 | Node* op = get(bytecode.m_srcDst); |
| 5013 | set(bytecode.m_srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); |
| 5014 | NEXT_OPCODE(op_dec); |
| 5015 | } |
| 5016 | |
| 5017 | // === Arithmetic operations === |
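// Same pattern as the bitwise ops: Arith* nodes require numeric inputs, while Value*
// nodes handle the generic protocol (string concatenation for ValueAdd, BigInts, objects
// with valueOf/toString). makeSafe and makeDivSafe consult baseline arithmetic profiles
// and exit history to decide which overflow and negative-zero checks each node needs.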
| 5018 | |
| 5019 | case op_add: { |
| 5020 | auto bytecode = currentInstruction->as<OpAdd>(); |
| 5021 | Node* op1 = get(bytecode.m_lhs); |
| 5022 | Node* op2 = get(bytecode.m_rhs); |
| 5023 | if (op1->hasNumberResult() && op2->hasNumberResult()) |
| 5024 | set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2))); |
| 5025 | else |
| 5026 | set(bytecode.m_dst, makeSafe(addToGraph(ValueAdd, op1, op2))); |
| 5027 | NEXT_OPCODE(op_add); |
| 5028 | } |
| 5029 | |
| 5030 | case op_sub: { |
| 5031 | auto bytecode = currentInstruction->as<OpSub>(); |
| 5032 | Node* op1 = get(bytecode.m_lhs); |
| 5033 | Node* op2 = get(bytecode.m_rhs); |
| 5034 | if (op1->hasNumberResult() && op2->hasNumberResult()) |
| 5035 | set(bytecode.m_dst, makeSafe(addToGraph(ArithSub, op1, op2))); |
| 5036 | else |
| 5037 | set(bytecode.m_dst, makeSafe(addToGraph(ValueSub, op1, op2))); |
| 5038 | NEXT_OPCODE(op_sub); |
| 5039 | } |
| 5040 | |
| 5041 | case op_negate: { |
| 5042 | auto bytecode = currentInstruction->as<OpNegate>(); |
| 5043 | Node* op1 = get(bytecode.m_operand); |
| 5044 | if (op1->hasNumberResult()) |
| 5045 | set(bytecode.m_dst, makeSafe(addToGraph(ArithNegate, op1))); |
| 5046 | else |
| 5047 | set(bytecode.m_dst, makeSafe(addToGraph(ValueNegate, op1))); |
| 5048 | NEXT_OPCODE(op_negate); |
| 5049 | } |
| 5050 | |
| 5051 | case op_mul: { |
| 5052 | // Multiply requires that the inputs are not truncated, unfortunately. |
| 5053 | auto bytecode = currentInstruction->as<OpMul>(); |
| 5054 | Node* op1 = get(bytecode.m_lhs); |
| 5055 | Node* op2 = get(bytecode.m_rhs); |
| 5056 | if (op1->hasNumberResult() && op2->hasNumberResult()) |
| 5057 | set(bytecode.m_dst, makeSafe(addToGraph(ArithMul, op1, op2))); |
| 5058 | else |
| 5059 | set(bytecode.m_dst, makeSafe(addToGraph(ValueMul, op1, op2))); |
| 5060 | NEXT_OPCODE(op_mul); |
| 5061 | } |
| 5062 | |
| 5063 | case op_mod: { |
| 5064 | auto bytecode = currentInstruction->as<OpMod>(); |
| 5065 | Node* op1 = get(bytecode.m_lhs); |
| 5066 | Node* op2 = get(bytecode.m_rhs); |
| 5067 | set(bytecode.m_dst, makeSafe(addToGraph(ArithMod, op1, op2))); |
| 5068 | NEXT_OPCODE(op_mod); |
| 5069 | } |
| 5070 | |
| 5071 | case op_pow: { |
| 5072 | // FIXME: ArithPow(Untyped, Untyped) should be supported as the same to ArithMul, ArithSub etc. |
| 5073 | // https://bugs.webkit.org/show_bug.cgi?id=160012 |
| 5074 | auto bytecode = currentInstruction->as<OpPow>(); |
| 5075 | Node* op1 = get(bytecode.m_lhs); |
| 5076 | Node* op2 = get(bytecode.m_rhs); |
| 5077 | set(bytecode.m_dst, addToGraph(ArithPow, op1, op2)); |
| 5078 | NEXT_OPCODE(op_pow); |
| 5079 | } |
| 5080 | |
| 5081 | case op_div: { |
| 5082 | auto bytecode = currentInstruction->as<OpDiv>(); |
| 5083 | Node* op1 = get(bytecode.m_lhs); |
| 5084 | Node* op2 = get(bytecode.m_rhs); |
| 5085 | if (op1->hasNumberResult() && op2->hasNumberResult()) |
| 5086 | set(bytecode.m_dst, makeDivSafe(addToGraph(ArithDiv, op1, op2))); |
| 5087 | else |
| 5088 | set(bytecode.m_dst, makeDivSafe(addToGraph(ValueDiv, op1, op2))); |
| 5089 | NEXT_OPCODE(op_div); |
| 5090 | } |
| 5091 | |
| 5092 | // === Misc operations === |
| 5093 | |
| 5094 | case op_debug: { |
| 5095 | // This is a nop in the DFG/FTL because when we set a breakpoint in the debugger, |
// we will jettison all optimized CodeBlocks that contain the breakpoint.
| 5097 | addToGraph(Check); // We add a nop here so that basic block linking doesn't break. |
| 5098 | NEXT_OPCODE(op_debug); |
| 5099 | } |
| 5100 | |
| 5101 | case op_mov: { |
| 5102 | auto bytecode = currentInstruction->as<OpMov>(); |
| 5103 | Node* op = get(bytecode.m_src); |
| 5104 | set(bytecode.m_dst, op); |
| 5105 | NEXT_OPCODE(op_mov); |
| 5106 | } |
| 5107 | |
| 5108 | case op_check_tdz: { |
| 5109 | auto bytecode = currentInstruction->as<OpCheckTdz>(); |
| 5110 | addToGraph(CheckNotEmpty, get(bytecode.m_targetVirtualRegister)); |
| 5111 | NEXT_OPCODE(op_check_tdz); |
| 5112 | } |
| 5113 | |
| 5114 | case op_overrides_has_instance: { |
| 5115 | auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); |
| 5116 | JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction(); |
| 5117 | |
| 5118 | Node* constructor = get(VirtualRegister(bytecode.m_constructor)); |
| 5119 | Node* hasInstanceValue = get(VirtualRegister(bytecode.m_hasInstanceValue)); |
| 5120 | |
| 5121 | set(VirtualRegister(bytecode.m_dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); |
| 5122 | NEXT_OPCODE(op_overrides_has_instance); |
| 5123 | } |
| 5124 | |
| 5125 | case op_identity_with_profile: { |
| 5126 | auto bytecode = currentInstruction->as<OpIdentityWithProfile>(); |
| 5127 | Node* srcDst = get(bytecode.m_srcDst); |
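// The 64-bit SpeculatedType was split into two 32-bit halves in the bytecode; reassemble it here.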
| 5128 | SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.m_topProfile) << 32 | static_cast<SpeculatedType>(bytecode.m_bottomProfile); |
| 5129 | set(bytecode.m_srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst)); |
| 5130 | NEXT_OPCODE(op_identity_with_profile); |
| 5131 | } |
| 5132 | |
| 5133 | case op_instanceof: { |
| 5134 | auto bytecode = currentInstruction->as<OpInstanceof>(); |
| 5135 | |
| 5136 | InstanceOfStatus status = InstanceOfStatus::computeFor( |
| 5137 | m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap, |
| 5138 | m_currentIndex); |
| 5139 | |
| 5140 | Node* value = get(bytecode.m_value); |
| 5141 | Node* prototype = get(bytecode.m_prototype); |
| 5142 | |
| 5143 | // Only inline it if it's Simple with a commonPrototype; bottom/top or variable |
| 5144 | // prototypes both get handled by the IC. This makes sense for bottom (unprofiled) |
| 5145 | // instanceof ICs because the profit of this optimization is fairly low. So, in the |
// absence of any information, it's better to avoid making this the cause of a
// recompilation.
| 5148 | if (JSObject* commonPrototype = status.commonPrototype()) { |
| 5149 | addToGraph(CheckCell, OpInfo(m_graph.freeze(commonPrototype)), prototype); |
| 5150 | |
| 5151 | bool allOK = true; |
| 5152 | MatchStructureData* data = m_graph.m_matchStructureData.add(); |
| 5153 | for (const InstanceOfVariant& variant : status.variants()) { |
| 5154 | if (!check(variant.conditionSet())) { |
| 5155 | allOK = false; |
| 5156 | break; |
| 5157 | } |
| 5158 | for (Structure* structure : variant.structureSet()) { |
| 5159 | MatchStructureVariant matchVariant; |
| 5160 | matchVariant.structure = m_graph.registerStructure(structure); |
| 5161 | matchVariant.result = variant.isHit(); |
| 5162 | |
| 5163 | data->variants.append(WTFMove(matchVariant)); |
| 5164 | } |
| 5165 | } |
| 5166 | |
| 5167 | if (allOK) { |
| 5168 | Node* match = addToGraph(MatchStructure, OpInfo(data), value); |
| 5169 | set(bytecode.m_dst, match); |
| 5170 | NEXT_OPCODE(op_instanceof); |
| 5171 | } |
| 5172 | } |
| 5173 | |
| 5174 | set(bytecode.m_dst, addToGraph(InstanceOf, value, prototype)); |
| 5175 | NEXT_OPCODE(op_instanceof); |
| 5176 | } |
| 5177 | |
| 5178 | case op_instanceof_custom: { |
| 5179 | auto bytecode = currentInstruction->as<OpInstanceofCustom>(); |
| 5180 | Node* value = get(bytecode.m_value); |
| 5181 | Node* constructor = get(bytecode.m_constructor); |
| 5182 | Node* hasInstanceValue = get(bytecode.m_hasInstanceValue); |
| 5183 | set(bytecode.m_dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); |
| 5184 | NEXT_OPCODE(op_instanceof_custom); |
| 5185 | } |
| 5186 | case op_is_empty: { |
| 5187 | auto bytecode = currentInstruction->as<OpIsEmpty>(); |
| 5188 | Node* value = get(bytecode.m_operand); |
| 5189 | set(bytecode.m_dst, addToGraph(IsEmpty, value)); |
| 5190 | NEXT_OPCODE(op_is_empty); |
| 5191 | } |
| 5192 | case op_is_undefined: { |
| 5193 | auto bytecode = currentInstruction->as<OpIsUndefined>(); |
| 5194 | Node* value = get(bytecode.m_operand); |
| 5195 | set(bytecode.m_dst, addToGraph(IsUndefined, value)); |
| 5196 | NEXT_OPCODE(op_is_undefined); |
| 5197 | } |
| 5198 | case op_is_undefined_or_null: { |
| 5199 | auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>(); |
| 5200 | Node* value = get(bytecode.m_operand); |
| 5201 | set(bytecode.m_dst, addToGraph(IsUndefinedOrNull, value)); |
| 5202 | NEXT_OPCODE(op_is_undefined_or_null); |
| 5203 | } |
| 5204 | |
| 5205 | case op_is_boolean: { |
| 5206 | auto bytecode = currentInstruction->as<OpIsBoolean>(); |
| 5207 | Node* value = get(bytecode.m_operand); |
| 5208 | set(bytecode.m_dst, addToGraph(IsBoolean, value)); |
| 5209 | NEXT_OPCODE(op_is_boolean); |
| 5210 | } |
| 5211 | |
| 5212 | case op_is_number: { |
| 5213 | auto bytecode = currentInstruction->as<OpIsNumber>(); |
| 5214 | Node* value = get(bytecode.m_operand); |
| 5215 | set(bytecode.m_dst, addToGraph(IsNumber, value)); |
| 5216 | NEXT_OPCODE(op_is_number); |
| 5217 | } |
| 5218 | |
| 5219 | case op_is_cell_with_type: { |
| 5220 | auto bytecode = currentInstruction->as<OpIsCellWithType>(); |
| 5221 | Node* value = get(bytecode.m_operand); |
| 5222 | set(bytecode.m_dst, addToGraph(IsCellWithType, OpInfo(bytecode.m_type), value)); |
| 5223 | NEXT_OPCODE(op_is_cell_with_type); |
| 5224 | } |
| 5225 | |
| 5226 | case op_is_object: { |
| 5227 | auto bytecode = currentInstruction->as<OpIsObject>(); |
| 5228 | Node* value = get(bytecode.m_operand); |
| 5229 | set(bytecode.m_dst, addToGraph(IsObject, value)); |
| 5230 | NEXT_OPCODE(op_is_object); |
| 5231 | } |
| 5232 | |
| 5233 | case op_is_object_or_null: { |
| 5234 | auto bytecode = currentInstruction->as<OpIsObjectOrNull>(); |
| 5235 | Node* value = get(bytecode.m_operand); |
| 5236 | set(bytecode.m_dst, addToGraph(IsObjectOrNull, value)); |
| 5237 | NEXT_OPCODE(op_is_object_or_null); |
| 5238 | } |
| 5239 | |
| 5240 | case op_is_function: { |
| 5241 | auto bytecode = currentInstruction->as<OpIsFunction>(); |
| 5242 | Node* value = get(bytecode.m_operand); |
| 5243 | set(bytecode.m_dst, addToGraph(IsFunction, value)); |
| 5244 | NEXT_OPCODE(op_is_function); |
| 5245 | } |
| 5246 | |
| 5247 | case op_not: { |
| 5248 | auto bytecode = currentInstruction->as<OpNot>(); |
| 5249 | Node* value = get(bytecode.m_operand); |
| 5250 | set(bytecode.m_dst, addToGraph(LogicalNot, value)); |
| 5251 | NEXT_OPCODE(op_not); |
| 5252 | } |
| 5253 | |
| 5254 | case op_to_primitive: { |
| 5255 | auto bytecode = currentInstruction->as<OpToPrimitive>(); |
| 5256 | Node* value = get(bytecode.m_src); |
| 5257 | set(bytecode.m_dst, addToGraph(ToPrimitive, value)); |
| 5258 | NEXT_OPCODE(op_to_primitive); |
| 5259 | } |
| 5260 | |
| 5261 | case op_strcat: { |
| 5262 | auto bytecode = currentInstruction->as<OpStrcat>(); |
| 5263 | int startOperand = bytecode.m_src.offset(); |
| 5264 | int numOperands = bytecode.m_count; |
| 5265 | #if CPU(X86) |
| 5266 | // X86 doesn't have enough registers to compile MakeRope with three arguments. The |
| 5267 | // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever, |
| 5268 | // we just make StrCat dumber on this processor. |
| 5269 | const unsigned maxArguments = 2; |
| 5270 | #else |
| 5271 | const unsigned maxArguments = 3; |
| 5272 | #endif |
| 5273 | Node* operands[AdjacencyList::Size]; |
| 5274 | unsigned indexInOperands = 0; |
| 5275 | for (unsigned i = 0; i < AdjacencyList::Size; ++i) |
| 5276 | operands[i] = 0; |
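// Fold the operand list into a left-leaning chain of StrCat nodes with at most
// maxArguments children each; e.g. with five operands and maxArguments == 3 we
// build StrCat(StrCat(s0, s1, s2), s3, s4).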
| 5277 | for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) { |
| 5278 | if (indexInOperands == maxArguments) { |
| 5279 | operands[0] = addToGraph(StrCat, operands[0], operands[1], operands[2]); |
| 5280 | for (unsigned i = 1; i < AdjacencyList::Size; ++i) |
| 5281 | operands[i] = 0; |
| 5282 | indexInOperands = 1; |
| 5283 | } |
| 5284 | |
| 5285 | ASSERT(indexInOperands < AdjacencyList::Size); |
| 5286 | ASSERT(indexInOperands < maxArguments); |
| 5287 | operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx)); |
| 5288 | } |
| 5289 | set(bytecode.m_dst, addToGraph(StrCat, operands[0], operands[1], operands[2])); |
| 5290 | NEXT_OPCODE(op_strcat); |
| 5291 | } |
| 5292 | |
| 5293 | case op_less: { |
| 5294 | auto bytecode = currentInstruction->as<OpLess>(); |
| 5295 | Node* op1 = get(bytecode.m_lhs); |
| 5296 | Node* op2 = get(bytecode.m_rhs); |
| 5297 | set(bytecode.m_dst, addToGraph(CompareLess, op1, op2)); |
| 5298 | NEXT_OPCODE(op_less); |
| 5299 | } |
| 5300 | |
| 5301 | case op_lesseq: { |
| 5302 | auto bytecode = currentInstruction->as<OpLesseq>(); |
| 5303 | Node* op1 = get(bytecode.m_lhs); |
| 5304 | Node* op2 = get(bytecode.m_rhs); |
| 5305 | set(bytecode.m_dst, addToGraph(CompareLessEq, op1, op2)); |
| 5306 | NEXT_OPCODE(op_lesseq); |
| 5307 | } |
| 5308 | |
| 5309 | case op_greater: { |
| 5310 | auto bytecode = currentInstruction->as<OpGreater>(); |
| 5311 | Node* op1 = get(bytecode.m_lhs); |
| 5312 | Node* op2 = get(bytecode.m_rhs); |
| 5313 | set(bytecode.m_dst, addToGraph(CompareGreater, op1, op2)); |
| 5314 | NEXT_OPCODE(op_greater); |
| 5315 | } |
| 5316 | |
| 5317 | case op_greatereq: { |
| 5318 | auto bytecode = currentInstruction->as<OpGreatereq>(); |
| 5319 | Node* op1 = get(bytecode.m_lhs); |
| 5320 | Node* op2 = get(bytecode.m_rhs); |
| 5321 | set(bytecode.m_dst, addToGraph(CompareGreaterEq, op1, op2)); |
| 5322 | NEXT_OPCODE(op_greatereq); |
| 5323 | } |
| 5324 | |
| 5325 | case op_below: { |
| 5326 | auto bytecode = currentInstruction->as<OpBelow>(); |
| 5327 | Node* op1 = get(bytecode.m_lhs); |
| 5328 | Node* op2 = get(bytecode.m_rhs); |
| 5329 | set(bytecode.m_dst, addToGraph(CompareBelow, op1, op2)); |
| 5330 | NEXT_OPCODE(op_below); |
| 5331 | } |
| 5332 | |
| 5333 | case op_beloweq: { |
| 5334 | auto bytecode = currentInstruction->as<OpBeloweq>(); |
| 5335 | Node* op1 = get(bytecode.m_lhs); |
| 5336 | Node* op2 = get(bytecode.m_rhs); |
| 5337 | set(bytecode.m_dst, addToGraph(CompareBelowEq, op1, op2)); |
| 5338 | NEXT_OPCODE(op_beloweq); |
| 5339 | } |
| 5340 | |
| 5341 | case op_eq: { |
| 5342 | auto bytecode = currentInstruction->as<OpEq>(); |
| 5343 | Node* op1 = get(bytecode.m_lhs); |
| 5344 | Node* op2 = get(bytecode.m_rhs); |
| 5345 | set(bytecode.m_dst, addToGraph(CompareEq, op1, op2)); |
| 5346 | NEXT_OPCODE(op_eq); |
| 5347 | } |
| 5348 | |
| 5349 | case op_eq_null: { |
| 5350 | auto bytecode = currentInstruction->as<OpEqNull>(); |
| 5351 | Node* value = get(bytecode.m_operand); |
| 5352 | Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); |
| 5353 | set(bytecode.m_dst, addToGraph(CompareEq, value, nullConstant)); |
| 5354 | NEXT_OPCODE(op_eq_null); |
| 5355 | } |
| 5356 | |
| 5357 | case op_stricteq: { |
| 5358 | auto bytecode = currentInstruction->as<OpStricteq>(); |
| 5359 | Node* op1 = get(bytecode.m_lhs); |
| 5360 | Node* op2 = get(bytecode.m_rhs); |
| 5361 | set(bytecode.m_dst, addToGraph(CompareStrictEq, op1, op2)); |
| 5362 | NEXT_OPCODE(op_stricteq); |
| 5363 | } |
| 5364 | |
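// There is no dedicated "not equal" node: op_neq and op_nstricteq below compile to
// LogicalNot wrapped around the corresponding equality compare.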
| 5365 | case op_neq: { |
| 5366 | auto bytecode = currentInstruction->as<OpNeq>(); |
| 5367 | Node* op1 = get(bytecode.m_lhs); |
| 5368 | Node* op2 = get(bytecode.m_rhs); |
| 5369 | set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); |
| 5370 | NEXT_OPCODE(op_neq); |
| 5371 | } |
| 5372 | |
| 5373 | case op_neq_null: { |
| 5374 | auto bytecode = currentInstruction->as<OpNeqNull>(); |
| 5375 | Node* value = get(bytecode.m_operand); |
| 5376 | Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); |
| 5377 | set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant))); |
| 5378 | NEXT_OPCODE(op_neq_null); |
| 5379 | } |
| 5380 | |
| 5381 | case op_nstricteq: { |
| 5382 | auto bytecode = currentInstruction->as<OpNstricteq>(); |
| 5383 | Node* op1 = get(bytecode.m_lhs); |
| 5384 | Node* op2 = get(bytecode.m_rhs); |
Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
| 5387 | set(bytecode.m_dst, addToGraph(LogicalNot, invertedResult)); |
| 5388 | NEXT_OPCODE(op_nstricteq); |
| 5389 | } |
| 5390 | |
| 5391 | // === Property access operations === |
| 5392 | |
| 5393 | case op_get_by_val: { |
| 5394 | auto bytecode = currentInstruction->as<OpGetByVal>(); |
| 5395 | SpeculatedType prediction = getPredictionWithoutOSRExit(); |
| 5396 | |
| 5397 | Node* base = get(bytecode.m_base); |
| 5398 | Node* property = get(bytecode.m_property); |
| 5399 | bool compiledAsGetById = false; |
| 5400 | GetByIdStatus getByIdStatus; |
| 5401 | unsigned identifierNumber = 0; |
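// If the baseline by-val IC stabilized on a single property name (a cached string ident
// or symbol), we can pin the property with CheckStringIdent/CheckCell and compile this
// access as a get_by_id instead, unlocking the identifier-based machinery below.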
| 5402 | { |
| 5403 | ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); |
| 5404 | ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo; |
// FIXME: When the bytecode has not been compiled in the baseline JIT, byValInfo is null,
// and we have no profiling information.
| 5407 | if (byValInfo |
| 5408 | && byValInfo->stubInfo |
| 5409 | && !byValInfo->tookSlowPath |
| 5410 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) |
| 5411 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) |
| 5412 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { |
| 5413 | compiledAsGetById = true; |
| 5414 | identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); |
| 5415 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; |
| 5416 | |
| 5417 | if (Symbol* symbol = byValInfo->cachedSymbol.get()) { |
| 5418 | FrozenValue* frozen = m_graph.freezeStrong(symbol); |
| 5419 | addToGraph(CheckCell, OpInfo(frozen), property); |
| 5420 | } else { |
| 5421 | ASSERT(!uid->isSymbol()); |
| 5422 | addToGraph(CheckStringIdent, OpInfo(uid), property); |
| 5423 | } |
| 5424 | |
| 5425 | getByIdStatus = GetByIdStatus::computeForStubInfo( |
| 5426 | locker, m_inlineStackTop->m_profiledBlock, |
| 5427 | byValInfo->stubInfo, currentCodeOrigin(), uid); |
| 5428 | } |
| 5429 | } |
| 5430 | |
| 5431 | if (compiledAsGetById) |
| 5432 | handleGetById(bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size()); |
| 5433 | else { |
| 5434 | ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read); |
| 5435 | // FIXME: We could consider making this not vararg, since it only uses three child |
| 5436 | // slots. |
| 5437 | // https://bugs.webkit.org/show_bug.cgi?id=184192 |
| 5438 | addVarArgChild(base); |
| 5439 | addVarArgChild(property); |
| 5440 | addVarArgChild(0); // Leave room for property storage. |
| 5441 | Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction)); |
| 5442 | m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic. |
| 5443 | set(bytecode.m_dst, getByVal); |
| 5444 | } |
| 5445 | |
| 5446 | NEXT_OPCODE(op_get_by_val); |
| 5447 | } |
| 5448 | |
| 5449 | case op_get_by_val_with_this: { |
| 5450 | auto bytecode = currentInstruction->as<OpGetByValWithThis>(); |
| 5451 | SpeculatedType prediction = getPrediction(); |
| 5452 | |
| 5453 | Node* base = get(bytecode.m_base); |
| 5454 | Node* thisValue = get(bytecode.m_thisValue); |
| 5455 | Node* property = get(bytecode.m_property); |
| 5456 | Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property); |
| 5457 | set(bytecode.m_dst, getByValWithThis); |
| 5458 | |
| 5459 | NEXT_OPCODE(op_get_by_val_with_this); |
| 5460 | } |
| 5461 | |
| 5462 | case op_put_by_val_direct: |
| 5463 | handlePutByVal(currentInstruction->as<OpPutByValDirect>(), currentInstruction->size()); |
| 5464 | NEXT_OPCODE(op_put_by_val_direct); |
| 5465 | |
| 5466 | case op_put_by_val: { |
| 5467 | handlePutByVal(currentInstruction->as<OpPutByVal>(), currentInstruction->size()); |
| 5468 | NEXT_OPCODE(op_put_by_val); |
| 5469 | } |
| 5470 | |
| 5471 | case op_put_by_val_with_this: { |
| 5472 | auto bytecode = currentInstruction->as<OpPutByValWithThis>(); |
| 5473 | Node* base = get(bytecode.m_base); |
| 5474 | Node* thisValue = get(bytecode.m_thisValue); |
| 5475 | Node* property = get(bytecode.m_property); |
| 5476 | Node* value = get(bytecode.m_value); |
| 5477 | |
| 5478 | addVarArgChild(base); |
| 5479 | addVarArgChild(thisValue); |
| 5480 | addVarArgChild(property); |
| 5481 | addVarArgChild(value); |
| 5482 | addToGraph(Node::VarArg, PutByValWithThis, OpInfo(0), OpInfo(0)); |
| 5483 | |
| 5484 | NEXT_OPCODE(op_put_by_val_with_this); |
| 5485 | } |
| 5486 | |
| 5487 | case op_define_data_property: { |
| 5488 | auto bytecode = currentInstruction->as<OpDefineDataProperty>(); |
| 5489 | Node* base = get(bytecode.m_base); |
| 5490 | Node* property = get(bytecode.m_property); |
| 5491 | Node* value = get(bytecode.m_value); |
| 5492 | Node* attributes = get(bytecode.m_attributes); |
| 5493 | |
| 5494 | addVarArgChild(base); |
| 5495 | addVarArgChild(property); |
| 5496 | addVarArgChild(value); |
| 5497 | addVarArgChild(attributes); |
| 5498 | addToGraph(Node::VarArg, DefineDataProperty, OpInfo(0), OpInfo(0)); |
| 5499 | |
| 5500 | NEXT_OPCODE(op_define_data_property); |
| 5501 | } |
| 5502 | |
| 5503 | case op_define_accessor_property: { |
| 5504 | auto bytecode = currentInstruction->as<OpDefineAccessorProperty>(); |
| 5505 | Node* base = get(bytecode.m_base); |
| 5506 | Node* property = get(bytecode.m_property); |
| 5507 | Node* getter = get(bytecode.m_getter); |
| 5508 | Node* setter = get(bytecode.m_setter); |
| 5509 | Node* attributes = get(bytecode.m_attributes); |
| 5510 | |
| 5511 | addVarArgChild(base); |
| 5512 | addVarArgChild(property); |
| 5513 | addVarArgChild(getter); |
| 5514 | addVarArgChild(setter); |
| 5515 | addVarArgChild(attributes); |
| 5516 | addToGraph(Node::VarArg, DefineAccessorProperty, OpInfo(0), OpInfo(0)); |
| 5517 | |
| 5518 | NEXT_OPCODE(op_define_accessor_property); |
| 5519 | } |
| 5520 | |
| 5521 | case op_get_by_id_direct: { |
| 5522 | parseGetById<OpGetByIdDirect>(currentInstruction); |
| 5523 | NEXT_OPCODE(op_get_by_id_direct); |
| 5524 | } |
| 5525 | case op_try_get_by_id: { |
| 5526 | parseGetById<OpTryGetById>(currentInstruction); |
| 5527 | NEXT_OPCODE(op_try_get_by_id); |
| 5528 | } |
| 5529 | case op_get_by_id: { |
| 5530 | parseGetById<OpGetById>(currentInstruction); |
| 5531 | NEXT_OPCODE(op_get_by_id); |
| 5532 | } |
| 5533 | case op_get_by_id_with_this: { |
| 5534 | SpeculatedType prediction = getPrediction(); |
| 5535 | |
| 5536 | auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); |
| 5537 | Node* base = get(bytecode.m_base); |
| 5538 | Node* thisValue = get(bytecode.m_thisValue); |
| 5539 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 5540 | |
| 5541 | set(bytecode.m_dst, |
| 5542 | addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue)); |
| 5543 | |
| 5544 | NEXT_OPCODE(op_get_by_id_with_this); |
| 5545 | } |
| 5546 | case op_put_by_id: { |
| 5547 | auto bytecode = currentInstruction->as<OpPutById>(); |
| 5548 | Node* value = get(bytecode.m_value); |
| 5549 | Node* base = get(bytecode.m_base); |
| 5550 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 5551 | bool direct = !!(bytecode.m_flags & PutByIdIsDirect); |
| 5552 | |
| 5553 | PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( |
| 5554 | m_inlineStackTop->m_profiledBlock, |
| 5555 | m_inlineStackTop->m_baselineMap, m_icContextStack, |
| 5556 | currentCodeOrigin(), m_graph.identifiers()[identifierNumber]); |
| 5557 | |
| 5558 | handlePutById(base, identifierNumber, value, putByIdStatus, direct, currentInstruction->size()); |
| 5559 | NEXT_OPCODE(op_put_by_id); |
| 5560 | } |
| 5561 | |
| 5562 | case op_put_by_id_with_this: { |
| 5563 | auto bytecode = currentInstruction->as<OpPutByIdWithThis>(); |
| 5564 | Node* base = get(bytecode.m_base); |
| 5565 | Node* thisValue = get(bytecode.m_thisValue); |
| 5566 | Node* value = get(bytecode.m_value); |
| 5567 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 5568 | |
| 5569 | addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value); |
| 5570 | NEXT_OPCODE(op_put_by_id_with_this); |
| 5571 | } |
| 5572 | |
| 5573 | case op_put_getter_by_id: |
| 5574 | handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>()); |
| 5575 | NEXT_OPCODE(op_put_getter_by_id); |
| 5576 | case op_put_setter_by_id: { |
| 5577 | handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>()); |
| 5578 | NEXT_OPCODE(op_put_setter_by_id); |
| 5579 | } |
| 5580 | |
| 5581 | case op_put_getter_setter_by_id: { |
| 5582 | auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); |
| 5583 | Node* base = get(bytecode.m_base); |
| 5584 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 5585 | Node* getter = get(bytecode.m_getter); |
| 5586 | Node* setter = get(bytecode.m_setter); |
| 5587 | addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, getter, setter); |
| 5588 | NEXT_OPCODE(op_put_getter_setter_by_id); |
| 5589 | } |
| 5590 | |
| 5591 | case op_put_getter_by_val: |
| 5592 | handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>()); |
| 5593 | NEXT_OPCODE(op_put_getter_by_val); |
| 5594 | case op_put_setter_by_val: { |
| 5595 | handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>()); |
| 5596 | NEXT_OPCODE(op_put_setter_by_val); |
| 5597 | } |
| 5598 | |
| 5599 | case op_del_by_id: { |
| 5600 | auto bytecode = currentInstruction->as<OpDelById>(); |
| 5601 | Node* base = get(bytecode.m_base); |
| 5602 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 5603 | set(bytecode.m_dst, addToGraph(DeleteById, OpInfo(identifierNumber), base)); |
| 5604 | NEXT_OPCODE(op_del_by_id); |
| 5605 | } |
| 5606 | |
| 5607 | case op_del_by_val: { |
| 5608 | auto bytecode = currentInstruction->as<OpDelByVal>(); |
| 5609 | Node* base = get(bytecode.m_base); |
| 5610 | Node* key = get(bytecode.m_property); |
| 5611 | set(bytecode.m_dst, addToGraph(DeleteByVal, base, key)); |
| 5612 | NEXT_OPCODE(op_del_by_val); |
| 5613 | } |
| 5614 | |
| 5615 | case op_profile_type: { |
| 5616 | auto bytecode = currentInstruction->as<OpProfileType>(); |
| 5617 | auto& metadata = bytecode.metadata(codeBlock); |
| 5618 | Node* valueToProfile = get(bytecode.m_targetVirtualRegister); |
| 5619 | addToGraph(ProfileType, OpInfo(metadata.m_typeLocation), valueToProfile); |
| 5620 | NEXT_OPCODE(op_profile_type); |
| 5621 | } |
| 5622 | |
| 5623 | case op_profile_control_flow: { |
| 5624 | auto bytecode = currentInstruction->as<OpProfileControlFlow>(); |
| 5625 | BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).m_basicBlockLocation; |
| 5626 | addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation)); |
| 5627 | NEXT_OPCODE(op_profile_control_flow); |
| 5628 | } |
| 5629 | |
| 5630 | // === Block terminators. === |
| 5631 | |
| 5632 | case op_jmp: { |
| 5633 | ASSERT(!m_currentBlock->terminal()); |
| 5634 | auto bytecode = currentInstruction->as<OpJmp>(); |
| 5635 | int relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5636 | addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); |
| 5637 | if (relativeOffset <= 0) |
| 5638 | flushForTerminal(); |
| 5639 | LAST_OPCODE(op_jmp); |
| 5640 | } |
| 5641 | |
| 5642 | case op_jtrue: { |
| 5643 | auto bytecode = currentInstruction->as<OpJtrue>(); |
| 5644 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5645 | Node* condition = get(bytecode.m_condition); |
| 5646 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5647 | LAST_OPCODE(op_jtrue); |
| 5648 | } |
| 5649 | |
| 5650 | case op_jfalse: { |
| 5651 | auto bytecode = currentInstruction->as<OpJfalse>(); |
| 5652 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5653 | Node* condition = get(bytecode.m_condition); |
| 5654 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5655 | LAST_OPCODE(op_jfalse); |
| 5656 | } |
| 5657 | |
| 5658 | case op_jeq_null: { |
| 5659 | auto bytecode = currentInstruction->as<OpJeqNull>(); |
| 5660 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5661 | Node* value = get(bytecode.m_value); |
| 5662 | Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); |
| 5663 | Node* condition = addToGraph(CompareEq, value, nullConstant); |
| 5664 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5665 | LAST_OPCODE(op_jeq_null); |
| 5666 | } |
| 5667 | |
| 5668 | case op_jneq_null: { |
| 5669 | auto bytecode = currentInstruction->as<OpJneqNull>(); |
| 5670 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5671 | Node* value = get(bytecode.m_value); |
| 5672 | Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); |
| 5673 | Node* condition = addToGraph(CompareEq, value, nullConstant); |
| 5674 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5675 | LAST_OPCODE(op_jneq_null); |
| 5676 | } |
| 5677 | |
| 5678 | case op_jless: { |
| 5679 | auto bytecode = currentInstruction->as<OpJless>(); |
| 5680 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5681 | Node* op1 = get(bytecode.m_lhs); |
| 5682 | Node* op2 = get(bytecode.m_rhs); |
| 5683 | Node* condition = addToGraph(CompareLess, op1, op2); |
| 5684 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5685 | LAST_OPCODE(op_jless); |
| 5686 | } |
| 5687 | |
| 5688 | case op_jlesseq: { |
| 5689 | auto bytecode = currentInstruction->as<OpJlesseq>(); |
| 5690 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5691 | Node* op1 = get(bytecode.m_lhs); |
| 5692 | Node* op2 = get(bytecode.m_rhs); |
| 5693 | Node* condition = addToGraph(CompareLessEq, op1, op2); |
| 5694 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5695 | LAST_OPCODE(op_jlesseq); |
| 5696 | } |
| 5697 | |
| 5698 | case op_jgreater: { |
| 5699 | auto bytecode = currentInstruction->as<OpJgreater>(); |
| 5700 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5701 | Node* op1 = get(bytecode.m_lhs); |
| 5702 | Node* op2 = get(bytecode.m_rhs); |
| 5703 | Node* condition = addToGraph(CompareGreater, op1, op2); |
| 5704 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5705 | LAST_OPCODE(op_jgreater); |
| 5706 | } |
| 5707 | |
| 5708 | case op_jgreatereq: { |
| 5709 | auto bytecode = currentInstruction->as<OpJgreatereq>(); |
| 5710 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5711 | Node* op1 = get(bytecode.m_lhs); |
| 5712 | Node* op2 = get(bytecode.m_rhs); |
| 5713 | Node* condition = addToGraph(CompareGreaterEq, op1, op2); |
| 5714 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5715 | LAST_OPCODE(op_jgreatereq); |
| 5716 | } |
| 5717 | |
| 5718 | case op_jeq: { |
| 5719 | auto bytecode = currentInstruction->as<OpJeq>(); |
| 5720 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5721 | Node* op1 = get(bytecode.m_lhs); |
| 5722 | Node* op2 = get(bytecode.m_rhs); |
| 5723 | Node* condition = addToGraph(CompareEq, op1, op2); |
| 5724 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5725 | LAST_OPCODE(op_jeq); |
| 5726 | } |
| 5727 | |
| 5728 | case op_jstricteq: { |
| 5729 | auto bytecode = currentInstruction->as<OpJstricteq>(); |
| 5730 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5731 | Node* op1 = get(bytecode.m_lhs); |
| 5732 | Node* op2 = get(bytecode.m_rhs); |
| 5733 | Node* condition = addToGraph(CompareStrictEq, op1, op2); |
| 5734 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5735 | LAST_OPCODE(op_jstricteq); |
| 5736 | } |
| 5737 | |
| 5738 | case op_jnless: { |
| 5739 | auto bytecode = currentInstruction->as<OpJnless>(); |
| 5740 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5741 | Node* op1 = get(bytecode.m_lhs); |
| 5742 | Node* op2 = get(bytecode.m_rhs); |
| 5743 | Node* condition = addToGraph(CompareLess, op1, op2); |
| 5744 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5745 | LAST_OPCODE(op_jnless); |
| 5746 | } |
| 5747 | |
| 5748 | case op_jnlesseq: { |
| 5749 | auto bytecode = currentInstruction->as<OpJnlesseq>(); |
| 5750 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5751 | Node* op1 = get(bytecode.m_lhs); |
| 5752 | Node* op2 = get(bytecode.m_rhs); |
| 5753 | Node* condition = addToGraph(CompareLessEq, op1, op2); |
| 5754 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5755 | LAST_OPCODE(op_jnlesseq); |
| 5756 | } |
| 5757 | |
| 5758 | case op_jngreater: { |
| 5759 | auto bytecode = currentInstruction->as<OpJngreater>(); |
| 5760 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5761 | Node* op1 = get(bytecode.m_lhs); |
| 5762 | Node* op2 = get(bytecode.m_rhs); |
| 5763 | Node* condition = addToGraph(CompareGreater, op1, op2); |
| 5764 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5765 | LAST_OPCODE(op_jngreater); |
| 5766 | } |
| 5767 | |
| 5768 | case op_jngreatereq: { |
| 5769 | auto bytecode = currentInstruction->as<OpJngreatereq>(); |
| 5770 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5771 | Node* op1 = get(bytecode.m_lhs); |
| 5772 | Node* op2 = get(bytecode.m_rhs); |
| 5773 | Node* condition = addToGraph(CompareGreaterEq, op1, op2); |
| 5774 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5775 | LAST_OPCODE(op_jngreatereq); |
| 5776 | } |
| 5777 | |
| 5778 | case op_jneq: { |
| 5779 | auto bytecode = currentInstruction->as<OpJneq>(); |
| 5780 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5781 | Node* op1 = get(bytecode.m_lhs); |
| 5782 | Node* op2 = get(bytecode.m_rhs); |
| 5783 | Node* condition = addToGraph(CompareEq, op1, op2); |
| 5784 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5785 | LAST_OPCODE(op_jneq); |
| 5786 | } |
| 5787 | |
| 5788 | case op_jnstricteq: { |
| 5789 | auto bytecode = currentInstruction->as<OpJnstricteq>(); |
| 5790 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5791 | Node* op1 = get(bytecode.m_lhs); |
| 5792 | Node* op2 = get(bytecode.m_rhs); |
| 5793 | Node* condition = addToGraph(CompareStrictEq, op1, op2); |
| 5794 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 5795 | LAST_OPCODE(op_jnstricteq); |
| 5796 | } |
| 5797 | |
| 5798 | case op_jbelow: { |
| 5799 | auto bytecode = currentInstruction->as<OpJbelow>(); |
| 5800 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5801 | Node* op1 = get(bytecode.m_lhs); |
| 5802 | Node* op2 = get(bytecode.m_rhs); |
| 5803 | Node* condition = addToGraph(CompareBelow, op1, op2); |
| 5804 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5805 | LAST_OPCODE(op_jbelow); |
| 5806 | } |
| 5807 | |
| 5808 | case op_jbeloweq: { |
| 5809 | auto bytecode = currentInstruction->as<OpJbeloweq>(); |
| 5810 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 5811 | Node* op1 = get(bytecode.m_lhs); |
| 5812 | Node* op2 = get(bytecode.m_rhs); |
| 5813 | Node* condition = addToGraph(CompareBelowEq, op1, op2); |
| 5814 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); |
| 5815 | LAST_OPCODE(op_jbeloweq); |
| 5816 | } |
| 5817 | |
| 5818 | case op_switch_imm: { |
| 5819 | auto bytecode = currentInstruction->as<OpSwitchImm>(); |
| 5820 | SwitchData& data = *m_graph.m_switchData.add(); |
| 5821 | data.kind = SwitchImm; |
| 5822 | data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex]; |
| 5823 | data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset)); |
| 5824 | SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); |
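// Skip holes in the table, and skip cases that branch to the same place as the default;
// leaving those to the fallThrough edge keeps the case list minimal.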
| 5825 | for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { |
| 5826 | if (!table.branchOffsets[i]) |
| 5827 | continue; |
| 5828 | unsigned target = m_currentIndex + table.branchOffsets[i]; |
| 5829 | if (target == data.fallThrough.bytecodeIndex()) |
| 5830 | continue; |
| 5831 | data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target)); |
| 5832 | } |
| 5833 | addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee)); |
| 5834 | flushIfTerminal(data); |
| 5835 | LAST_OPCODE(op_switch_imm); |
| 5836 | } |
| 5837 | |
| 5838 | case op_switch_char: { |
| 5839 | auto bytecode = currentInstruction->as<OpSwitchChar>(); |
| 5840 | SwitchData& data = *m_graph.m_switchData.add(); |
| 5841 | data.kind = SwitchChar; |
| 5842 | data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex]; |
| 5843 | data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset)); |
| 5844 | SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); |
| 5845 | for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { |
| 5846 | if (!table.branchOffsets[i]) |
| 5847 | continue; |
| 5848 | unsigned target = m_currentIndex + table.branchOffsets[i]; |
| 5849 | if (target == data.fallThrough.bytecodeIndex()) |
| 5850 | continue; |
| 5851 | data.cases.append( |
| 5852 | SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); |
| 5853 | } |
| 5854 | addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee)); |
| 5855 | flushIfTerminal(data); |
| 5856 | LAST_OPCODE(op_switch_char); |
| 5857 | } |
| 5858 | |
| 5859 | case op_switch_string: { |
| 5860 | auto bytecode = currentInstruction->as<OpSwitchString>(); |
| 5861 | SwitchData& data = *m_graph.m_switchData.add(); |
| 5862 | data.kind = SwitchString; |
| 5863 | data.switchTableIndex = bytecode.m_tableIndex; |
| 5864 | data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset)); |
| 5865 | StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); |
| 5866 | StringJumpTable::StringOffsetTable::iterator iter; |
| 5867 | StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); |
| 5868 | for (iter = table.offsetTable.begin(); iter != end; ++iter) { |
| 5869 | unsigned target = m_currentIndex + iter->value.branchOffset; |
| 5870 | if (target == data.fallThrough.bytecodeIndex()) |
| 5871 | continue; |
| 5872 | data.cases.append( |
| 5873 | SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); |
| 5874 | } |
| 5875 | addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee)); |
| 5876 | flushIfTerminal(data); |
| 5877 | LAST_OPCODE(op_switch_string); |
| 5878 | } |
| 5879 | |
| 5880 | case op_ret: { |
| 5881 | auto bytecode = currentInstruction->as<OpRet>(); |
| 5882 | ASSERT(!m_currentBlock->terminal()); |
| 5883 | if (!inlineCallFrame()) { |
// Simple case: we are just producing a return.
| 5885 | addToGraph(Return, get(bytecode.m_value)); |
| 5886 | flushForReturn(); |
| 5887 | LAST_OPCODE(op_ret); |
| 5888 | } |
| 5889 | |
| 5890 | flushForReturn(); |
| 5891 | if (m_inlineStackTop->m_returnValue.isValid()) |
| 5892 | setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush); |
| 5893 | |
| 5894 | if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) { |
| 5895 | // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one. |
| 5896 | // It is untargetable, because we do not know the appropriate index. |
// If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets.
| 5898 | m_inlineStackTop->m_continuationBlock = allocateUntargetableBlock(); |
| 5899 | } |
| 5900 | |
| 5901 | if (m_inlineStackTop->m_continuationBlock) |
| 5902 | addJumpTo(m_inlineStackTop->m_continuationBlock); |
| 5903 | else { |
// We are returning from an inlined function, and do not need to jump anywhere, so we just keep the current block.
| 5905 | m_inlineStackTop->m_continuationBlock = m_currentBlock; |
| 5906 | } |
| 5907 | LAST_OPCODE_LINKED(op_ret); |
| 5908 | } |
| 5909 | case op_end: |
| 5910 | ASSERT(!inlineCallFrame()); |
| 5911 | addToGraph(Return, get(currentInstruction->as<OpEnd>().m_value)); |
| 5912 | flushForReturn(); |
| 5913 | LAST_OPCODE(op_end); |
| 5914 | |
| 5915 | case op_throw: |
| 5916 | addToGraph(Throw, get(currentInstruction->as<OpThrow>().m_value)); |
| 5917 | flushForTerminal(); |
| 5918 | LAST_OPCODE(op_throw); |
| 5919 | |
| 5920 | case op_throw_static_error: { |
| 5921 | auto bytecode = currentInstruction->as<OpThrowStaticError>(); |
| 5922 | addToGraph(ThrowStaticError, OpInfo(bytecode.m_errorType), get(bytecode.m_message)); |
| 5923 | flushForTerminal(); |
| 5924 | LAST_OPCODE(op_throw_static_error); |
| 5925 | } |
| 5926 | |
| 5927 | case op_catch: { |
| 5928 | auto bytecode = currentInstruction->as<OpCatch>(); |
| 5929 | m_graph.m_hasExceptionHandlers = true; |
| 5930 | |
| 5931 | if (inlineCallFrame()) { |
| 5932 | // We can't do OSR entry into an inlined frame. |
| 5933 | NEXT_OPCODE(op_catch); |
| 5934 | } |
| 5935 | |
| 5936 | if (m_graph.m_plan.mode() == FTLForOSREntryMode) { |
| 5937 | NEXT_OPCODE(op_catch); |
| 5938 | } |
| 5939 | |
| 5940 | RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution)); |
| 5941 | |
| 5942 | ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer; |
| 5943 | |
| 5944 | if (!buffer) { |
| 5945 | NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread. |
| 5946 | } |
| 5947 | |
| 5948 | // We're now committed to compiling this as an entrypoint. |
| 5949 | m_currentBlock->isCatchEntrypoint = true; |
| 5950 | m_graph.m_roots.append(m_currentBlock); |
| 5951 | |
| 5952 | Vector<SpeculatedType> argumentPredictions(m_numArguments); |
| 5953 | Vector<SpeculatedType> localPredictions; |
| 5954 | HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> seenArguments; |
| 5955 | |
| 5956 | { |
| 5957 | ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); |
| 5958 | |
| 5959 | buffer->forEach([&] (ValueProfileAndOperand& profile) { |
| 5960 | VirtualRegister operand(profile.m_operand); |
| 5961 | SpeculatedType prediction = profile.m_profile.computeUpdatedPrediction(locker); |
| 5962 | if (operand.isLocal()) |
| 5963 | localPredictions.append(prediction); |
| 5964 | else { |
| 5965 | RELEASE_ASSERT(operand.isArgument()); |
| 5966 | RELEASE_ASSERT(static_cast<uint32_t>(operand.toArgument()) < argumentPredictions.size()); |
| 5967 | if (validationEnabled()) |
| 5968 | seenArguments.add(operand.toArgument()); |
| 5969 | argumentPredictions[operand.toArgument()] = prediction; |
| 5970 | } |
| 5971 | }); |
| 5972 | |
| 5973 | if (validationEnabled()) { |
| 5974 | for (unsigned argument = 0; argument < m_numArguments; ++argument) |
| 5975 | RELEASE_ASSERT(seenArguments.contains(argument)); |
| 5976 | } |
| 5977 | } |
| 5978 | |
| 5979 | Vector<std::pair<VirtualRegister, Node*>> localsToSet; |
| 5980 | localsToSet.reserveInitialCapacity(buffer->m_size); // Note: This will reserve more than the number of locals we see below because the buffer includes arguments. |
| 5981 | |
| 5982 | // We're not allowed to exit here since we would not properly recover values. |
| 5983 | // We first need to bootstrap the catch entrypoint state. |
| 5984 | m_exitOK = false; |
| 5985 | |
| 5986 | unsigned numberOfLocals = 0; |
| 5987 | buffer->forEach([&] (ValueProfileAndOperand& profile) { |
| 5988 | VirtualRegister operand(profile.m_operand); |
| 5989 | if (operand.isArgument()) |
| 5990 | return; |
| 5991 | ASSERT(operand.isLocal()); |
| 5992 | Node* value = addToGraph(ExtractCatchLocal, OpInfo(numberOfLocals), OpInfo(localPredictions[numberOfLocals])); |
| 5993 | ++numberOfLocals; |
| 5994 | addToGraph(MovHint, OpInfo(profile.m_operand), value); |
| 5995 | localsToSet.uncheckedAppend(std::make_pair(operand, value)); |
| 5996 | }); |
| 5997 | if (numberOfLocals) |
| 5998 | addToGraph(ClearCatchLocals); |
| 5999 | |
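// m_maxLocalsForCatchOSREntry is an Optional; make sure it is engaged before taking the
// max across all catch entrypoints.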
| 6000 | if (!m_graph.m_maxLocalsForCatchOSREntry) |
| 6001 | m_graph.m_maxLocalsForCatchOSREntry = 0; |
| 6002 | m_graph.m_maxLocalsForCatchOSREntry = std::max(numberOfLocals, *m_graph.m_maxLocalsForCatchOSREntry); |
| 6003 | |
| 6004 | // We could not exit before this point in the program because we would not know how to do value |
| 6005 | // recovery for live locals. The above IR sets up the necessary state so we can recover values |
| 6006 | // during OSR exit. |
| 6007 | // |
| 6008 | // The nodes that follow here all exit to the following bytecode instruction, not |
| 6009 | // the op_catch. Exiting to op_catch is reserved for when an exception is thrown. |
| 6010 | // The SetArgument nodes that follow below may exit because we may hoist type checks |
| 6011 | // to them. The SetLocal nodes that follow below may exit because we may choose |
| 6012 | // a flush format that speculates on the type of the local. |
| 6013 | m_exitOK = true; |
| 6014 | addToGraph(ExitOK); |
| 6015 | |
| 6016 | { |
| 6017 | auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector()); |
| 6018 | RELEASE_ASSERT(addResult.isNewEntry); |
| 6019 | ArgumentsVector& entrypointArguments = addResult.iterator->value; |
| 6020 | entrypointArguments.resize(m_numArguments); |
| 6021 | |
| 6022 | unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size(); |
| 6023 | |
| 6024 | for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) { |
| 6025 | VariableAccessData* variable = newVariableAccessData(virtualRegisterForArgument(argument)); |
| 6026 | variable->predict(argumentPredictions[argument]); |
| 6027 | |
| 6028 | variable->mergeStructureCheckHoistingFailed( |
| 6029 | m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadCache)); |
| 6030 | variable->mergeCheckArrayHoistingFailed( |
| 6031 | m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadIndexingType)); |
| 6032 | |
| 6033 | Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); |
| 6034 | setArgument->origin.forExit = CodeOrigin(exitBytecodeIndex, setArgument->origin.forExit.inlineCallFrame()); |
| 6035 | m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument); |
| 6036 | entrypointArguments[argument] = setArgument; |
| 6037 | } |
| 6038 | } |
| 6039 | |
| 6040 | for (const std::pair<VirtualRegister, Node*>& pair : localsToSet) { |
| 6041 | DelayedSetLocal delayed { currentCodeOrigin(), pair.first, pair.second, ImmediateNakedSet }; |
| 6042 | m_setLocalQueue.append(delayed); |
| 6043 | } |
| 6044 | |
| 6045 | NEXT_OPCODE(op_catch); |
| 6046 | } |
| 6047 | |
| 6048 | case op_call: |
| 6049 | handleCall<OpCall>(currentInstruction, Call, CallMode::Regular); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
| 6051 | NEXT_OPCODE(op_call); |
| 6052 | |
| 6053 | case op_tail_call: { |
| 6054 | flushForReturn(); |
| 6055 | Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
| 6057 | // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. |
| 6058 | // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean |
| 6059 | // things up. |
| 6060 | if (terminality == NonTerminal) |
| 6061 | NEXT_OPCODE(op_tail_call); |
| 6062 | else |
| 6063 | LAST_OPCODE_LINKED(op_tail_call); |
| 6064 | // We use LAST_OPCODE_LINKED instead of LAST_OPCODE because if the tail call was optimized, it may now be a jump to a bytecode index in a different InlineStackEntry. |
| 6065 | } |
| 6066 | |
| 6067 | case op_construct: |
| 6068 | handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
| 6070 | NEXT_OPCODE(op_construct); |
| 6071 | |
| 6072 | case op_call_varargs: { |
| 6073 | handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
| 6075 | NEXT_OPCODE(op_call_varargs); |
| 6076 | } |
| 6077 | |
| 6078 | case op_tail_call_varargs: { |
| 6079 | flushForReturn(); |
| 6080 | Terminality terminality = handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
| 6082 | // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. |
| 6083 | // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean |
| 6084 | // things up. |
| 6085 | if (terminality == NonTerminal) |
| 6086 | NEXT_OPCODE(op_tail_call_varargs); |
| 6087 | else |
| 6088 | LAST_OPCODE(op_tail_call_varargs); |
| 6089 | } |
| 6090 | |
| 6091 | case op_tail_call_forward_arguments: { |
// We need to make sure that we don't unbox our arguments here, since that won't be
// done by the arguments object creation node (that node may not even exist).
| 6094 | noticeArgumentsUse(); |
| 6095 | flushForReturn(); |
| 6096 | Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
| 6098 | // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. |
| 6099 | // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean |
| 6100 | // things up. |
| 6101 | if (terminality == NonTerminal) |
| 6102 | NEXT_OPCODE(op_tail_call_forward_arguments); |
| 6103 | else |
| 6104 | LAST_OPCODE(op_tail_call_forward_arguments); |
| 6105 | } |
| 6106 | |
| 6107 | case op_construct_varargs: { |
| 6108 | handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct); |
ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
| 6110 | NEXT_OPCODE(op_construct_varargs); |
| 6111 | } |
| 6112 | |
| 6113 | case op_call_eval: { |
| 6114 | auto bytecode = currentInstruction->as<OpCallEval>(); |
| 6115 | int registerOffset = -bytecode.m_argv; |
| 6116 | addCall(bytecode.m_dst, CallEval, nullptr, get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction()); |
| 6117 | NEXT_OPCODE(op_call_eval); |
| 6118 | } |
| 6119 | |
| 6120 | case op_jneq_ptr: { |
| 6121 | auto bytecode = currentInstruction->as<OpJneqPtr>(); |
| 6122 | Special::Pointer specialPointer = bytecode.m_specialPointer; |
| 6123 | ASSERT(pointerIsCell(specialPointer)); |
| 6124 | JSCell* actualPointer = static_cast<JSCell*>( |
| 6125 | actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer)); |
| 6126 | FrozenValue* frozenPointer = m_graph.freeze(actualPointer); |
| 6127 | unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel); |
| 6128 | Node* child = get(bytecode.m_value); |
| 6129 | if (bytecode.metadata(codeBlock).m_hasJumped) { |
| 6130 | Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child); |
| 6131 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); |
| 6132 | LAST_OPCODE(op_jneq_ptr); |
| 6133 | } |
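// Profiling says this jump was never taken, so rather than emit a branch we speculate
// that the value is always the special pointer; CheckCell will OSR-exit if that ever
// stops being true.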
| 6134 | addToGraph(CheckCell, OpInfo(frozenPointer), child); |
| 6135 | NEXT_OPCODE(op_jneq_ptr); |
| 6136 | } |
| 6137 | |
| 6138 | case op_resolve_scope: { |
| 6139 | auto bytecode = currentInstruction->as<OpResolveScope>(); |
| 6140 | auto& metadata = bytecode.metadata(codeBlock); |
| 6141 | |
| 6142 | ResolveType resolveType; |
| 6143 | unsigned depth; |
| 6144 | JSScope* constantScope = nullptr; |
| 6145 | JSCell* lexicalEnvironment = nullptr; |
| 6146 | SymbolTable* symbolTable = nullptr; |
| 6147 | { |
| 6148 | ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); |
| 6149 | resolveType = metadata.m_resolveType; |
| 6150 | depth = metadata.m_localScopeDepth; |
| 6151 | switch (resolveType) { |
| 6152 | case GlobalProperty: |
| 6153 | case GlobalVar: |
| 6154 | case GlobalPropertyWithVarInjectionChecks: |
| 6155 | case GlobalVarWithVarInjectionChecks: |
| 6156 | case GlobalLexicalVar: |
| 6157 | case GlobalLexicalVarWithVarInjectionChecks: |
| 6158 | constantScope = metadata.m_constantScope.get(); |
| 6159 | break; |
| 6160 | case ModuleVar: |
| 6161 | lexicalEnvironment = metadata.m_lexicalEnvironment.get(); |
| 6162 | break; |
| 6163 | case LocalClosureVar: |
| 6164 | case ClosureVar: |
| 6165 | case ClosureVarWithVarInjectionChecks: |
| 6166 | symbolTable = metadata.m_symbolTable.get(); |
| 6167 | break; |
| 6168 | default: |
| 6169 | break; |
| 6170 | } |
| 6171 | } |
| 6172 | |
| 6173 | if (needsDynamicLookup(resolveType, op_resolve_scope)) { |
| 6174 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var]; |
| 6175 | set(bytecode.m_dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.m_scope))); |
| 6176 | NEXT_OPCODE(op_resolve_scope); |
| 6177 | } |
| 6178 | |
| 6179 | // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints. |
| 6180 | if (needsVarInjectionChecks(resolveType)) |
| 6181 | m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint()); |
| 6182 | |
// FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. This case should be removed once that is fixed.
| 6184 | // https://bugs.webkit.org/show_bug.cgi?id=193347 |
| 6185 | if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) { |
| 6186 | if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) { |
| 6187 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); |
| 6188 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var]; |
| 6189 | if (!m_graph.watchGlobalProperty(globalObject, identifierNumber)) |
| 6190 | addToGraph(ForceOSRExit); |
| 6191 | } |
| 6192 | } |
| 6193 | |
| 6194 | switch (resolveType) { |
| 6195 | case GlobalProperty: |
| 6196 | case GlobalVar: |
| 6197 | case GlobalPropertyWithVarInjectionChecks: |
| 6198 | case GlobalVarWithVarInjectionChecks: |
| 6199 | case GlobalLexicalVar: |
| 6200 | case GlobalLexicalVarWithVarInjectionChecks: { |
| 6201 | RELEASE_ASSERT(constantScope); |
| 6202 | RELEASE_ASSERT(constantScope == JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); |
| 6203 | set(bytecode.m_dst, weakJSConstant(constantScope)); |
| 6204 | addToGraph(Phantom, get(bytecode.m_scope)); |
| 6205 | break; |
| 6206 | } |
| 6207 | case ModuleVar: { |
// Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar,
// we do not need to keep it alive with a Phantom node.
// The module environment is already strongly referenced by the CodeBlock.
| 6211 | set(bytecode.m_dst, weakJSConstant(lexicalEnvironment)); |
| 6212 | break; |
| 6213 | } |
| 6214 | case LocalClosureVar: |
| 6215 | case ClosureVar: |
| 6216 | case ClosureVarWithVarInjectionChecks: { |
| 6217 | Node* localBase = get(bytecode.m_scope); |
| 6218 | addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope. |
| 6219 | |
| 6220 | // We have various forms of constant folding here. This is necessary to avoid |
| 6221 | // spurious recompiles in dead-but-foldable code. |
| 6222 | if (symbolTable) { |
| 6223 | InferredValue* singleton = symbolTable->singletonScope(); |
| 6224 | if (JSValue value = singleton->inferredValue()) { |
| 6225 | m_graph.watchpoints().addLazily(singleton); |
| 6226 | set(bytecode.m_dst, weakJSConstant(value)); |
| 6227 | break; |
| 6228 | } |
| 6229 | } |
| 6230 | if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) { |
| 6231 | for (unsigned n = depth; n--;) |
| 6232 | scope = scope->next(); |
| 6233 | set(bytecode.m_dst, weakJSConstant(scope)); |
| 6234 | break; |
| 6235 | } |
| 6236 | for (unsigned n = depth; n--;) |
| 6237 | localBase = addToGraph(SkipScope, localBase); |
| 6238 | set(bytecode.m_dst, localBase); |
| 6239 | break; |
| 6240 | } |
| 6241 | case UnresolvedProperty: |
| 6242 | case UnresolvedPropertyWithVarInjectionChecks: { |
| 6243 | addToGraph(Phantom, get(bytecode.m_scope)); |
| 6244 | addToGraph(ForceOSRExit); |
| 6245 | set(bytecode.m_dst, addToGraph(JSConstant, OpInfo(m_constantNull))); |
| 6246 | break; |
| 6247 | } |
| 6248 | case Dynamic: |
| 6249 | RELEASE_ASSERT_NOT_REACHED(); |
| 6250 | break; |
| 6251 | } |
| 6252 | NEXT_OPCODE(op_resolve_scope); |
| 6253 | } |
| 6254 | case op_resolve_scope_for_hoisting_func_decl_in_eval: { |
| 6255 | auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>(); |
| 6256 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 6257 | |
| 6258 | set(bytecode.m_dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.m_scope))); |
| 6259 | |
| 6260 | NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval); |
| 6261 | } |
| 6262 | |
| 6263 | case op_get_from_scope: { |
| 6264 | auto bytecode = currentInstruction->as<OpGetFromScope>(); |
| 6265 | auto& metadata = bytecode.metadata(codeBlock); |
| 6266 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var]; |
| 6267 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; |
| 6268 | |
| 6269 | ResolveType resolveType; |
| 6270 | GetPutInfo getPutInfo(0); |
Structure* structure = nullptr;
WatchpointSet* watchpoints = nullptr;
| 6273 | uintptr_t operand; |
| 6274 | { |
| 6275 | ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); |
| 6276 | getPutInfo = metadata.m_getPutInfo; |
| 6277 | resolveType = getPutInfo.resolveType(); |
| 6278 | if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) |
| 6279 | watchpoints = metadata.m_watchpointSet; |
| 6280 | else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) |
| 6281 | structure = metadata.m_structure.get(); |
| 6282 | operand = metadata.m_operand; |
| 6283 | } |
| 6284 | |
| 6285 | if (needsDynamicLookup(resolveType, op_get_from_scope)) { |
| 6286 | uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand()); |
| 6287 | SpeculatedType prediction = getPrediction(); |
| 6288 | set(bytecode.m_dst, |
| 6289 | addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.m_scope))); |
| 6290 | NEXT_OPCODE(op_get_from_scope); |
| 6291 | } |
| 6292 | |
UNUSED_PARAM(watchpoints); // We will use this in the future. For now we read it as a way of documenting that this is what index 5 holds in GlobalVar mode.
| 6294 | |
| 6295 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); |
| 6296 | |
| 6297 | switch (resolveType) { |
| 6298 | case GlobalProperty: |
| 6299 | case GlobalPropertyWithVarInjectionChecks: { |
// FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. This case should be removed once that is fixed.
| 6301 | // https://bugs.webkit.org/show_bug.cgi?id=193347 |
| 6302 | if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) { |
| 6303 | if (!m_graph.watchGlobalProperty(globalObject, identifierNumber)) |
| 6304 | addToGraph(ForceOSRExit); |
| 6305 | } |
| 6306 | |
| 6307 | SpeculatedType prediction = getPrediction(); |
| 6308 | |
| 6309 | GetByIdStatus status = GetByIdStatus::computeFor(structure, uid); |
| 6310 | if (status.state() != GetByIdStatus::Simple |
| 6311 | || status.numVariants() != 1 |
| 6312 | || status[0].structureSet().size() != 1) { |
| 6313 | set(bytecode.m_dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.m_scope))); |
| 6314 | break; |
| 6315 | } |
| 6316 | |
| 6317 | Node* base = weakJSConstant(globalObject); |
| 6318 | Node* result = load(prediction, base, identifierNumber, status[0]); |
| 6319 | addToGraph(Phantom, get(bytecode.m_scope)); |
| 6320 | set(bytecode.m_dst, result); |
| 6321 | break; |
| 6322 | } |
| 6323 | case GlobalVar: |
| 6324 | case GlobalVarWithVarInjectionChecks: |
| 6325 | case GlobalLexicalVar: |
| 6326 | case GlobalLexicalVarWithVarInjectionChecks: { |
| 6327 | addToGraph(Phantom, get(bytecode.m_scope)); |
| 6328 | WatchpointSet* watchpointSet; |
| 6329 | ScopeOffset offset; |
| 6330 | JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); |
| 6331 | { |
| 6332 | ConcurrentJSLocker locker(scopeObject->symbolTable()->m_lock); |
| 6333 | SymbolTableEntry entry = scopeObject->symbolTable()->get(locker, uid); |
| 6334 | watchpointSet = entry.watchpointSet(); |
| 6335 | offset = entry.scopeOffset(); |
| 6336 | } |
| 6337 | if (watchpointSet && watchpointSet->state() == IsWatched) { |
| 6338 | // This has a fun concurrency story. There is the possibility of a race in two |
| 6339 | // directions: |
| 6340 | // |
| 6341 | // We see that the set IsWatched, but in the meantime it gets invalidated: this is |
| 6342 | // fine because if we saw that it IsWatched then we add a watchpoint. If it gets |
| 6343 | // invalidated, then this compilation is invalidated. Note that in the meantime we |
| 6344 | // may load an absurd value from the global object. It's fine to load an absurd |
| 6345 | // value if the compilation is invalidated anyway. |
| 6346 | // |
| 6347 | // We see that the set IsWatched, but the value isn't yet initialized: this isn't |
| 6348 | // possible because of the ordering of operations. |
| 6349 | // |
| 6350 | // Here's how we order operations: |
| 6351 | // |
| 6352 | // Main thread stores to the global object: always store a value first, and only |
| 6353 | // after that do we touch the watchpoint set. There is a fence in the touch, that |
| 6354 | // ensures that the store to the global object always happens before the touch on the |
| 6355 | // set. |
| 6356 | // |
| 6357 | // Compilation thread: always first load the state of the watchpoint set, and then |
| 6358 | // load the value. The WatchpointSet::state() method does fences for us to ensure |
| 6359 | // that the load of the state happens before our load of the value. |
| 6360 | // |
| 6361 | // Finalizing compilation: this happens on the main thread and synchronously checks |
| 6362 | // validity of all watchpoint sets. |
| 6363 | // |
| 6364 | // We will only perform optimizations if the load of the state yields IsWatched. That |
| 6365 | // means that at least one store would have happened to initialize the original value |
| 6366 | // of the variable (that is, the value we'd like to constant fold to). There may be |
| 6367 | // other stores that happen after that, but those stores will invalidate the |
| 6368 | // watchpoint set and also the compilation. |
| 6369 | |
| 6370 | // Note that we need to use the operand, which is a direct pointer at the global, |
| 6371 | // rather than looking up the global by doing variableAt(offset). That's because the |
| 6372 | // internal data structures of JSSegmentedVariableObject are not thread-safe even |
| 6373 | // though accessing the global itself is. The segmentation involves a vector spine |
| 6374 | // that resizes with malloc/free, so if new globals unrelated to the one we are |
| 6375 | // reading are added, we might access freed memory if we do variableAt(). |
| 6376 | WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand); |
| 6377 | |
| 6378 | ASSERT(scopeObject->findVariableIndex(pointer) == offset); |
| 6379 | |
| 6380 | JSValue value = pointer->get(); |
| 6381 | if (value) { |
| 6382 | m_graph.watchpoints().addLazily(watchpointSet); |
| 6383 | set(bytecode.m_dst, weakJSConstant(value)); |
| 6384 | break; |
| 6385 | } |
| 6386 | } |
| 6387 | |
| 6388 | SpeculatedType prediction = getPrediction(); |
| 6389 | NodeType nodeType; |
| 6390 | if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) |
| 6391 | nodeType = GetGlobalVar; |
| 6392 | else |
| 6393 | nodeType = GetGlobalLexicalVariable; |
| 6394 | Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction)); |
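// A global lexical variable may still be in its temporal dead zone, so make sure the
// load did not observe the empty value.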
| 6395 | if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) |
| 6396 | addToGraph(CheckNotEmpty, value); |
| 6397 | set(bytecode.m_dst, value); |
| 6398 | break; |
| 6399 | } |
| 6400 | case LocalClosureVar: |
| 6401 | case ClosureVar: |
| 6402 | case ClosureVarWithVarInjectionChecks: { |
| 6403 | Node* scopeNode = get(bytecode.m_scope); |
| 6404 | |
| 6405 | // Ideally we wouldn't have to do this Phantom. But: |
| 6406 | // |
| 6407 | // For the constant case: we must do it because otherwise we would have no way of knowing |
| 6408 | // that the scope is live at OSR here. |
| 6409 | // |
| 6410 | // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation |
| 6411 | // won't be able to handle an Undefined scope. |
| 6412 | addToGraph(Phantom, scopeNode); |
| 6413 | |
| 6414 | // Constant folding in the bytecode parser is important for performance. This may not |
| 6415 | // have executed yet. If it hasn't, then we won't have a prediction. Lacking a |
| 6416 | // prediction, we'd otherwise think that it has to exit. Then when it did execute, we |
| 6417 | // would recompile. But if we can fold it here, we avoid the exit. |
| 6418 | if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) { |
| 6419 | set(bytecode.m_dst, weakJSConstant(value)); |
| 6420 | break; |
| 6421 | } |
| 6422 | SpeculatedType prediction = getPrediction(); |
| 6423 | set(bytecode.m_dst, |
| 6424 | addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode)); |
| 6425 | break; |
| 6426 | } |
| 6427 | case UnresolvedProperty: |
| 6428 | case UnresolvedPropertyWithVarInjectionChecks: |
| 6429 | case ModuleVar: |
| 6430 | case Dynamic: |
| 6431 | RELEASE_ASSERT_NOT_REACHED(); |
| 6432 | break; |
| 6433 | } |
| 6434 | NEXT_OPCODE(op_get_from_scope); |
| 6435 | } |
| 6436 | |
| 6437 | case op_put_to_scope: { |
| 6438 | auto bytecode = currentInstruction->as<OpPutToScope>(); |
| 6439 | auto& metadata = bytecode.metadata(codeBlock); |
| 6440 | unsigned identifierNumber = bytecode.m_var; |
| 6441 | if (identifierNumber != UINT_MAX) |
| 6442 | identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber]; |
| 6443 | UniquedStringImpl* uid; |
| 6444 | if (identifierNumber != UINT_MAX) |
| 6445 | uid = m_graph.identifiers()[identifierNumber]; |
| 6446 | else |
| 6447 | uid = nullptr; |
| 6448 | |
| 6449 | ResolveType resolveType; |
| 6450 | GetPutInfo getPutInfo(0); |
| 6451 | Structure* structure = nullptr; |
| 6452 | WatchpointSet* watchpoints = nullptr; |
| 6453 | uintptr_t operand; |
| 6454 | { |
| 6455 | ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); |
| 6456 | getPutInfo = metadata.m_getPutInfo; |
| 6457 | resolveType = getPutInfo.resolveType(); |
| 6458 | if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) |
| 6459 | watchpoints = metadata.m_watchpointSet; |
| 6460 | else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) |
| 6461 | structure = metadata.m_structure.get(); |
| 6462 | operand = metadata.m_operand; |
| 6463 | } |
| 6464 | |
| 6465 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); |
| 6466 | |
| 6467 | if (needsDynamicLookup(resolveType, op_put_to_scope)) { |
| 6468 | ASSERT(identifierNumber != UINT_MAX); |
| 6469 | uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand()); |
| 6470 | addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.m_scope), get(bytecode.m_value)); |
| 6471 | NEXT_OPCODE(op_put_to_scope); |
| 6472 | } |
| 6473 | |
| 6474 | switch (resolveType) { |
| 6475 | case GlobalProperty: |
| 6476 | case GlobalPropertyWithVarInjectionChecks: { |
// FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. This case should be removed once that is fixed.
| 6478 | // https://bugs.webkit.org/show_bug.cgi?id=193347 |
| 6479 | if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) { |
| 6480 | if (!m_graph.watchGlobalProperty(globalObject, identifierNumber)) |
| 6481 | addToGraph(ForceOSRExit); |
| 6482 | } |
| 6483 | |
| 6484 | PutByIdStatus status; |
| 6485 | if (uid) |
| 6486 | status = PutByIdStatus::computeFor(globalObject, structure, uid, false); |
| 6487 | else |
| 6488 | status = PutByIdStatus(PutByIdStatus::TakesSlowPath); |
| 6489 | if (status.numVariants() != 1 |
| 6490 | || status[0].kind() != PutByIdVariant::Replace |
| 6491 | || status[0].structure().size() != 1) { |
| 6492 | addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.m_scope), get(bytecode.m_value)); |
| 6493 | break; |
| 6494 | } |
| 6495 | Node* base = weakJSConstant(globalObject); |
| 6496 | store(base, identifierNumber, status[0], get(bytecode.m_value)); |
| 6497 | // Keep scope alive until after put. |
| 6498 | addToGraph(Phantom, get(bytecode.m_scope)); |
| 6499 | break; |
| 6500 | } |
| 6501 | case GlobalLexicalVar: |
| 6502 | case GlobalLexicalVarWithVarInjectionChecks: |
| 6503 | case GlobalVar: |
| 6504 | case GlobalVarWithVarInjectionChecks: { |
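// A non-initializing store to a global lexical binding must respect the TDZ: load the
// current value and CheckNotEmpty so we OSR-exit if the binding has not been
// initialized yet.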
| 6505 | if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { |
| 6506 | SpeculatedType prediction = SpecEmpty; |
| 6507 | Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction)); |
| 6508 | addToGraph(CheckNotEmpty, value); |
| 6509 | } |
| 6510 | |
| 6511 | JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); |
| 6512 | if (watchpoints) { |
| 6513 | SymbolTableEntry entry = scopeObject->symbolTable()->get(uid); |
| 6514 | ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet()); |
| 6515 | } |
| 6516 | Node* valueNode = get(bytecode.m_value); |
| 6517 | addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode); |
| 6518 | if (watchpoints && watchpoints->state() != IsInvalidated) { |
| 6519 | // Must happen after the store. See comment for GetGlobalVar. |
| 6520 | addToGraph(NotifyWrite, OpInfo(watchpoints)); |
| 6521 | } |
| 6522 | // Keep scope alive until after put. |
| 6523 | addToGraph(Phantom, get(bytecode.m_scope)); |
| 6524 | break; |
| 6525 | } |
| 6526 | case LocalClosureVar: |
| 6527 | case ClosureVar: |
| 6528 | case ClosureVarWithVarInjectionChecks: { |
| 6529 | Node* scopeNode = get(bytecode.m_scope); |
| 6530 | Node* valueNode = get(bytecode.m_value); |
| 6531 | |
| 6532 | addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode); |
| 6533 | |
| 6534 | if (watchpoints && watchpoints->state() != IsInvalidated) { |
| 6535 | // Must happen after the store. See comment for GetGlobalVar. |
| 6536 | addToGraph(NotifyWrite, OpInfo(watchpoints)); |
| 6537 | } |
| 6538 | break; |
| 6539 | } |
| 6540 | |
| 6541 | case ModuleVar: |
// We do not need to keep the "scope" and "value" register values alive here with
// Phantoms because they are not used in LLInt / baseline op_put_to_scope with ModuleVar.
| 6544 | addToGraph(ForceOSRExit); |
| 6545 | break; |
| 6546 | |
| 6547 | case Dynamic: |
| 6548 | case UnresolvedProperty: |
| 6549 | case UnresolvedPropertyWithVarInjectionChecks: |
| 6550 | RELEASE_ASSERT_NOT_REACHED(); |
| 6551 | break; |
| 6552 | } |
| 6553 | NEXT_OPCODE(op_put_to_scope); |
| 6554 | } |
| 6555 | |
| 6556 | case op_loop_hint: { |
| 6557 | // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG |
| 6558 | // OSR can only happen at basic block boundaries. Assert that these two statements |
| 6559 | // are compatible. |
| 6560 | RELEASE_ASSERT(m_currentIndex == blockBegin); |
| 6561 | |
| 6562 | // We never do OSR into an inlined code block. That could not happen, since OSR |
| 6563 | // looks up the code block that is the replacement for the baseline JIT code |
| 6564 | // block. Hence, machine code block = true code block = not inline code block. |
| 6565 | if (!m_inlineStackTop->m_caller) |
| 6566 | m_currentBlock->isOSRTarget = true; |
| 6567 | |
| 6568 | addToGraph(LoopHint); |
| 6569 | NEXT_OPCODE(op_loop_hint); |
| 6570 | } |
| 6571 | |
| 6572 | case op_check_traps: { |
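// When traps are polled for explicitly we emit a CheckTraps node; otherwise trap
// delivery works by invalidating the machine code, which an InvalidationPoint models.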
| 6573 | addToGraph(Options::usePollingTraps() ? CheckTraps : InvalidationPoint); |
| 6574 | NEXT_OPCODE(op_check_traps); |
| 6575 | } |
| 6576 | |
| 6577 | case op_nop: { |
| 6578 | addToGraph(Check); // We add a nop here so that basic block linking doesn't break. |
| 6579 | NEXT_OPCODE(op_nop); |
| 6580 | } |
| 6581 | |
| 6582 | case op_super_sampler_begin: { |
| 6583 | addToGraph(SuperSamplerBegin); |
| 6584 | NEXT_OPCODE(op_super_sampler_begin); |
| 6585 | } |
| 6586 | |
| 6587 | case op_super_sampler_end: { |
| 6588 | addToGraph(SuperSamplerEnd); |
| 6589 | NEXT_OPCODE(op_super_sampler_end); |
| 6590 | } |
| 6591 | |
| 6592 | case op_create_lexical_environment: { |
| 6593 | auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>(); |
| 6594 | ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant()); |
| 6595 | FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset())); |
| 6596 | FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset())); |
| 6597 | Node* scope = get(bytecode.m_scope); |
| 6598 | Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope); |
| 6599 | set(bytecode.m_dst, lexicalEnvironment); |
| 6600 | NEXT_OPCODE(op_create_lexical_environment); |
| 6601 | } |
| 6602 | |
| 6603 | case op_push_with_scope: { |
| 6604 | auto bytecode = currentInstruction->as<OpPushWithScope>(); |
| 6605 | Node* currentScope = get(bytecode.m_currentScope); |
| 6606 | Node* object = get(bytecode.m_newScope); |
| 6607 | set(bytecode.m_dst, addToGraph(PushWithScope, currentScope, object)); |
| 6608 | NEXT_OPCODE(op_push_with_scope); |
| 6609 | } |
| 6610 | |
| 6611 | case op_get_parent_scope: { |
| 6612 | auto bytecode = currentInstruction->as<OpGetParentScope>(); |
| 6613 | Node* currentScope = get(bytecode.m_scope); |
| 6614 | Node* newScope = addToGraph(SkipScope, currentScope); |
| 6615 | set(bytecode.m_dst, newScope); |
| 6616 | addToGraph(Phantom, currentScope); |
| 6617 | NEXT_OPCODE(op_get_parent_scope); |
| 6618 | } |
| 6619 | |
| 6620 | case op_get_scope: { |
| 6621 | // Help the later stages a bit by doing some small constant folding here. Note that this |
| 6622 | // only helps for the first basic block. It's extremely important not to constant fold |
| 6623 | // loads from the scope register later, as that would prevent the DFG from tracking the |
| 6624 | // bytecode-level liveness of the scope register. |
| 6625 | auto bytecode = currentInstruction->as<OpGetScope>(); |
| 6626 | Node* callee = get(VirtualRegister(CallFrameSlot::callee)); |
| 6627 | Node* result; |
| 6628 | if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm)) |
| 6629 | result = weakJSConstant(function->scope()); |
| 6630 | else |
| 6631 | result = addToGraph(GetScope, callee); |
| 6632 | set(bytecode.m_dst, result); |
| 6633 | NEXT_OPCODE(op_get_scope); |
| 6634 | } |
| 6635 | |
| 6636 | case op_argument_count: { |
| 6637 | auto bytecode = currentInstruction->as<OpArgumentCount>(); |
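// The machine argument count includes |this|, so subtract one to produce the value
// this bytecode expects.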
| 6638 | Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne))); |
| 6639 | set(bytecode.m_dst, sub); |
| 6640 | NEXT_OPCODE(op_argument_count); |
| 6641 | } |
| 6642 | |
| 6643 | case op_create_direct_arguments: { |
| 6644 | auto bytecode = currentInstruction->as<OpCreateDirectArguments>(); |
| 6645 | noticeArgumentsUse(); |
| 6646 | Node* createArguments = addToGraph(CreateDirectArguments); |
| 6647 | set(bytecode.m_dst, createArguments); |
| 6648 | NEXT_OPCODE(op_create_direct_arguments); |
| 6649 | } |
| 6650 | |
| 6651 | case op_create_scoped_arguments: { |
| 6652 | auto bytecode = currentInstruction->as<OpCreateScopedArguments>(); |
| 6653 | noticeArgumentsUse(); |
| 6654 | Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.m_scope)); |
| 6655 | set(bytecode.m_dst, createArguments); |
| 6656 | NEXT_OPCODE(op_create_scoped_arguments); |
| 6657 | } |
| 6658 | |
| 6659 | case op_create_cloned_arguments: { |
| 6660 | auto bytecode = currentInstruction->as<OpCreateClonedArguments>(); |
| 6661 | noticeArgumentsUse(); |
| 6662 | Node* createArguments = addToGraph(CreateClonedArguments); |
| 6663 | set(bytecode.m_dst, createArguments); |
| 6664 | NEXT_OPCODE(op_create_cloned_arguments); |
| 6665 | } |
| 6666 | |
| 6667 | case op_get_from_arguments: { |
| 6668 | auto bytecode = currentInstruction->as<OpGetFromArguments>(); |
| 6669 | set(bytecode.m_dst, |
| 6670 | addToGraph( |
| 6671 | GetFromArguments, |
| 6672 | OpInfo(bytecode.m_index), |
| 6673 | OpInfo(getPrediction()), |
| 6674 | get(bytecode.m_arguments))); |
| 6675 | NEXT_OPCODE(op_get_from_arguments); |
| 6676 | } |
| 6677 | |
| 6678 | case op_put_to_arguments: { |
| 6679 | auto bytecode = currentInstruction->as<OpPutToArguments>(); |
| 6680 | addToGraph( |
| 6681 | PutToArguments, |
| 6682 | OpInfo(bytecode.m_index), |
| 6683 | get(bytecode.m_arguments), |
| 6684 | get(bytecode.m_value)); |
| 6685 | NEXT_OPCODE(op_put_to_arguments); |
| 6686 | } |
| 6687 | |
| 6688 | case op_get_argument: { |
| 6689 | auto bytecode = currentInstruction->as<OpGetArgument>(); |
| 6690 | InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); |
| 6691 | Node* argument; |
| 6692 | int32_t argumentIndexIncludingThis = bytecode.m_index; |
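// Inside an inlined, non-varargs frame the argument count is known statically, so we
// can read the argument register directly, or fold to undefined when the index is out
// of range.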
| 6693 | if (inlineCallFrame && !inlineCallFrame->isVarargs()) { |
| 6694 | int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size(); |
| 6695 | if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup) |
| 6696 | argument = get(virtualRegisterForArgument(argumentIndexIncludingThis)); |
| 6697 | else |
| 6698 | argument = addToGraph(JSConstant, OpInfo(m_constantUndefined)); |
| 6699 | } else |
| 6700 | argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction())); |
| 6701 | set(bytecode.m_dst, argument); |
| 6702 | NEXT_OPCODE(op_get_argument); |
| 6703 | } |
| 6704 | case op_new_async_generator_func: |
| 6705 | handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>()); |
| 6706 | NEXT_OPCODE(op_new_async_generator_func); |
| 6707 | case op_new_func: |
| 6708 | handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>()); |
| 6709 | NEXT_OPCODE(op_new_func); |
| 6710 | case op_new_generator_func: |
| 6711 | handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>()); |
| 6712 | NEXT_OPCODE(op_new_generator_func); |
| 6713 | case op_new_async_func: |
| 6714 | handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>()); |
| 6715 | NEXT_OPCODE(op_new_async_func); |
| 6716 | |
| 6717 | case op_new_func_exp: |
| 6718 | handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>()); |
| 6719 | NEXT_OPCODE(op_new_func_exp); |
| 6720 | case op_new_generator_func_exp: |
| 6721 | handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>()); |
| 6722 | NEXT_OPCODE(op_new_generator_func_exp); |
| 6723 | case op_new_async_generator_func_exp: |
| 6724 | handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>()); |
| 6725 | NEXT_OPCODE(op_new_async_generator_func_exp); |
| 6726 | case op_new_async_func_exp: |
| 6727 | handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>()); |
| 6728 | NEXT_OPCODE(op_new_async_func_exp); |
| 6729 | |
| 6730 | case op_set_function_name: { |
| 6731 | auto bytecode = currentInstruction->as<OpSetFunctionName>(); |
| 6732 | Node* func = get(bytecode.m_function); |
| 6733 | Node* name = get(bytecode.m_name); |
| 6734 | addToGraph(SetFunctionName, func, name); |
| 6735 | NEXT_OPCODE(op_set_function_name); |
| 6736 | } |
| 6737 | |
| 6738 | case op_typeof: { |
| 6739 | auto bytecode = currentInstruction->as<OpTypeof>(); |
| 6740 | set(bytecode.m_dst, addToGraph(TypeOf, get(bytecode.m_value))); |
| 6741 | NEXT_OPCODE(op_typeof); |
| 6742 | } |
| 6743 | |
| 6744 | case op_to_number: { |
| 6745 | auto bytecode = currentInstruction->as<OpToNumber>(); |
| 6746 | SpeculatedType prediction = getPrediction(); |
| 6747 | Node* value = get(bytecode.m_operand); |
| 6748 | set(bytecode.m_dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value)); |
| 6749 | NEXT_OPCODE(op_to_number); |
| 6750 | } |
| 6751 | |
| 6752 | case op_to_string: { |
| 6753 | auto bytecode = currentInstruction->as<OpToString>(); |
| 6754 | Node* value = get(bytecode.m_operand); |
| 6755 | set(bytecode.m_dst, addToGraph(ToString, value)); |
| 6756 | NEXT_OPCODE(op_to_string); |
| 6757 | } |
| 6758 | |
| 6759 | case op_to_object: { |
| 6760 | auto bytecode = currentInstruction->as<OpToObject>(); |
| 6761 | SpeculatedType prediction = getPrediction(); |
| 6762 | Node* value = get(bytecode.m_operand); |
| 6763 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_message]; |
| 6764 | set(bytecode.m_dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value)); |
| 6765 | NEXT_OPCODE(op_to_object); |
| 6766 | } |
| 6767 | |
| 6768 | case op_in_by_val: { |
| 6769 | auto bytecode = currentInstruction->as<OpInByVal>(); |
| 6770 | ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read); |
| 6771 | set(bytecode.m_dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.m_base), get(bytecode.m_property))); |
| 6772 | NEXT_OPCODE(op_in_by_val); |
| 6773 | } |
| 6774 | |
| 6775 | case op_in_by_id: { |
| 6776 | auto bytecode = currentInstruction->as<OpInById>(); |
| 6777 | Node* base = get(bytecode.m_base); |
| 6778 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 6779 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; |
| 6780 | |
| 6781 | InByIdStatus status = InByIdStatus::computeFor( |
| 6782 | m_inlineStackTop->m_profiledBlock, |
| 6783 | m_inlineStackTop->m_baselineMap, m_icContextStack, |
| 6784 | currentCodeOrigin(), uid); |
| 6785 | |
| 6786 | if (status.isSimple()) { |
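// For a simple status we can compile `in` as a structure match: check each variant's
// condition set, then map the base's structure to the hit/miss boolean it implies.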
| 6787 | bool allOK = true; |
| 6788 | MatchStructureData* data = m_graph.m_matchStructureData.add(); |
| 6789 | for (const InByIdVariant& variant : status.variants()) { |
| 6790 | if (!check(variant.conditionSet())) { |
| 6791 | allOK = false; |
| 6792 | break; |
| 6793 | } |
| 6794 | for (Structure* structure : variant.structureSet()) { |
| 6795 | MatchStructureVariant matchVariant; |
| 6796 | matchVariant.structure = m_graph.registerStructure(structure); |
| 6797 | matchVariant.result = variant.isHit(); |
| 6798 | |
| 6799 | data->variants.append(WTFMove(matchVariant)); |
| 6800 | } |
| 6801 | } |
| 6802 | |
| 6803 | if (allOK) { |
| 6804 | addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base); |
| 6805 | |
| 6806 | Node* match = addToGraph(MatchStructure, OpInfo(data), base); |
| 6807 | set(bytecode.m_dst, match); |
| 6808 | NEXT_OPCODE(op_in_by_id); |
| 6809 | } |
| 6810 | } |
| 6811 | |
| 6812 | set(bytecode.m_dst, addToGraph(InById, OpInfo(identifierNumber), base)); |
| 6813 | NEXT_OPCODE(op_in_by_id); |
| 6814 | } |
| 6815 | |
| 6816 | case op_get_enumerable_length: { |
| 6817 | auto bytecode = currentInstruction->as<OpGetEnumerableLength>(); |
| 6818 | set(bytecode.m_dst, addToGraph(GetEnumerableLength, get(bytecode.m_base))); |
| 6819 | NEXT_OPCODE(op_get_enumerable_length); |
| 6820 | } |
| 6821 | |
| 6822 | case op_has_generic_property: { |
| 6823 | auto bytecode = currentInstruction->as<OpHasGenericProperty>(); |
| 6824 | set(bytecode.m_dst, addToGraph(HasGenericProperty, get(bytecode.m_base), get(bytecode.m_property))); |
| 6825 | NEXT_OPCODE(op_has_generic_property); |
| 6826 | } |
| 6827 | |
| 6828 | case op_has_structure_property: { |
| 6829 | auto bytecode = currentInstruction->as<OpHasStructureProperty>(); |
| 6830 | set(bytecode.m_dst, addToGraph(HasStructureProperty, |
| 6831 | get(bytecode.m_base), |
| 6832 | get(bytecode.m_property), |
| 6833 | get(bytecode.m_enumerator))); |
| 6834 | NEXT_OPCODE(op_has_structure_property); |
| 6835 | } |
| 6836 | |
| 6837 | case op_has_indexed_property: { |
| 6838 | auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); |
| 6839 | Node* base = get(bytecode.m_base); |
| 6840 | ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read); |
| 6841 | Node* property = get(bytecode.m_property); |
| 6842 | addVarArgChild(base); |
| 6843 | addVarArgChild(property); |
| 6844 | addVarArgChild(nullptr); |
| 6845 | Node* hasIterableProperty = addToGraph(Node::VarArg, HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty))); |
| 6846 | m_exitOK = false; // HasIndexedProperty must be treated as if it clobbers exit state, since FixupPhase may make it generic. |
| 6847 | set(bytecode.m_dst, hasIterableProperty); |
| 6848 | NEXT_OPCODE(op_has_indexed_property); |
| 6849 | } |
| 6850 | |
| 6851 | case op_get_direct_pname: { |
| 6852 | auto bytecode = currentInstruction->as<OpGetDirectPname>(); |
| 6853 | SpeculatedType prediction = getPredictionWithoutOSRExit(); |
| 6854 | |
| 6855 | Node* base = get(bytecode.m_base); |
| 6856 | Node* property = get(bytecode.m_property); |
| 6857 | Node* index = get(bytecode.m_index); |
| 6858 | Node* enumerator = get(bytecode.m_enumerator); |
| 6859 | |
| 6860 | addVarArgChild(base); |
| 6861 | addVarArgChild(property); |
| 6862 | addVarArgChild(index); |
| 6863 | addVarArgChild(enumerator); |
| 6864 | set(bytecode.m_dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); |
| 6865 | |
| 6866 | NEXT_OPCODE(op_get_direct_pname); |
| 6867 | } |
| 6868 | |
| 6869 | case op_get_property_enumerator: { |
| 6870 | auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>(); |
| 6871 | set(bytecode.m_dst, addToGraph(GetPropertyEnumerator, get(bytecode.m_base))); |
| 6872 | NEXT_OPCODE(op_get_property_enumerator); |
| 6873 | } |
| 6874 | |
| 6875 | case op_enumerator_structure_pname: { |
| 6876 | auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); |
| 6877 | set(bytecode.m_dst, addToGraph(GetEnumeratorStructurePname, |
| 6878 | get(bytecode.m_enumerator), |
| 6879 | get(bytecode.m_index))); |
| 6880 | NEXT_OPCODE(op_enumerator_structure_pname); |
| 6881 | } |
| 6882 | |
| 6883 | case op_enumerator_generic_pname: { |
| 6884 | auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); |
| 6885 | set(bytecode.m_dst, addToGraph(GetEnumeratorGenericPname, |
| 6886 | get(bytecode.m_enumerator), |
| 6887 | get(bytecode.m_index))); |
| 6888 | NEXT_OPCODE(op_enumerator_generic_pname); |
| 6889 | } |
| 6890 | |
| 6891 | case op_to_index_string: { |
| 6892 | auto bytecode = currentInstruction->as<OpToIndexString>(); |
| 6893 | set(bytecode.m_dst, addToGraph(ToIndexString, get(bytecode.m_index))); |
| 6894 | NEXT_OPCODE(op_to_index_string); |
| 6895 | } |
| 6896 | |
| 6897 | case op_log_shadow_chicken_prologue: { |
| 6898 | auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); |
| 6899 | if (!m_inlineStackTop->m_inlineCallFrame) |
| 6900 | addToGraph(LogShadowChickenPrologue, get(bytecode.m_scope)); |
| 6901 | NEXT_OPCODE(op_log_shadow_chicken_prologue); |
| 6902 | } |
| 6903 | |
| 6904 | case op_log_shadow_chicken_tail: { |
| 6905 | auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); |
| 6906 | if (!m_inlineStackTop->m_inlineCallFrame) { |
| 6907 | // FIXME: The right solution for inlining is to elide these whenever the tail call |
| 6908 | // ends up being inlined. |
| 6909 | // https://bugs.webkit.org/show_bug.cgi?id=155686 |
| 6910 | addToGraph(LogShadowChickenTail, get(bytecode.m_thisValue), get(bytecode.m_scope)); |
| 6911 | } |
| 6912 | NEXT_OPCODE(op_log_shadow_chicken_tail); |
| 6913 | } |
| 6914 | |
| 6915 | case op_unreachable: { |
| 6916 | flushForTerminal(); |
| 6917 | addToGraph(Unreachable); |
| 6918 | LAST_OPCODE(op_unreachable); |
| 6919 | } |
| 6920 | |
| 6921 | default: |
| 6922 | // Parse failed! This should not happen because the capabilities checker |
| 6923 | // should have caught it. |
| 6924 | RELEASE_ASSERT_NOT_REACHED(); |
| 6925 | return; |
| 6926 | } |
| 6927 | } |
| 6928 | } |
| 6929 | |
| 6930 | void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets) |
| 6931 | { |
| 6932 | ASSERT(!block->isLinked); |
| 6933 | ASSERT(!block->isEmpty()); |
| 6934 | Node* node = block->terminal(); |
| 6935 | ASSERT(node->isTerminal()); |
| 6936 | |
| 6937 | switch (node->op()) { |
| 6938 | case Jump: |
| 6939 | node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing()); |
| 6940 | break; |
| 6941 | |
| 6942 | case Branch: { |
| 6943 | BranchData* data = node->branchData(); |
| 6944 | data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex()); |
| 6945 | data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex()); |
| 6946 | break; |
| 6947 | } |
| 6948 | |
| 6949 | case Switch: { |
| 6950 | SwitchData* data = node->switchData(); |
for (unsigned i = data->cases.size(); i--;)
| 6952 | data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex()); |
| 6953 | data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex()); |
| 6954 | break; |
| 6955 | } |
| 6956 | |
| 6957 | default: |
| 6958 | RELEASE_ASSERT_NOT_REACHED(); |
| 6959 | } |
| 6960 | |
VERBOSE_LOG("Marking ", RawPointer(block), " as linked (actually did linking)\n");
| 6962 | block->didLink(); |
| 6963 | } |
| 6964 | |
| 6965 | void ByteCodeParser::linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets) |
| 6966 | { |
| 6967 | for (size_t i = 0; i < unlinkedBlocks.size(); ++i) { |
VERBOSE_LOG("Attempting to link ", RawPointer(unlinkedBlocks[i]), "\n");
| 6969 | linkBlock(unlinkedBlocks[i], possibleTargets); |
| 6970 | } |
| 6971 | } |
| 6972 | |
| 6973 | ByteCodeParser::InlineStackEntry::InlineStackEntry( |
| 6974 | ByteCodeParser* byteCodeParser, |
| 6975 | CodeBlock* codeBlock, |
| 6976 | CodeBlock* profiledBlock, |
| 6977 | JSFunction* callee, // Null if this is a closure call. |
| 6978 | VirtualRegister returnValueVR, |
| 6979 | VirtualRegister inlineCallFrameStart, |
| 6980 | int argumentCountIncludingThis, |
| 6981 | InlineCallFrame::Kind kind, |
| 6982 | BasicBlock* continuationBlock) |
| 6983 | : m_byteCodeParser(byteCodeParser) |
| 6984 | , m_codeBlock(codeBlock) |
| 6985 | , m_profiledBlock(profiledBlock) |
| 6986 | , m_continuationBlock(continuationBlock) |
| 6987 | , m_returnValue(returnValueVR) |
| 6988 | , m_caller(byteCodeParser->m_inlineStackTop) |
| 6989 | { |
| 6990 | { |
| 6991 | m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock()); |
| 6992 | |
| 6993 | ConcurrentJSLocker locker(m_profiledBlock->m_lock); |
| 6994 | m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker)); |
| 6995 | |
| 6996 | // We do this while holding the lock because we want to encourage StructureStubInfo's |
| 6997 | // to be potentially added to operations and because the profiled block could be in the |
| 6998 | // middle of LLInt->JIT tier-up in which case we would be adding the info's right now. |
| 6999 | if (m_profiledBlock->hasBaselineJITProfiling()) |
| 7000 | m_profiledBlock->getICStatusMap(locker, m_baselineMap); |
| 7001 | } |
| 7002 | |
| 7003 | CodeBlock* optimizedBlock = m_profiledBlock->replacement(); |
| 7004 | m_optimizedContext.optimizedCodeBlock = optimizedBlock; |
| 7005 | if (Options::usePolyvariantDevirtualization() && optimizedBlock) { |
| 7006 | ConcurrentJSLocker locker(optimizedBlock->m_lock); |
| 7007 | optimizedBlock->getICStatusMap(locker, m_optimizedContext.map); |
| 7008 | } |
| 7009 | byteCodeParser->m_icContextStack.append(&m_optimizedContext); |
| 7010 | |
| 7011 | int argumentCountIncludingThisWithFixup = std::max<int>(argumentCountIncludingThis, codeBlock->numParameters()); |
| 7012 | |
| 7013 | if (m_caller) { |
| 7014 | // Inline case. |
| 7015 | ASSERT(codeBlock != byteCodeParser->m_codeBlock); |
| 7016 | ASSERT(inlineCallFrameStart.isValid()); |
| 7017 | |
| 7018 | m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames()->add(); |
| 7019 | m_optimizedContext.inlineCallFrame = m_inlineCallFrame; |
| 7020 | |
| 7021 | // The owner is the machine code block, and we already have a barrier on that when the |
| 7022 | // plan finishes. |
| 7023 | m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion()); |
| 7024 | m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - CallFrame::headerSizeInRegisters); |
| 7025 | m_inlineCallFrame->argumentCountIncludingThis = argumentCountIncludingThis; |
| 7026 | if (callee) { |
| 7027 | m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee); |
| 7028 | m_inlineCallFrame->isClosureCall = false; |
| 7029 | } else |
| 7030 | m_inlineCallFrame->isClosureCall = true; |
| 7031 | m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin(); |
m_inlineCallFrame->argumentsWithFixup.resizeToFit(argumentCountIncludingThisWithFixup); // Set the number of arguments including this, but don't configure the value recoveries yet.
| 7033 | m_inlineCallFrame->kind = kind; |
| 7034 | |
| 7035 | m_identifierRemap.resize(codeBlock->numberOfIdentifiers()); |
| 7036 | m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables()); |
| 7037 | |
| 7038 | for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) { |
| 7039 | UniquedStringImpl* rep = codeBlock->identifier(i).impl(); |
| 7040 | unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep); |
| 7041 | m_identifierRemap[i] = index; |
| 7042 | } |
| 7043 | for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) { |
| 7044 | m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables(); |
| 7045 | byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i); |
| 7046 | } |
| 7047 | } else { |
| 7048 | // Machine code block case. |
| 7049 | ASSERT(codeBlock == byteCodeParser->m_codeBlock); |
| 7050 | ASSERT(!callee); |
| 7051 | ASSERT(!returnValueVR.isValid()); |
| 7052 | ASSERT(!inlineCallFrameStart.isValid()); |
| 7053 | |
m_inlineCallFrame = nullptr;
| 7055 | |
| 7056 | m_identifierRemap.resize(codeBlock->numberOfIdentifiers()); |
| 7057 | m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables()); |
| 7058 | for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) |
| 7059 | m_identifierRemap[i] = i; |
| 7060 | for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) |
| 7061 | m_switchRemap[i] = i; |
| 7062 | } |
| 7063 | |
| 7064 | m_argumentPositions.resize(argumentCountIncludingThisWithFixup); |
| 7065 | for (int i = 0; i < argumentCountIncludingThisWithFixup; ++i) { |
| 7066 | byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition()); |
| 7067 | ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last(); |
| 7068 | m_argumentPositions[i] = argumentPosition; |
| 7069 | } |
| 7070 | byteCodeParser->m_inlineCallFrameToArgumentPositions.add(m_inlineCallFrame, m_argumentPositions); |
| 7071 | |
| 7072 | byteCodeParser->m_inlineStackTop = this; |
| 7073 | } |
| 7074 | |
| 7075 | ByteCodeParser::InlineStackEntry::~InlineStackEntry() |
| 7076 | { |
| 7077 | m_byteCodeParser->m_inlineStackTop = m_caller; |
| 7078 | RELEASE_ASSERT(m_byteCodeParser->m_icContextStack.last() == &m_optimizedContext); |
| 7079 | m_byteCodeParser->m_icContextStack.removeLast(); |
| 7080 | } |
| 7081 | |
| 7082 | void ByteCodeParser::parseCodeBlock() |
| 7083 | { |
| 7084 | clearCaches(); |
| 7085 | |
| 7086 | CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; |
| 7087 | |
| 7088 | if (UNLIKELY(m_graph.compilation())) { |
| 7089 | m_graph.compilation()->addProfiledBytecodes( |
| 7090 | *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock); |
| 7091 | } |
| 7092 | |
| 7093 | if (UNLIKELY(Options::dumpSourceAtDFGTime())) { |
| 7094 | Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump(); |
| 7095 | if (inlineCallFrame()) { |
| 7096 | DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex()); |
| 7097 | deferredSourceDump.append(dump); |
| 7098 | } else |
| 7099 | deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion())); |
| 7100 | } |
| 7101 | |
| 7102 | if (Options::dumpBytecodeAtDFGTime()) { |
dataLog("Parsing ", *codeBlock);
if (inlineCallFrame()) {
dataLog(
" for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
" ", inlineCallFrame()->directCaller);
}
dataLog(
", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
| 7111 | codeBlock->baselineVersion()->dumpBytecode(); |
| 7112 | } |
| 7113 | |
| 7114 | Vector<InstructionStream::Offset, 32> jumpTargets; |
| 7115 | computePreciseJumpTargets(codeBlock, jumpTargets); |
| 7116 | if (Options::dumpBytecodeAtDFGTime()) { |
dataLog("Jump targets: ");
| 7118 | CommaPrinter comma; |
| 7119 | for (unsigned i = 0; i < jumpTargets.size(); ++i) |
| 7120 | dataLog(comma, jumpTargets[i]); |
dataLog("\n");
| 7122 | } |
| 7123 | |
| 7124 | for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) { |
// The maximum bytecode offset to go into the current basic block is either the next jump target or the end of the instructions.
| 7126 | unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size(); |
| 7127 | ASSERT(m_currentIndex < limit); |
| 7128 | |
| 7129 | // Loop until we reach the current limit (i.e. next jump target). |
| 7130 | do { |
| 7131 | // There may already be a currentBlock in two cases: |
| 7132 | // - we may have just entered the loop for the first time |
| 7133 | // - we may have just returned from an inlined callee that had some early returns and |
| 7134 | // so allocated a continuation block, and the instruction after the call is a jump target. |
| 7135 | // In both cases, we want to keep using it. |
| 7136 | if (!m_currentBlock) { |
| 7137 | m_currentBlock = allocateTargetableBlock(m_currentIndex); |
| 7138 | |
| 7139 | // The first block is definitely an OSR target. |
| 7140 | if (m_graph.numBlocks() == 1) { |
| 7141 | m_currentBlock->isOSRTarget = true; |
| 7142 | m_graph.m_roots.append(m_currentBlock); |
| 7143 | } |
| 7144 | prepareToParseBlock(); |
| 7145 | } |
| 7146 | |
| 7147 | parseBlock(limit); |
| 7148 | |
| 7149 | // We should not have gone beyond the limit. |
| 7150 | ASSERT(m_currentIndex <= limit); |
| 7151 | |
| 7152 | if (m_currentBlock->isEmpty()) { |
| 7153 | // This case only happens if the last instruction was an inlined call with early returns |
| 7154 | // or polymorphic (creating an empty continuation block), |
| 7155 | // and then we hit the limit before putting anything in the continuation block. |
| 7156 | ASSERT(m_currentIndex == limit); |
| 7157 | makeBlockTargetable(m_currentBlock, m_currentIndex); |
| 7158 | } else { |
| 7159 | ASSERT(m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame())); |
| 7160 | m_currentBlock = nullptr; |
| 7161 | } |
| 7162 | } while (m_currentIndex < limit); |
| 7163 | } |
| 7164 | |
| 7165 | // Should have reached the end of the instructions. |
| 7166 | ASSERT(m_currentIndex == codeBlock->instructions().size()); |
| 7167 | |
VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");
| 7169 | } |
| 7170 | |
| 7171 | template <typename Bytecode> |
| 7172 | void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize) |
| 7173 | { |
| 7174 | Node* base = get(bytecode.m_base); |
| 7175 | Node* property = get(bytecode.m_property); |
| 7176 | Node* value = get(bytecode.m_value); |
| 7177 | bool isDirect = Bytecode::opcodeID == op_put_by_val_direct; |
| 7178 | bool compiledAsPutById = false; |
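// If the baseline JIT's ByValInfo cached a single property identifier here, and we have
// never exited on a bad identifier, type, or cell, we can compile this put_by_val as a
// put_by_id guarded by a check on the property value.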
| 7179 | { |
| 7180 | unsigned identifierNumber = std::numeric_limits<unsigned>::max(); |
| 7181 | PutByIdStatus putByIdStatus; |
| 7182 | { |
| 7183 | ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); |
| 7184 | ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo; |
// FIXME: When the bytecode has not been compiled by the baseline JIT, byValInfo is null,
// and we have no information.
| 7187 | if (byValInfo |
| 7188 | && byValInfo->stubInfo |
| 7189 | && !byValInfo->tookSlowPath |
| 7190 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) |
| 7191 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) |
| 7192 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { |
| 7193 | compiledAsPutById = true; |
| 7194 | identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); |
| 7195 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; |
| 7196 | |
| 7197 | if (Symbol* symbol = byValInfo->cachedSymbol.get()) { |
| 7198 | FrozenValue* frozen = m_graph.freezeStrong(symbol); |
| 7199 | addToGraph(CheckCell, OpInfo(frozen), property); |
| 7200 | } else { |
| 7201 | ASSERT(!uid->isSymbol()); |
| 7202 | addToGraph(CheckStringIdent, OpInfo(uid), property); |
| 7203 | } |
| 7204 | |
| 7205 | putByIdStatus = PutByIdStatus::computeForStubInfo( |
| 7206 | locker, m_inlineStackTop->m_profiledBlock, |
| 7207 | byValInfo->stubInfo, currentCodeOrigin(), uid); |
| 7208 | |
| 7209 | } |
| 7210 | } |
| 7211 | |
| 7212 | if (compiledAsPutById) |
| 7213 | handlePutById(base, identifierNumber, value, putByIdStatus, isDirect, instructionSize); |
| 7214 | } |
| 7215 | |
| 7216 | if (!compiledAsPutById) { |
| 7217 | ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).m_arrayProfile, Array::Write); |
| 7218 | |
| 7219 | addVarArgChild(base); |
| 7220 | addVarArgChild(property); |
| 7221 | addVarArgChild(value); |
addVarArgChild(nullptr); // Leave room for property storage.
addVarArgChild(nullptr); // Leave room for length.
| 7224 | addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); |
| 7225 | m_exitOK = false; // PutByVal and PutByValDirect must be treated as if they clobber exit state, since FixupPhase may make them generic. |
| 7226 | } |
| 7227 | } |
| 7228 | |
| 7229 | template <typename Bytecode> |
| 7230 | void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode) |
| 7231 | { |
| 7232 | Node* base = get(bytecode.m_base); |
| 7233 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property]; |
| 7234 | Node* accessor = get(bytecode.m_accessor); |
| 7235 | addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, accessor); |
| 7236 | } |
| 7237 | |
| 7238 | template <typename Bytecode> |
| 7239 | void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode) |
| 7240 | { |
| 7241 | Node* base = get(bytecode.m_base); |
| 7242 | Node* subscript = get(bytecode.m_property); |
| 7243 | Node* accessor = get(bytecode.m_accessor); |
| 7244 | addToGraph(op, OpInfo(bytecode.m_attributes), base, subscript, accessor); |
| 7245 | } |
| 7246 | |
| 7247 | template <typename Bytecode> |
| 7248 | void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode) |
| 7249 | { |
| 7250 | FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.m_functionDecl); |
| 7251 | FrozenValue* frozen = m_graph.freezeStrong(decl); |
| 7252 | Node* scope = get(bytecode.m_scope); |
| 7253 | set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope)); |
| 7254 | // Ideally we wouldn't have to do this Phantom. But: |
| 7255 | // |
| 7256 | // For the constant case: we must do it because otherwise we would have no way of knowing |
| 7257 | // that the scope is live at OSR here. |
| 7258 | // |
| 7259 | // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation |
| 7260 | // won't be able to handle an Undefined scope. |
| 7261 | addToGraph(Phantom, scope); |
| 7262 | } |
| 7263 | |
| 7264 | template <typename Bytecode> |
| 7265 | void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode) |
| 7266 | { |
| 7267 | FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.m_functionDecl); |
| 7268 | FrozenValue* frozen = m_graph.freezeStrong(expr); |
| 7269 | Node* scope = get(bytecode.m_scope); |
| 7270 | set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope)); |
| 7271 | // Ideally we wouldn't have to do this Phantom. But: |
| 7272 | // |
| 7273 | // For the constant case: we must do it because otherwise we would have no way of knowing |
| 7274 | // that the scope is live at OSR here. |
| 7275 | // |
| 7276 | // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation |
| 7277 | // won't be able to handle an Undefined scope. |
| 7278 | addToGraph(Phantom, scope); |
| 7279 | } |
| 7280 | |
| 7281 | void ByteCodeParser::parse() |
| 7282 | { |
| 7283 | // Set during construction. |
| 7284 | ASSERT(!m_currentIndex); |
| 7285 | |
VERBOSE_LOG("Parsing ", *m_codeBlock, "\n");
| 7287 | |
| 7288 | InlineStackEntry inlineStackEntry( |
this, m_codeBlock, m_profiledBlock, nullptr, VirtualRegister(), VirtualRegister(),
| 7290 | m_codeBlock->numParameters(), InlineCallFrame::Call, nullptr); |
| 7291 | |
| 7292 | parseCodeBlock(); |
| 7293 | linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); |
| 7294 | |
| 7295 | if (m_hasAnyForceOSRExits) { |
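        // Some opcode was lowered to ForceOSRExit during parsing. Everything after
        // such a node within its block can never execute, so below we rewrite the
        // tail of each affected block into liveness-preserving flushes followed by
        // an Unreachable. Blocks that can reach the OSR entry target are collected
        // into blocksToIgnore first and left untouched.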
| 7296 | BlockSet blocksToIgnore; |
| 7297 | for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { |
| 7298 | if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) { |
| 7299 | blocksToIgnore.add(block); |
| 7300 | break; |
| 7301 | } |
| 7302 | } |
| 7303 | |
| 7304 | { |
| 7305 | bool isSafeToValidate = false; |
| 7306 | auto postOrder = m_graph.blocksInPostOrder(isSafeToValidate); // This algorithm doesn't rely on the predecessors list, which is not yet built. |
| 7307 | bool changed; |
| 7308 | do { |
| 7309 | changed = false; |
| 7310 | for (BasicBlock* block : postOrder) { |
| 7311 | for (BasicBlock* successor : block->successors()) { |
| 7312 | if (blocksToIgnore.contains(successor)) { |
| 7313 | changed |= blocksToIgnore.add(block); |
| 7314 | break; |
| 7315 | } |
| 7316 | } |
| 7317 | } |
| 7318 | } while (changed); |
| 7319 | } |
| 7320 | |
| 7321 | InsertionSet insertionSet(m_graph); |
| 7322 | Operands<VariableAccessData*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead); |
| 7323 | |
| 7324 | for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { |
| 7325 | if (blocksToIgnore.contains(block)) |
| 7326 | continue; |
| 7327 | |
| 7328 | mapping.fill(nullptr); |
| 7329 | if (validationEnabled()) { |
| 7330 | // Verify that it's correct to fill mapping with nullptr. |
| 7331 | for (unsigned i = 0; i < block->variablesAtHead.size(); ++i) { |
| 7332 | Node* node = block->variablesAtHead.at(i); |
| 7333 | RELEASE_ASSERT(!node); |
| 7334 | } |
| 7335 | } |
| 7336 | |
| 7337 | for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { |
| 7338 | { |
| 7339 | Node* node = block->at(nodeIndex); |
| 7340 | |
| 7341 | if (node->hasVariableAccessData(m_graph)) |
| 7342 | mapping.operand(node->local()) = node->variableAccessData(); |
| 7343 | |
| 7344 | if (node->op() != ForceOSRExit) |
| 7345 | continue; |
| 7346 | } |
| 7347 | |
| 7348 | NodeOrigin origin = block->at(nodeIndex)->origin; |
| 7349 | RELEASE_ASSERT(origin.exitOK); |
| 7350 | |
| 7351 | ++nodeIndex; |
| 7352 | |
| 7353 | { |
| 7354 | if (validationEnabled()) { |
                        // This verifies that we don't need to change any of the successors' predecessor
                        // lists after planting the Unreachable below. At this point in the bytecode
                        // parser, we haven't linked up the predecessor lists yet.
| 7358 | for (BasicBlock* successor : block->successors()) |
| 7359 | RELEASE_ASSERT(successor->predecessors.isEmpty()); |
| 7360 | } |
| 7361 | |
| 7362 | auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) { |
| 7363 | VariableAccessData* variable = mapping.operand(operand); |
| 7364 | if (!variable) { |
| 7365 | variable = newVariableAccessData(operand); |
| 7366 | mapping.operand(operand) = variable; |
| 7367 | } |
| 7368 | |
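                        // If the operand is an argument slot of some call frame
                        // (inlined or not), tie the variable to that argument's
                        // ArgumentPosition so every flush of the same argument agrees
                        // on the variable's format.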
| 7369 | VirtualRegister argument = operand - (inlineCallFrame ? inlineCallFrame->stackOffset : 0); |
| 7370 | if (argument.isArgument() && !argument.isHeader()) { |
| 7371 | const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame); |
| 7372 | arguments[argument.toArgument()]->addVariable(variable); |
| 7373 | } |
| 7374 | insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable)); |
| 7375 | }; |
| 7376 | auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) { |
| 7377 | insertLivenessPreservingOp(inlineCallFrame, Flush, operand); |
| 7378 | }; |
| 7379 | auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) { |
| 7380 | insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand); |
| 7381 | }; |
| 7382 | flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect); |
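                    // Flush (or phantom) everything bytecode liveness considers live at
                    // this origin, so the eventual OSR exit can rebuild the state of
                    // every frame on the inline stack.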
| 7383 | } |
| 7384 | |
| 7385 | while (true) { |
| 7386 | RELEASE_ASSERT(nodeIndex < block->size()); |
| 7387 | |
| 7388 | Node* node = block->at(nodeIndex); |
| 7389 | |
| 7390 | node->origin = origin; |
| 7391 | m_graph.doToChildren(node, [&] (Edge edge) { |
                        // We only need to keep data flow edges to nodes defined prior to the ForceOSRExit. The reason
                        // is that we rely on backwards propagation being able to see the "full" bytecode. To model
                        // this, we preserve uses of a node in a generic way so that backwards propagation can reason
                        // about them. Therefore, we can't remove uses of a node defined before the ForceOSRExit
                        // even at a point in the program after the ForceOSRExit, because that would break backwards
                        // propagation's analysis over the uses of a node. However, we don't need this same preservation
                        // for nodes defined after the ForceOSRExit, as we've already exited before those defs.
| 7399 | if (edge->hasResult()) |
| 7400 | insertionSet.insertNode(nodeIndex, SpecNone, Phantom, origin, Edge(edge.node(), UntypedUse)); |
| 7401 | }); |
| 7402 | |
| 7403 | bool isTerminal = node->isTerminal(); |
| 7404 | |
| 7405 | node->removeWithoutChecks(); |
| 7406 | |
| 7407 | if (isTerminal) { |
| 7408 | insertionSet.insertNode(nodeIndex, SpecNone, Unreachable, origin); |
| 7409 | break; |
| 7410 | } |
| 7411 | |
| 7412 | ++nodeIndex; |
| 7413 | } |
| 7414 | |
| 7415 | insertionSet.execute(block); |
| 7416 | |
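                // Everything after the Unreachable we planted is dead; truncate the
                // block so later phases never see the dead tail.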
| 7417 | auto nodeAndIndex = block->findTerminal(); |
| 7418 | RELEASE_ASSERT(nodeAndIndex.node->op() == Unreachable); |
| 7419 | block->resize(nodeAndIndex.index + 1); |
| 7420 | break; |
| 7421 | } |
| 7422 | } |
| 7423 | } else if (validationEnabled()) { |
| 7424 | // Ensure our bookkeeping for ForceOSRExit nodes is working. |
| 7425 | for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { |
| 7426 | for (Node* node : *block) |
| 7427 | RELEASE_ASSERT(node->op() != ForceOSRExit); |
| 7428 | } |
| 7429 | } |
| 7430 | |
| 7431 | m_graph.determineReachability(); |
| 7432 | m_graph.killUnreachableBlocks(); |
| 7433 | |
| 7434 | for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { |
| 7435 | BasicBlock* block = m_graph.block(blockIndex); |
| 7436 | if (!block) |
| 7437 | continue; |
| 7438 | ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals()); |
| 7439 | ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments()); |
| 7440 | ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals()); |
| 7441 | ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments()); |
| 7442 | } |
| 7443 | |
| 7444 | m_graph.m_localVars = m_numLocals; |
| 7445 | m_graph.m_parameterSlots = m_parameterSlots; |
| 7446 | } |
| 7447 | |
| 7448 | void parse(Graph& graph) |
| 7449 | { |
| 7450 | ByteCodeParser(graph).parse(); |
| 7451 | } |
| 7452 | |
| 7453 | } } // namespace JSC::DFG |
| 7454 | |
#endif // ENABLE(DFG_JIT)
| 7456 | |