1/*
2 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "FTLLowerDFGToB3.h"
28
29#if ENABLE(FTL_JIT)
30
31#include "AirCode.h"
32#include "AirGenerationContext.h"
33#include "AllowMacroScratchRegisterUsage.h"
34#include "AllowMacroScratchRegisterUsageIf.h"
35#include "AtomicsObject.h"
36#include "B3CheckValue.h"
37#include "B3FenceValue.h"
38#include "B3PatchpointValue.h"
39#include "B3SlotBaseValue.h"
40#include "B3StackmapGenerationParams.h"
41#include "B3ValueInlines.h"
42#include "CallFrameShuffler.h"
43#include "CodeBlockWithJITType.h"
44#include "DFGAbstractInterpreterInlines.h"
45#include "DFGCapabilities.h"
46#include "DFGDoesGC.h"
47#include "DFGDominators.h"
48#include "DFGInPlaceAbstractState.h"
49#include "DFGLivenessAnalysisPhase.h"
50#include "DFGMayExit.h"
51#include "DFGOSRAvailabilityAnalysisPhase.h"
52#include "DFGOSRExitFuzz.h"
53#include "DirectArguments.h"
54#include "FTLAbstractHeapRepository.h"
55#include "FTLAvailableRecovery.h"
56#include "FTLExceptionTarget.h"
57#include "FTLForOSREntryJITCode.h"
58#include "FTLFormattedValue.h"
59#include "FTLLazySlowPathCall.h"
60#include "FTLLoweredNodeValue.h"
61#include "FTLOperations.h"
62#include "FTLOutput.h"
63#include "FTLPatchpointExceptionHandle.h"
64#include "FTLSnippetParams.h"
65#include "FTLThunks.h"
66#include "FTLWeightedTarget.h"
67#include "JITAddGenerator.h"
68#include "JITBitAndGenerator.h"
69#include "JITBitOrGenerator.h"
70#include "JITBitXorGenerator.h"
71#include "JITDivGenerator.h"
72#include "JITInlineCacheGenerator.h"
73#include "JITLeftShiftGenerator.h"
74#include "JITMathIC.h"
75#include "JITMulGenerator.h"
76#include "JITRightShiftGenerator.h"
77#include "JITSubGenerator.h"
78#include "JSAsyncFunction.h"
79#include "JSAsyncGeneratorFunction.h"
80#include "JSCInlines.h"
81#include "JSGeneratorFunction.h"
82#include "JSImmutableButterfly.h"
83#include "JSLexicalEnvironment.h"
84#include "JSMap.h"
85#include "OperandsInlines.h"
86#include "ProbeContext.h"
87#include "RegExpObject.h"
88#include "ScopedArguments.h"
89#include "ScopedArgumentsTable.h"
90#include "ScratchRegisterAllocator.h"
91#include "SetupVarargsFrame.h"
92#include "ShadowChicken.h"
93#include "StructureStubInfo.h"
94#include "SuperSampler.h"
95#include "ThunkGenerators.h"
96#include "VirtualRegister.h"
97#include "Watchdog.h"
98#include <atomic>
99#include <wtf/Box.h>
100#include <wtf/Gigacage.h>
101#include <wtf/RecursableLambda.h>
102#include <wtf/StdUnorderedSet.h>
103
// This file deliberately redefines RELEASE_ASSERT so that a failed assertion
// reports the file/line/function and the stringified assertion expression via
// WTFReportAssertionFailure before crashing — even in release builds, where
// the default RELEASE_ASSERT gives less context.
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
    if (!(assertion)) { \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH(); \
    } \
} while (0)
111
112namespace JSC { namespace FTL {
113
114using namespace B3;
115using namespace DFG;
116
117namespace {
118
119std::atomic<int> compileCounter;
120
121#if !ASSERT_DISABLED
122NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
123 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
124{
125 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
126 if (nodeIndex != UINT_MAX)
127 dataLog(", node @", nodeIndex);
128 dataLog(".\n");
129 CRASH();
130}
131#endif
132
// Using this instead of typeCheck() helps to reduce the load on B3, by creating
// significantly less dead code: the macro consults the abstract interpreter
// first and emits nothing at all when the edge's type is already proven, so the
// failCondition expression is never even evaluated in that case.
#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
        FormattedValue _ftc_lowValue = (lowValue); \
        Edge _ftc_highValue = (highValue); \
        SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
        if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
            break; \
        typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
    } while (false)

// Common case: a type check whose failure exits with the generic BadType kind.
#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
    FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
146
147class LowerDFGToB3 {
148 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
149public:
    // Binds the lowering to the FTL compilation state and, when AI-state
    // validation is enabled, precomputes per-node liveness snapshots that
    // validateAIState() later checks against the abstract interpreter.
    LowerDFGToB3(State& state)
        : m_graph(state.graph)
        , m_ftlState(state)
        , m_out(state)
        , m_proc(*state.proc)
        , m_availabilityCalculator(m_graph)
        , m_state(state.graph)
        , m_interpreter(state.graph, m_state)
        , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
    {
        if (Options::validateAbstractInterpreterState()) {
            performLivenessAnalysis(m_graph);

            // We only use node liveness here, not combined liveness, as we only track
            // AI state for live nodes.
            for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                NodeSet live;

                // Seed the live set with the primary projections live at the
                // block's tail...
                for (NodeFlowProjection node : block->ssa->liveAtTail) {
                    if (node.kind() == NodeFlowProjection::Primary)
                        live.addVoid(node.node());
                }

                // ...then walk the block backwards, killing each node's own
                // definition and gen'ing its children, snapshotting the live
                // set just before each node executes.
                for (unsigned i = block->size(); i--; ) {
                    Node* node = block->at(i);
                    live.remove(node);
                    m_graph.doToChildren(node, [&] (Edge child) {
                        live.addVoid(child.node());
                    });
                    m_liveInToNode.add(node, live);
                }
            }
        }
    }
184
    // Entry point of the lowering: translates the whole DFG graph into B3 IR.
    // Sets up entrypoint prologues, emits the stack overflow check and
    // argument-type speculations, lowers every reachable block, and finally
    // decorates heap ranges and fixes up block order.
    void lower()
    {
        State* state = &m_ftlState;

        CString name;
        if (verboseCompilationEnabled()) {
            name = toCString(
                "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
                "_", codeBlock()->hash());
        } else
            name = "jsBody";

        {
            m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
            CodeBlock* codeBlock = m_graph.m_codeBlock;

            // Every non-zero entrypoint (the op_catch OSR entrypoints) gets a
            // custom Air prologue that sets up the stack frame, optionally
            // zeroes it, saves callee-saves, and stores the CodeBlock into the
            // call frame header.
            Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
                [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                    if (Options::zeroStackFrame())
                        jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, code.frameSize());

                    jit.emitSave(code.calleeSaveRegisterAtOffsetList());
                    jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
                });

            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
                RELEASE_ASSERT(catchEntrypointIndex != 0);
                m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
            }

            if (m_graph.m_maxLocalsForCatchOSREntry) {
                uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
                m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
            }
        }

        m_graph.ensureSSADominators();

        if (verboseCompilationEnabled())
            dataLog("Function ready, beginning lowering.\n");

        m_out.initialize(m_heaps);

        // We use prologue frequency for all of the initialization code.
        m_out.setFrequency(1);

        bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;

        LBasicBlock prologue = m_out.newBlock();
        LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
        m_handleExceptions = m_out.newBlock();

        // Pre-create one low (B3) block per non-null high (DFG) block, each at
        // the high block's execution frequency.
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            m_highBlock = m_graph.block(blockIndex);
            if (!m_highBlock)
                continue;
            m_out.setFrequency(m_highBlock->executionCount);
            m_blocks.add(m_highBlock, m_out.newBlock());
        }

        // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
        m_out.setFrequency(1);

        m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
        m_out.initializeConstants(m_proc, prologue);
        createPhiVariables();

        // Reserve a locked stack area sized for all machine locals; its base is
        // published to the FTL state as capturedValue, and m_captured points
        // one past its end.
        size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
        B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
        m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
        state->capturedValue = capturedBase->slot();

        auto preOrder = m_graph.blocksInPreOrder();

        m_callFrame = m_out.framePointer();
        m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
        m_tagMask = m_out.constInt64(TagMask);

        // Make sure that B3 knows that we really care about the mask registers. This forces the
        // constants to be materialized in registers.
        m_proc.addFastConstant(m_tagTypeNumber->key());
        m_proc.addFastConstant(m_tagMask->key());

        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));

        VM* vm = &this->vm();

        // Stack Overflow Check.
        unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
        PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
        stackOverflowHandler->appendSomeRegister(m_callFrame);
        stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
        stackOverflowHandler->numGPScratchRegisters = 1;
        stackOverflowHandler->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg fp = params[0].gpr();
                GPRReg scratch = params.gpScratch(0);

                unsigned ftlFrameSize = params.proc().frameSize();
                unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);

                jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
                MacroAssembler::JumpList stackOverflow;
                // scratch = fp - maxFrameSize; with an unusually large frame the
                // subtraction can wrap, which the scratch > fp branch detects.
                if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
                    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
                stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));

                // The overflow path is emitted out of line at the end of
                // code generation.
                params.addLatePath([=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    stackOverflow.link(&jit);

                    // FIXME: We would not have to do this if the stack check was part of the Air
                    // prologue. Then, we would know that there is no way for the callee-saves to
                    // get clobbered.
                    // https://bugs.webkit.org/show_bug.cgi?id=172456
                    jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());

                    jit.store32(
                        MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

                    // Two calls, linked below: throw the stack overflow error,
                    // then look up the handler from the caller frame and jump
                    // to it.
                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
                    jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
                    CCallHelpers::Call throwCall = jit.call(OperationPtrTag);

                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
                    CCallHelpers::Call lookupExceptionHandlerCall = jit.call(OperationPtrTag);
                    jit.jumpToExceptionHandler(*vm);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(throwCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
                            linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame));
                        });
                });
            });

        LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));

        {
            if (hasMultipleEntrypoints) {
                Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
                successors[0] = callEntrypointArgumentSpeculations;
                for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
                    // Currently, the only other entrypoint is an op_catch entrypoint.
                    // We do OSR entry at op_catch, and we prove argument formats before
                    // jumping to FTL code, so we don't need to check argument types here
                    // for these entrypoints.
                    successors[i] = firstDFGBasicBlock;
                }

                m_out.entrySwitch(successors);
                m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
            }

            m_node = nullptr;
            m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);

            // Check Arguments.
            availabilityMap().clear();
            availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                availabilityMap().m_locals.argument(i) =
                    Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
            }

            // Speculate on each argument matching the flush format the DFG
            // proved for entrypoint 0; a mismatch OSR-exits with BadType.
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
                VirtualRegister operand = virtualRegisterForArgument(i);
                LValue jsValue = m_out.load64(addressFor(operand));

                switch (m_graph.m_argumentFormats[0][i]) {
                case FlushedInt32:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
                    break;
                case FlushedBoolean:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
                    break;
                case FlushedCell:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
                    break;
                case FlushedJSValue:
                    break;
                default:
                    DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
                    break;
                }
            }
            m_out.jump(firstDFGBasicBlock);
        }


        // m_handleExceptions: a single jump, linked at link time to the label
        // stored in state->exceptionHandler.
        m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
        Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
        m_out.patchpoint(Void)->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                CCallHelpers::Jump jump = jit.jump();
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        linkBuffer.link(jump, linkBuffer.locationOf<ExceptionHandlerPtrTag>(*exceptionHandler));
                    });
            });
        m_out.unreachable();

        for (DFG::BasicBlock* block : preOrder)
            compileBlock(block);

        // Make sure everything is decorated. This does a bunch of deferred decorating. This has
        // to happen last because our abstract heaps are generated lazily. They have to be
        // generated lazily because we have an infinite number of numbered, indexed, and
        // absolute heaps. We only become aware of the ones we actually mention while lowering.
        m_heaps.computeRangesAndDecorateInstructions();

        // We create all Phi's up front, but we may then decide not to compile the basic block
        // that would have contained one of them. So this creates orphans, which triggers B3
        // validation failures. Calling this fixes the issue.
        //
        // Note that you should avoid the temptation to make this call conditional upon
        // validation being enabled. B3 makes no guarantees of any kind of correctness when
        // dealing with IR that would have failed validation. For example, it would be valid to
        // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
        // if any orphans were around. We might even have such phases already.
        m_proc.deleteOrphans();

        // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
        m_out.applyBlockOrder();
    }
422
423private:
424
425 void createPhiVariables()
426 {
427 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
428 DFG::BasicBlock* block = m_graph.block(blockIndex);
429 if (!block)
430 continue;
431 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
432 Node* node = block->at(nodeIndex);
433 if (node->op() != DFG::Phi)
434 continue;
435 LType type;
436 switch (node->flags() & NodeResultMask) {
437 case NodeResultDouble:
438 type = Double;
439 break;
440 case NodeResultInt32:
441 type = Int32;
442 break;
443 case NodeResultInt52:
444 type = Int64;
445 break;
446 case NodeResultBoolean:
447 type = Int32;
448 break;
449 case NodeResultJS:
450 type = Int64;
451 break;
452 default:
453 DFG_CRASH(m_graph, node, "Bad Phi node result type");
454 break;
455 }
456 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
457 }
458 }
459 }
460
461 void compileBlock(DFG::BasicBlock* block)
462 {
463 if (!block)
464 return;
465
466 if (verboseCompilationEnabled())
467 dataLog("Compiling block ", *block, "\n");
468
469 m_highBlock = block;
470
471 // Make sure that any blocks created while lowering code in the high block have the frequency of
472 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
473 // something roughly approximate for things like register allocation.
474 m_out.setFrequency(m_highBlock->executionCount);
475
476 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
477
478 m_nextHighBlock = 0;
479 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
480 m_nextHighBlock = m_graph.block(nextBlockIndex);
481 if (m_nextHighBlock)
482 break;
483 }
484 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
485
486 // All of this effort to find the next block gives us the ability to keep the
487 // generated IR in roughly program order. This ought not affect the performance
488 // of the generated code (since we expect B3 to reorder things) but it will
489 // make IR dumps easier to read.
490 m_out.appendTo(lowBlock, m_nextLowBlock);
491
492 if (Options::ftlCrashes())
493 m_out.trap();
494
495 if (!m_highBlock->cfaHasVisited) {
496 if (verboseCompilationEnabled())
497 dataLog("Bailing because CFA didn't reach.\n");
498 crash(m_highBlock, nullptr);
499 return;
500 }
501
502 m_aiCheckedNodes.clear();
503
504 m_availabilityCalculator.beginBlock(m_highBlock);
505
506 m_state.reset();
507 m_state.beginBasicBlock(m_highBlock);
508
509 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
510 if (!compileNode(m_nodeIndex))
511 break;
512 }
513 }
514
515 void safelyInvalidateAfterTermination()
516 {
517 if (verboseCompilationEnabled())
518 dataLog("Bailing.\n");
519 crash();
520
521 // Invalidate dominated blocks. Under normal circumstances we would expect
522 // them to be invalidated already. But you can have the CFA become more
523 // precise over time because the structures of objects change on the main
524 // thread. Failing to do this would result in weird crashes due to a value
525 // being used but not defined. Race conditions FTW!
526 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
527 DFG::BasicBlock* target = m_graph.block(blockIndex);
528 if (!target)
529 continue;
530 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
531 if (verboseCompilationEnabled())
532 dataLog("Block ", *target, " will bail also.\n");
533 target->cfaHasVisited = false;
534 }
535 }
536 }
537
    // Debug-only (Options::validateAbstractInterpreterState()): before `node`
    // executes, emits probe patchpoints that check, at run time, that the
    // machine value of every node live at this point is consistent with the
    // abstract interpreter's value for it. On mismatch, dumps the whole graph
    // and crashes.
    void validateAIState(Node* node)
    {
        // Lazily capture a dump of the graph once, for use in crash reports.
        if (!m_graphDump) {
            StringPrintStream out;
            m_graph.dump(out);
            m_graphDump = out.toString();
        }

        // Skip ops that are pure bookkeeping (hints, constants, stack ops).
        switch (node->op()) {
        case MovHint:
        case ZombieHint:
        case JSConstant:
        case LazyJSConstant:
        case DoubleConstant:
        case Int52Constant:
        case GetStack:
        case PutStack:
        case KillStack:
        case ExitOK:
            return;
        default:
            break;
        }

        // Before we execute node.
        NodeSet& live = m_liveInToNode.find(node)->value;
        unsigned highParentIndex = node->index();
        {
            // Deterministic sampling: only validate at a fraction of nodes,
            // chosen by hashing the node index against the configured
            // probability.
            uint64_t hash = WTF::intHash(highParentIndex);
            if (hash >= static_cast<uint64_t>((static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability()))
                return;
        }

        for (Node* node : live) {
            if (node->isPhantomAllocation())
                continue;

            if (node->op() == CheckInBounds)
                continue;

            AbstractValue value = m_interpreter.forNode(node);
            {
                // Skip nodes whose AI value hasn't changed since the last
                // check — except possible cells, which are always re-checked.
                auto iter = m_aiCheckedNodes.find(node);
                if (iter != m_aiCheckedNodes.end()) {
                    AbstractValue checkedValue = iter->value;
                    if (checkedValue == value) {
                        if (!(value.m_type & SpecCell))
                            continue;
                    }
                }
                m_aiCheckedNodes.set(node, value);
            }

            // Materialize the live node's value in a form we can inspect in
            // the probe; nodes with no checkable result kind are skipped.
            FlushFormat flushFormat;
            LValue input;
            if (node->hasJSResult()) {
                input = lowJSValue(Edge(node, UntypedUse));
                flushFormat = FlushedJSValue;
            } else if (node->hasDoubleResult()) {
                input = lowDouble(Edge(node, DoubleRepUse));
                flushFormat = FlushedDouble;
            } else if (node->hasInt52Result()) {
                input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
                flushFormat = FlushedInt52;
            } else
                continue;

            unsigned highChildIndex = node->index();

            String graphDump = m_graphDump;

            // The patchpoint claims only local-state writes so B3 won't
            // reorder or eliminate it as a no-op.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::none();
            patchpoint->effects.writesLocalState = true;
            patchpoint->appendSomeRegister(input);
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                GPRReg reg = InvalidGPRReg;
                FPRReg fpReg = InvalidFPRReg;
                if (flushFormat == FlushedDouble)
                    fpReg = params[0].fpr();
                else
                    reg = params[0].gpr();
                jit.probe([=] (Probe::Context& context) {
                    // NOTE: this `input` shadows the LValue above; here it is
                    // the actual runtime JSValue read out of the register.
                    JSValue input;
                    double doubleInput;

                    auto dumpAndCrash = [&] {
                        dataLogLn("Validation failed at node: @", highParentIndex);
                        dataLogLn("Failed validating live value: @", highChildIndex);
                        dataLogLn();
                        dataLogLn("Expected AI value = ", value);
                        if (flushFormat != FlushedDouble)
                            dataLogLn("Unexpected value = ", input);
                        else
                            dataLogLn("Unexpected double value = ", doubleInput);
                        dataLogLn();
                        dataLogLn(graphDump);
                        CRASH();
                    };

                    if (flushFormat == FlushedDouble) {
                        // Classify the double (number / impure NaN / pure NaN)
                        // and ask AI whether that type was possible.
                        doubleInput = context.fpr(fpReg);
                        SpeculatedType type;
                        if (!std::isnan(doubleInput))
                            type = speculationFromValue(jsDoubleNumber(doubleInput));
                        else if (isImpureNaN(doubleInput))
                            type = SpecDoubleImpureNaN;
                        else
                            type = SpecDoublePureNaN;

                        if (!value.couldBeType(type))
                            dumpAndCrash();
                    } else {
                        input = JSValue::decode(context.gpr(reg));
                        if (flushFormat == FlushedInt52) {
                            RELEASE_ASSERT(input.isAnyInt());
                            input = jsDoubleNumber(input.asAnyInt());
                        }
                        if (!value.validateOSREntryValue(input, flushFormat))
                            dumpAndCrash();
                    }

                });
            });
        }
    }
664
665 bool compileNode(unsigned nodeIndex)
666 {
667 if (!m_state.isValid()) {
668 safelyInvalidateAfterTermination();
669 return false;
670 }
671
672 m_node = m_highBlock->at(nodeIndex);
673 m_origin = m_node->origin;
674 m_out.setOrigin(m_node);
675
676 if (verboseCompilationEnabled())
677 dataLog("Lowering ", m_node, "\n");
678
679 m_interpreter.startExecuting();
680 m_interpreter.executeKnownEdgeTypes(m_node);
681
682 if (Options::validateAbstractInterpreterState())
683 validateAIState(m_node);
684
685 if (validateDFGDoesGC) {
686 bool expectDoesGC = doesGC(m_graph, m_node);
687 m_out.store(m_out.constBool(expectDoesGC), m_out.absolute(vm().heap.addressOfExpectDoesGC()));
688 }
689
690 switch (m_node->op()) {
691 case DFG::Upsilon:
692 compileUpsilon();
693 break;
694 case DFG::Phi:
695 compilePhi();
696 break;
697 case JSConstant:
698 break;
699 case DoubleConstant:
700 compileDoubleConstant();
701 break;
702 case Int52Constant:
703 compileInt52Constant();
704 break;
705 case LazyJSConstant:
706 compileLazyJSConstant();
707 break;
708 case DoubleRep:
709 compileDoubleRep();
710 break;
711 case DoubleAsInt32:
712 compileDoubleAsInt32();
713 break;
714 case DFG::ValueRep:
715 compileValueRep();
716 break;
717 case Int52Rep:
718 compileInt52Rep();
719 break;
720 case ValueToInt32:
721 compileValueToInt32();
722 break;
723 case BooleanToNumber:
724 compileBooleanToNumber();
725 break;
726 case ExtractOSREntryLocal:
727 compileExtractOSREntryLocal();
728 break;
729 case ExtractCatchLocal:
730 compileExtractCatchLocal();
731 break;
732 case ClearCatchLocals:
733 compileClearCatchLocals();
734 break;
735 case GetStack:
736 compileGetStack();
737 break;
738 case PutStack:
739 compilePutStack();
740 break;
741 case DFG::Check:
742 case CheckVarargs:
743 compileNoOp();
744 break;
745 case ToObject:
746 case CallObjectConstructor:
747 compileToObjectOrCallObjectConstructor();
748 break;
749 case ToThis:
750 compileToThis();
751 break;
752 case ValueNegate:
753 compileValueNegate();
754 break;
755 case ValueAdd:
756 compileValueAdd();
757 break;
758 case ValueSub:
759 compileValueSub();
760 break;
761 case ValueMul:
762 compileValueMul();
763 break;
764 case StrCat:
765 compileStrCat();
766 break;
767 case ArithAdd:
768 case ArithSub:
769 compileArithAddOrSub();
770 break;
771 case ArithClz32:
772 compileArithClz32();
773 break;
774 case ArithMul:
775 compileArithMul();
776 break;
777 case ValueDiv:
778 compileValueDiv();
779 break;
780 case ArithDiv:
781 compileArithDiv();
782 break;
783 case ArithMod:
784 compileArithMod();
785 break;
786 case ArithMin:
787 case ArithMax:
788 compileArithMinOrMax();
789 break;
790 case ArithAbs:
791 compileArithAbs();
792 break;
793 case ArithPow:
794 compileArithPow();
795 break;
796 case ArithRandom:
797 compileArithRandom();
798 break;
799 case ArithRound:
800 compileArithRound();
801 break;
802 case ArithFloor:
803 compileArithFloor();
804 break;
805 case ArithCeil:
806 compileArithCeil();
807 break;
808 case ArithTrunc:
809 compileArithTrunc();
810 break;
811 case ArithSqrt:
812 compileArithSqrt();
813 break;
814 case ArithFRound:
815 compileArithFRound();
816 break;
817 case ArithNegate:
818 compileArithNegate();
819 break;
820 case ArithUnary:
821 compileArithUnary();
822 break;
823 case ValueBitNot:
824 compileValueBitNot();
825 break;
826 case ArithBitNot:
827 compileArithBitNot();
828 break;
829 case ValueBitAnd:
830 compileValueBitAnd();
831 break;
832 case ArithBitAnd:
833 compileArithBitAnd();
834 break;
835 case ValueBitOr:
836 compileValueBitOr();
837 break;
838 case ArithBitOr:
839 compileArithBitOr();
840 break;
841 case ArithBitXor:
842 compileArithBitXor();
843 break;
844 case ValueBitXor:
845 compileValueBitXor();
846 break;
847 case BitRShift:
848 compileBitRShift();
849 break;
850 case BitLShift:
851 compileBitLShift();
852 break;
853 case BitURShift:
854 compileBitURShift();
855 break;
856 case UInt32ToNumber:
857 compileUInt32ToNumber();
858 break;
859 case CheckStructure:
860 compileCheckStructure();
861 break;
862 case CheckStructureOrEmpty:
863 compileCheckStructureOrEmpty();
864 break;
865 case CheckCell:
866 compileCheckCell();
867 break;
868 case CheckNotEmpty:
869 compileCheckNotEmpty();
870 break;
871 case AssertNotEmpty:
872 compileAssertNotEmpty();
873 break;
874 case CheckBadCell:
875 compileCheckBadCell();
876 break;
877 case CheckStringIdent:
878 compileCheckStringIdent();
879 break;
880 case GetExecutable:
881 compileGetExecutable();
882 break;
883 case Arrayify:
884 case ArrayifyToStructure:
885 compileArrayify();
886 break;
887 case PutStructure:
888 compilePutStructure();
889 break;
890 case TryGetById:
891 compileGetById(AccessType::TryGet);
892 break;
893 case GetById:
894 case GetByIdFlush:
895 compileGetById(AccessType::Get);
896 break;
897 case GetByIdWithThis:
898 compileGetByIdWithThis();
899 break;
900 case GetByIdDirect:
901 case GetByIdDirectFlush:
902 compileGetById(AccessType::GetDirect);
903 break;
904 case InById:
905 compileInById();
906 break;
907 case InByVal:
908 compileInByVal();
909 break;
910 case HasOwnProperty:
911 compileHasOwnProperty();
912 break;
913 case PutById:
914 case PutByIdDirect:
915 case PutByIdFlush:
916 compilePutById();
917 break;
918 case PutByIdWithThis:
919 compilePutByIdWithThis();
920 break;
921 case PutGetterById:
922 case PutSetterById:
923 compilePutAccessorById();
924 break;
925 case PutGetterSetterById:
926 compilePutGetterSetterById();
927 break;
928 case PutGetterByVal:
929 case PutSetterByVal:
930 compilePutAccessorByVal();
931 break;
932 case DeleteById:
933 compileDeleteById();
934 break;
935 case DeleteByVal:
936 compileDeleteByVal();
937 break;
938 case GetButterfly:
939 compileGetButterfly();
940 break;
941 case ConstantStoragePointer:
942 compileConstantStoragePointer();
943 break;
944 case GetIndexedPropertyStorage:
945 compileGetIndexedPropertyStorage();
946 break;
947 case CheckArray:
948 compileCheckArray();
949 break;
950 case GetArrayLength:
951 compileGetArrayLength();
952 break;
953 case GetVectorLength:
954 compileGetVectorLength();
955 break;
956 case CheckInBounds:
957 compileCheckInBounds();
958 break;
959 case GetByVal:
960 compileGetByVal();
961 break;
962 case GetMyArgumentByVal:
963 case GetMyArgumentByValOutOfBounds:
964 compileGetMyArgumentByVal();
965 break;
966 case GetByValWithThis:
967 compileGetByValWithThis();
968 break;
969 case PutByVal:
970 case PutByValAlias:
971 case PutByValDirect:
972 compilePutByVal();
973 break;
974 case PutByValWithThis:
975 compilePutByValWithThis();
976 break;
977 case AtomicsAdd:
978 case AtomicsAnd:
979 case AtomicsCompareExchange:
980 case AtomicsExchange:
981 case AtomicsLoad:
982 case AtomicsOr:
983 case AtomicsStore:
984 case AtomicsSub:
985 case AtomicsXor:
986 compileAtomicsReadModifyWrite();
987 break;
988 case AtomicsIsLockFree:
989 compileAtomicsIsLockFree();
990 break;
991 case DefineDataProperty:
992 compileDefineDataProperty();
993 break;
994 case DefineAccessorProperty:
995 compileDefineAccessorProperty();
996 break;
997 case ArrayPush:
998 compileArrayPush();
999 break;
1000 case ArrayPop:
1001 compileArrayPop();
1002 break;
1003 case ArraySlice:
1004 compileArraySlice();
1005 break;
1006 case ArrayIndexOf:
1007 compileArrayIndexOf();
1008 break;
1009 case CreateActivation:
1010 compileCreateActivation();
1011 break;
1012 case PushWithScope:
1013 compilePushWithScope();
1014 break;
1015 case NewFunction:
1016 case NewGeneratorFunction:
1017 case NewAsyncGeneratorFunction:
1018 case NewAsyncFunction:
1019 compileNewFunction();
1020 break;
1021 case CreateDirectArguments:
1022 compileCreateDirectArguments();
1023 break;
1024 case CreateScopedArguments:
1025 compileCreateScopedArguments();
1026 break;
1027 case CreateClonedArguments:
1028 compileCreateClonedArguments();
1029 break;
1030 case ObjectCreate:
1031 compileObjectCreate();
1032 break;
1033 case ObjectKeys:
1034 compileObjectKeys();
1035 break;
1036 case NewObject:
1037 compileNewObject();
1038 break;
1039 case NewStringObject:
1040 compileNewStringObject();
1041 break;
1042 case NewSymbol:
1043 compileNewSymbol();
1044 break;
1045 case NewArray:
1046 compileNewArray();
1047 break;
1048 case NewArrayWithSpread:
1049 compileNewArrayWithSpread();
1050 break;
1051 case CreateThis:
1052 compileCreateThis();
1053 break;
1054 case Spread:
1055 compileSpread();
1056 break;
1057 case NewArrayBuffer:
1058 compileNewArrayBuffer();
1059 break;
1060 case NewArrayWithSize:
1061 compileNewArrayWithSize();
1062 break;
1063 case NewTypedArray:
1064 compileNewTypedArray();
1065 break;
1066 case GetTypedArrayByteOffset:
1067 compileGetTypedArrayByteOffset();
1068 break;
1069 case GetPrototypeOf:
1070 compileGetPrototypeOf();
1071 break;
1072 case AllocatePropertyStorage:
1073 compileAllocatePropertyStorage();
1074 break;
1075 case ReallocatePropertyStorage:
1076 compileReallocatePropertyStorage();
1077 break;
1078 case NukeStructureAndSetButterfly:
1079 compileNukeStructureAndSetButterfly();
1080 break;
1081 case ToNumber:
1082 compileToNumber();
1083 break;
1084 case ToString:
1085 case CallStringConstructor:
1086 case StringValueOf:
1087 compileToStringOrCallStringConstructorOrStringValueOf();
1088 break;
1089 case ToPrimitive:
1090 compileToPrimitive();
1091 break;
1092 case MakeRope:
1093 compileMakeRope();
1094 break;
1095 case StringCharAt:
1096 compileStringCharAt();
1097 break;
1098 case StringCharCodeAt:
1099 compileStringCharCodeAt();
1100 break;
1101 case StringFromCharCode:
1102 compileStringFromCharCode();
1103 break;
1104 case GetByOffset:
1105 case GetGetterSetterByOffset:
1106 compileGetByOffset();
1107 break;
1108 case GetGetter:
1109 compileGetGetter();
1110 break;
1111 case GetSetter:
1112 compileGetSetter();
1113 break;
1114 case MultiGetByOffset:
1115 compileMultiGetByOffset();
1116 break;
1117 case PutByOffset:
1118 compilePutByOffset();
1119 break;
1120 case MultiPutByOffset:
1121 compileMultiPutByOffset();
1122 break;
1123 case MatchStructure:
1124 compileMatchStructure();
1125 break;
1126 case GetGlobalVar:
1127 case GetGlobalLexicalVariable:
1128 compileGetGlobalVariable();
1129 break;
1130 case PutGlobalVariable:
1131 compilePutGlobalVariable();
1132 break;
1133 case NotifyWrite:
1134 compileNotifyWrite();
1135 break;
1136 case GetCallee:
1137 compileGetCallee();
1138 break;
1139 case SetCallee:
1140 compileSetCallee();
1141 break;
1142 case GetArgumentCountIncludingThis:
1143 compileGetArgumentCountIncludingThis();
1144 break;
1145 case SetArgumentCountIncludingThis:
1146 compileSetArgumentCountIncludingThis();
1147 break;
1148 case GetScope:
1149 compileGetScope();
1150 break;
1151 case SkipScope:
1152 compileSkipScope();
1153 break;
1154 case GetGlobalObject:
1155 compileGetGlobalObject();
1156 break;
1157 case GetGlobalThis:
1158 compileGetGlobalThis();
1159 break;
1160 case GetClosureVar:
1161 compileGetClosureVar();
1162 break;
1163 case PutClosureVar:
1164 compilePutClosureVar();
1165 break;
1166 case GetFromArguments:
1167 compileGetFromArguments();
1168 break;
1169 case PutToArguments:
1170 compilePutToArguments();
1171 break;
1172 case GetArgument:
1173 compileGetArgument();
1174 break;
1175 case CompareEq:
1176 compileCompareEq();
1177 break;
1178 case CompareStrictEq:
1179 compileCompareStrictEq();
1180 break;
1181 case CompareLess:
1182 compileCompareLess();
1183 break;
1184 case CompareLessEq:
1185 compileCompareLessEq();
1186 break;
1187 case CompareGreater:
1188 compileCompareGreater();
1189 break;
1190 case CompareGreaterEq:
1191 compileCompareGreaterEq();
1192 break;
1193 case CompareBelow:
1194 compileCompareBelow();
1195 break;
1196 case CompareBelowEq:
1197 compileCompareBelowEq();
1198 break;
1199 case CompareEqPtr:
1200 compileCompareEqPtr();
1201 break;
1202 case SameValue:
1203 compileSameValue();
1204 break;
1205 case LogicalNot:
1206 compileLogicalNot();
1207 break;
1208 case Call:
1209 case TailCallInlinedCaller:
1210 case Construct:
1211 compileCallOrConstruct();
1212 break;
1213 case DirectCall:
1214 case DirectTailCallInlinedCaller:
1215 case DirectConstruct:
1216 case DirectTailCall:
1217 compileDirectCallOrConstruct();
1218 break;
1219 case TailCall:
1220 compileTailCall();
1221 break;
1222 case CallVarargs:
1223 case CallForwardVarargs:
1224 case TailCallVarargs:
1225 case TailCallVarargsInlinedCaller:
1226 case TailCallForwardVarargs:
1227 case TailCallForwardVarargsInlinedCaller:
1228 case ConstructVarargs:
1229 case ConstructForwardVarargs:
1230 compileCallOrConstructVarargs();
1231 break;
1232 case CallEval:
1233 compileCallEval();
1234 break;
1235 case LoadVarargs:
1236 compileLoadVarargs();
1237 break;
1238 case ForwardVarargs:
1239 compileForwardVarargs();
1240 break;
1241 case DFG::Jump:
1242 compileJump();
1243 break;
1244 case DFG::Branch:
1245 compileBranch();
1246 break;
1247 case DFG::Switch:
1248 compileSwitch();
1249 break;
1250 case DFG::EntrySwitch:
1251 compileEntrySwitch();
1252 break;
1253 case DFG::Return:
1254 compileReturn();
1255 break;
1256 case ForceOSRExit:
1257 compileForceOSRExit();
1258 break;
1259 case CPUIntrinsic:
1260#if CPU(X86_64)
1261 compileCPUIntrinsic();
1262#else
1263 RELEASE_ASSERT_NOT_REACHED();
1264#endif
1265 break;
1266 case Throw:
1267 compileThrow();
1268 break;
1269 case ThrowStaticError:
1270 compileThrowStaticError();
1271 break;
1272 case InvalidationPoint:
1273 compileInvalidationPoint();
1274 break;
1275 case IsEmpty:
1276 compileIsEmpty();
1277 break;
1278 case IsUndefined:
1279 compileIsUndefined();
1280 break;
1281 case IsUndefinedOrNull:
1282 compileIsUndefinedOrNull();
1283 break;
1284 case IsBoolean:
1285 compileIsBoolean();
1286 break;
1287 case IsNumber:
1288 compileIsNumber();
1289 break;
1290 case NumberIsInteger:
1291 compileNumberIsInteger();
1292 break;
1293 case IsCellWithType:
1294 compileIsCellWithType();
1295 break;
1296 case MapHash:
1297 compileMapHash();
1298 break;
1299 case NormalizeMapKey:
1300 compileNormalizeMapKey();
1301 break;
1302 case GetMapBucket:
1303 compileGetMapBucket();
1304 break;
1305 case GetMapBucketHead:
1306 compileGetMapBucketHead();
1307 break;
1308 case GetMapBucketNext:
1309 compileGetMapBucketNext();
1310 break;
1311 case LoadKeyFromMapBucket:
1312 compileLoadKeyFromMapBucket();
1313 break;
1314 case LoadValueFromMapBucket:
1315 compileLoadValueFromMapBucket();
1316 break;
1317 case ExtractValueFromWeakMapGet:
1318 compileExtractValueFromWeakMapGet();
1319 break;
1320 case SetAdd:
1321 compileSetAdd();
1322 break;
1323 case MapSet:
1324 compileMapSet();
1325 break;
1326 case WeakMapGet:
1327 compileWeakMapGet();
1328 break;
1329 case WeakSetAdd:
1330 compileWeakSetAdd();
1331 break;
1332 case WeakMapSet:
1333 compileWeakMapSet();
1334 break;
1335 case IsObject:
1336 compileIsObject();
1337 break;
1338 case IsObjectOrNull:
1339 compileIsObjectOrNull();
1340 break;
1341 case IsFunction:
1342 compileIsFunction();
1343 break;
1344 case IsTypedArrayView:
1345 compileIsTypedArrayView();
1346 break;
1347 case ParseInt:
1348 compileParseInt();
1349 break;
1350 case TypeOf:
1351 compileTypeOf();
1352 break;
1353 case CheckTypeInfoFlags:
1354 compileCheckTypeInfoFlags();
1355 break;
1356 case OverridesHasInstance:
1357 compileOverridesHasInstance();
1358 break;
1359 case InstanceOf:
1360 compileInstanceOf();
1361 break;
1362 case InstanceOfCustom:
1363 compileInstanceOfCustom();
1364 break;
1365 case CountExecution:
1366 compileCountExecution();
1367 break;
1368 case SuperSamplerBegin:
1369 compileSuperSamplerBegin();
1370 break;
1371 case SuperSamplerEnd:
1372 compileSuperSamplerEnd();
1373 break;
1374 case StoreBarrier:
1375 case FencedStoreBarrier:
1376 compileStoreBarrier();
1377 break;
1378 case HasIndexedProperty:
1379 compileHasIndexedProperty();
1380 break;
1381 case HasGenericProperty:
1382 compileHasGenericProperty();
1383 break;
1384 case HasStructureProperty:
1385 compileHasStructureProperty();
1386 break;
1387 case GetDirectPname:
1388 compileGetDirectPname();
1389 break;
1390 case GetEnumerableLength:
1391 compileGetEnumerableLength();
1392 break;
1393 case GetPropertyEnumerator:
1394 compileGetPropertyEnumerator();
1395 break;
1396 case GetEnumeratorStructurePname:
1397 compileGetEnumeratorStructurePname();
1398 break;
1399 case GetEnumeratorGenericPname:
1400 compileGetEnumeratorGenericPname();
1401 break;
1402 case ToIndexString:
1403 compileToIndexString();
1404 break;
1405 case CheckStructureImmediate:
1406 compileCheckStructureImmediate();
1407 break;
1408 case MaterializeNewObject:
1409 compileMaterializeNewObject();
1410 break;
1411 case MaterializeCreateActivation:
1412 compileMaterializeCreateActivation();
1413 break;
1414 case CheckTraps:
1415 compileCheckTraps();
1416 break;
1417 case CreateRest:
1418 compileCreateRest();
1419 break;
1420 case GetRestLength:
1421 compileGetRestLength();
1422 break;
1423 case RegExpExec:
1424 compileRegExpExec();
1425 break;
1426 case RegExpExecNonGlobalOrSticky:
1427 compileRegExpExecNonGlobalOrSticky();
1428 break;
1429 case RegExpTest:
1430 compileRegExpTest();
1431 break;
1432 case RegExpMatchFast:
1433 compileRegExpMatchFast();
1434 break;
1435 case RegExpMatchFastGlobal:
1436 compileRegExpMatchFastGlobal();
1437 break;
1438 case NewRegexp:
1439 compileNewRegexp();
1440 break;
1441 case SetFunctionName:
1442 compileSetFunctionName();
1443 break;
1444 case StringReplace:
1445 case StringReplaceRegExp:
1446 compileStringReplace();
1447 break;
1448 case GetRegExpObjectLastIndex:
1449 compileGetRegExpObjectLastIndex();
1450 break;
1451 case SetRegExpObjectLastIndex:
1452 compileSetRegExpObjectLastIndex();
1453 break;
1454 case LogShadowChickenPrologue:
1455 compileLogShadowChickenPrologue();
1456 break;
1457 case LogShadowChickenTail:
1458 compileLogShadowChickenTail();
1459 break;
1460 case RecordRegExpCachedResult:
1461 compileRecordRegExpCachedResult();
1462 break;
1463 case ResolveScopeForHoistingFuncDeclInEval:
1464 compileResolveScopeForHoistingFuncDeclInEval();
1465 break;
1466 case ResolveScope:
1467 compileResolveScope();
1468 break;
1469 case GetDynamicVar:
1470 compileGetDynamicVar();
1471 break;
1472 case PutDynamicVar:
1473 compilePutDynamicVar();
1474 break;
1475 case Unreachable:
1476 compileUnreachable();
1477 break;
1478 case StringSlice:
1479 compileStringSlice();
1480 break;
1481 case ToLowerCase:
1482 compileToLowerCase();
1483 break;
1484 case NumberToStringWithRadix:
1485 compileNumberToStringWithRadix();
1486 break;
1487 case NumberToStringWithValidRadixConstant:
1488 compileNumberToStringWithValidRadixConstant();
1489 break;
1490 case CheckSubClass:
1491 compileCheckSubClass();
1492 break;
1493 case CallDOM:
1494 compileCallDOM();
1495 break;
1496 case CallDOMGetter:
1497 compileCallDOMGetter();
1498 break;
1499 case FilterCallLinkStatus:
1500 case FilterGetByIdStatus:
1501 case FilterPutByIdStatus:
1502 case FilterInByIdStatus:
1503 compileFilterICStatus();
1504 break;
1505 case DataViewGetInt:
1506 case DataViewGetFloat:
1507 compileDataViewGet();
1508 break;
1509 case DataViewSet:
1510 compileDataViewSet();
1511 break;
1512
1513 case PhantomLocal:
1514 case LoopHint:
1515 case MovHint:
1516 case ZombieHint:
1517 case ExitOK:
1518 case PhantomNewObject:
1519 case PhantomNewFunction:
1520 case PhantomNewGeneratorFunction:
1521 case PhantomNewAsyncGeneratorFunction:
1522 case PhantomNewAsyncFunction:
1523 case PhantomCreateActivation:
1524 case PhantomDirectArguments:
1525 case PhantomCreateRest:
1526 case PhantomSpread:
1527 case PhantomNewArrayWithSpread:
1528 case PhantomNewArrayBuffer:
1529 case PhantomClonedArguments:
1530 case PhantomNewRegexp:
1531 case PutHint:
1532 case BottomValue:
1533 case KillStack:
1534 case InitializeEntrypointArguments:
1535 break;
1536 default:
1537 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1538 break;
1539 }
1540
1541 if (m_node->isTerminal())
1542 return false;
1543
1544 if (!m_state.isValid()) {
1545 safelyInvalidateAfterTermination();
1546 return false;
1547 }
1548
1549 m_availabilityCalculator.executeNode(m_node);
1550 m_interpreter.executeEffects(nodeIndex);
1551
1552 return true;
1553 }
1554
1555 void compileUpsilon()
1556 {
1557 LValue upsilonValue = nullptr;
1558 switch (m_node->child1().useKind()) {
1559 case DoubleRepUse:
1560 upsilonValue = lowDouble(m_node->child1());
1561 break;
1562 case Int32Use:
1563 case KnownInt32Use:
1564 upsilonValue = lowInt32(m_node->child1());
1565 break;
1566 case Int52RepUse:
1567 upsilonValue = lowInt52(m_node->child1());
1568 break;
1569 case BooleanUse:
1570 case KnownBooleanUse:
1571 upsilonValue = lowBoolean(m_node->child1());
1572 break;
1573 case CellUse:
1574 case KnownCellUse:
1575 upsilonValue = lowCell(m_node->child1());
1576 break;
1577 case UntypedUse:
1578 upsilonValue = lowJSValue(m_node->child1());
1579 break;
1580 default:
1581 DFG_CRASH(m_graph, m_node, "Bad use kind");
1582 break;
1583 }
1584 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1585 LValue phiNode = m_phis.get(m_node->phi());
1586 m_out.addIncomingToPhi(phiNode, upsilon);
1587 }
1588
1589 void compilePhi()
1590 {
1591 LValue phi = m_phis.get(m_node);
1592 m_out.m_block->append(phi);
1593
1594 switch (m_node->flags() & NodeResultMask) {
1595 case NodeResultDouble:
1596 setDouble(phi);
1597 break;
1598 case NodeResultInt32:
1599 setInt32(phi);
1600 break;
1601 case NodeResultInt52:
1602 setInt52(phi);
1603 break;
1604 case NodeResultBoolean:
1605 setBoolean(phi);
1606 break;
1607 case NodeResultJS:
1608 setJSValue(phi);
1609 break;
1610 default:
1611 DFG_CRASH(m_graph, m_node, "Bad result type");
1612 break;
1613 }
1614 }
1615
1616 void compileDoubleConstant()
1617 {
1618 setDouble(m_out.constDouble(m_node->asNumber()));
1619 }
1620
1621 void compileInt52Constant()
1622 {
1623 int64_t value = m_node->asAnyInt();
1624
1625 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1626 setStrictInt52(m_out.constInt64(value));
1627 }
1628
1629 void compileLazyJSConstant()
1630 {
1631 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1632 LazyJSValue value = m_node->lazyJSValue();
1633 patchpoint->setGenerator(
1634 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1635 value.emit(jit, JSValueRegs(params[0].gpr()));
1636 });
1637 patchpoint->effects = Effects::none();
1638 setJSValue(patchpoint);
1639 }
1640
    void compileDoubleRep()
    {
        switch (m_node->child1().useKind()) {
        case RealNumberUse: {
            // Speculated to be an int32 or a non-NaN double. Unbox as a double
            // first; if that yields NaN, the value was not a boxed double, so
            // fall back to the int32 path.
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LValue doubleValue = unboxDouble(value);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // doubleEqual(x, x) is false only when x is NaN, i.e. when the
            // unboxing above did not produce a real double.
            ValueFromBlock fastResult = m_out.anchor(doubleValue);
            m_out.branch(
                m_out.doubleEqual(doubleValue, doubleValue),
                usually(continuation), rarely(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

            // Not a double, so it must be an int32 — otherwise the
            // speculation fails and we OSR exit.
            FTL_TYPE_CHECK(
                jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
                isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
            ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, fastResult, slowResult));
            return;
        }

        case NotCellUse:
        case NumberUse: {
            // NotCellUse additionally converts undefined/null/booleans below;
            // NumberUse rejects anything that is not an int32 or double.
            bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;

            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock doubleTesting = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock nonDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isNotInt32(value, provenType(m_node->child1())),
                unsure(doubleTesting), unsure(intCase));

            // Int32 path: widen the unboxed int32 to a double.
            LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);

            ValueFromBlock intToDouble = m_out.anchor(
                m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(doubleTesting, doubleCase);
            LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
            m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));

            m_out.appendTo(doubleCase, nonDoubleCase);
            ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
            m_out.jump(continuation);

            if (shouldConvertNonNumber) {
                // NotCellUse conversions: undefined -> NaN, null -> 0,
                // true -> 1, false -> 0. Anything else must be a cell, which
                // fails the ~SpecCellCheck type check at the end.
                LBasicBlock undefinedCase = m_out.newBlock();
                LBasicBlock testNullCase = m_out.newBlock();
                LBasicBlock nullCase = m_out.newBlock();
                LBasicBlock testBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanFalseCase = m_out.newBlock();

                m_out.appendTo(nonDoubleCase, undefinedCase);
                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
                m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));

                m_out.appendTo(undefinedCase, testNullCase);
                ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
                m_out.jump(continuation);

                m_out.appendTo(testNullCase, nullCase);
                LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
                m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));

                m_out.appendTo(nullCase, testBooleanTrueCase);
                ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
                m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));

                m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
                ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
                m_out.jump(continuation);

                m_out.appendTo(convertBooleanFalseCase, continuation);

                // If it is not false either, it must be a cell: OSR exit.
                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
                FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
                ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
                return;
            }
            // NumberUse: a non-number always fails the speculation.
            m_out.appendTo(nonDoubleCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
            return;
        }

        case Int52RepUse: {
            setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
    }
1763
1764 void compileDoubleAsInt32()
1765 {
1766 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1767 setInt32(integerValue);
1768 }
1769
1770 void compileValueRep()
1771 {
1772 switch (m_node->child1().useKind()) {
1773 case DoubleRepUse: {
1774 LValue value = lowDouble(m_node->child1());
1775
1776 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1777 value = m_out.select(
1778 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1779 }
1780
1781 setJSValue(boxDouble(value));
1782 return;
1783 }
1784
1785 case Int52RepUse: {
1786 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1787 return;
1788 }
1789
1790 default:
1791 DFG_CRASH(m_graph, m_node, "Bad use kind");
1792 }
1793 }
1794
1795 void compileInt52Rep()
1796 {
1797 switch (m_node->child1().useKind()) {
1798 case Int32Use:
1799 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1800 return;
1801
1802 case AnyIntUse:
1803 setStrictInt52(
1804 jsValueToStrictInt52(
1805 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1806 return;
1807
1808 case DoubleRepAnyIntUse:
1809 setStrictInt52(
1810 doubleToStrictInt52(
1811 m_node->child1(), lowDouble(m_node->child1())));
1812 return;
1813
1814 default:
1815 RELEASE_ASSERT_NOT_REACHED();
1816 }
1817 }
1818
1819 void compileValueToInt32()
1820 {
1821 switch (m_node->child1().useKind()) {
1822 case Int52RepUse:
1823 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1824 break;
1825
1826 case DoubleRepUse:
1827 setInt32(doubleToInt32(lowDouble(m_node->child1())));
1828 break;
1829
1830 case NumberUse:
1831 case NotCellUse: {
1832 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1833 if (isValid(value)) {
1834 setInt32(value.value());
1835 break;
1836 }
1837
1838 value = m_jsValueValues.get(m_node->child1().node());
1839 if (isValid(value)) {
1840 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1841 break;
1842 }
1843
1844 // We'll basically just get here for constants. But it's good to have this
1845 // catch-all since we often add new representations into the mix.
1846 setInt32(
1847 numberOrNotCellToInt32(
1848 m_node->child1(),
1849 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1850 break;
1851 }
1852
1853 default:
1854 DFG_CRASH(m_graph, m_node, "Bad use kind");
1855 break;
1856 }
1857 }
1858
    void compileBooleanToNumber()
    {
        switch (m_node->child1().useKind()) {
        case BooleanUse: {
            // A proven boolean zero-extends directly to an int32 0/1.
            setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
            return;
        }

        case UntypedUse: {
            LValue value = lowJSValue(m_node->child1());

            // Provenly a boolean or a bool-like int32: the low bit of the
            // payload is already the 0/1 result.
            if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
                setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
                return;
            }

            LBasicBlock booleanCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-booleans flow to the continuation unchanged: they pass
            // through as-is rather than being converted here.
            ValueFromBlock notBooleanResult = m_out.anchor(value);
            m_out.branch(
                isBoolean(value, provenType(m_node->child1())),
                unsure(booleanCase), unsure(continuation));

            // Booleans: unbox to 0/1, widen to 64 bits, and rebox as an int32
            // JSValue by or-ing in the number tag.
            LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
            ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
                m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
            return;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }
    }
1898
1899 void compileExtractOSREntryLocal()
1900 {
1901 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1902 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1903 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1904 }
1905
1906 void compileExtractCatchLocal()
1907 {
1908 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1909 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
1910 }
1911
1912 void compileClearCatchLocals()
1913 {
1914 ScratchBuffer* scratchBuffer = m_ftlState.jitCode->common.catchOSREntryBuffer;
1915 ASSERT(scratchBuffer);
1916 m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
1917 }
1918
1919 void compileGetStack()
1920 {
1921 StackAccessData* data = m_node->stackAccessData();
1922 AbstractValue& value = m_state.operand(data->local);
1923
1924 DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);
1925
1926 if (data->format == FlushedDouble)
1927 setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
1928 else if (isInt32Speculation(value.m_type))
1929 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1930 else
1931 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1932 }
1933
1934 void compilePutStack()
1935 {
1936 StackAccessData* data = m_node->stackAccessData();
1937 switch (data->format) {
1938 case FlushedJSValue: {
1939 LValue value = lowJSValue(m_node->child1());
1940 m_out.store64(value, addressFor(data->machineLocal));
1941 break;
1942 }
1943
1944 case FlushedDouble: {
1945 LValue value = lowDouble(m_node->child1());
1946 m_out.storeDouble(value, addressFor(data->machineLocal));
1947 break;
1948 }
1949
1950 case FlushedInt32: {
1951 LValue value = lowInt32(m_node->child1());
1952 m_out.store32(value, payloadFor(data->machineLocal));
1953 break;
1954 }
1955
1956 case FlushedInt52: {
1957 LValue value = lowInt52(m_node->child1());
1958 m_out.store64(value, addressFor(data->machineLocal));
1959 break;
1960 }
1961
1962 case FlushedCell: {
1963 LValue value = lowCell(m_node->child1());
1964 m_out.store64(value, addressFor(data->machineLocal));
1965 break;
1966 }
1967
1968 case FlushedBoolean: {
1969 speculateBoolean(m_node->child1());
1970 m_out.store64(
1971 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1972 addressFor(data->machineLocal));
1973 break;
1974 }
1975
1976 default:
1977 DFG_CRASH(m_graph, m_node, "Bad flush format");
1978 break;
1979 }
1980 }
1981
    void compileNoOp()
    {
        // A no-op still performs the speculation checks implied by its
        // children's use kinds; only the checks are emitted, no other code.
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
    }
1986
    void compileToObjectOrCallObjectConstructor()
    {
        // Fast path: a cell that is already an object passes through
        // unchanged. Slow path: call out to the VM; the two ops differ only in
        // which operation they call and the extra argument they pass
        // (identifier for ToObject, cell operand for CallObjectConstructor).
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(isObject(value), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);

        ValueFromBlock slowResult;
        if (m_node->op() == ToObject) {
            auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
        } else
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2014
    void compileToThis()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        // Fast path: a cell whose type-info flags do not include
        // OverridesToThis can be used as |this| directly.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(OverridesToThis)),
            usually(continuation), rarely(slowCase));

        // Slow path: pick the strict or sloppy ToThis operation based on the
        // strictness of the code at this node's semantic origin.
        m_out.appendTo(slowCase, continuation);
        J_JITOperation_EJ function;
        if (m_graph.isStrictModeFor(m_node->origin.semantic))
            function = operationToThisStrict;
        else
            function = operationToThis;
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(function), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2047
2048 void compileValueAdd()
2049 {
2050 if (m_node->isBinaryUseKind(BigIntUse)) {
2051 LValue left = lowBigInt(m_node->child1());
2052 LValue right = lowBigInt(m_node->child2());
2053
2054 LValue result = vmCall(pointerType(), m_out.operation(operationAddBigInt), m_callFrame, left, right);
2055 setJSValue(result);
2056 return;
2057 }
2058
2059 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2060 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2061 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2062 const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex).ptr();
2063 auto repatchingFunction = operationValueAddOptimize;
2064 auto nonRepatchingFunction = operationValueAdd;
2065 compileBinaryMathIC<JITAddGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
2066 }
2067
2068 void compileValueSub()
2069 {
2070 if (m_node->isBinaryUseKind(BigIntUse)) {
2071 LValue left = lowBigInt(m_node->child1());
2072 LValue right = lowBigInt(m_node->child2());
2073
2074 LValue result = vmCall(pointerType(), m_out.operation(operationSubBigInt), m_callFrame, left, right);
2075 setJSValue(result);
2076 return;
2077 }
2078
2079 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2080 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2081 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2082 const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex).ptr();
2083 auto repatchingFunction = operationValueSubOptimize;
2084 auto nonRepatchingFunction = operationValueSub;
2085 compileBinaryMathIC<JITSubGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
2086 }
2087
2088 void compileValueMul()
2089 {
2090 if (m_node->isBinaryUseKind(BigIntUse)) {
2091 LValue left = lowBigInt(m_node->child1());
2092 LValue right = lowBigInt(m_node->child2());
2093
2094 LValue result = vmCall(Int64, m_out.operation(operationMulBigInt), m_callFrame, left, right);
2095 setJSValue(result);
2096 return;
2097 }
2098
2099 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2100 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2101 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2102 const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex).ptr();
2103 auto repatchingFunction = operationValueMulOptimize;
2104 auto nonRepatchingFunction = operationValueMul;
2105 compileBinaryMathIC<JITMulGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
2106 }
2107
    // Lowers a unary arithmetic node through a baseline-style math IC.
    // The Generator tries to emit a type-specialized inline fast path based on
    // the baseline ArithProfile; its slow-path jumps fall through to a call of
    // either repatchingFunction (which may re-specialize the IC) or
    // nonRepatchingFunction. If no inline path can be generated at all, we just
    // emit the plain call to nonRepatchingFunction.
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        Node* node = m_node;

        LValue operand = lowJSValue(node->child1());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(operand);
        // The IC code boxes/unboxes JSValues, so the tag registers must hold
        // their canonical constants while it runs ("late" so they stay live
        // across the whole patchpoint).
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Capture the state pointer by value; the generator and late-path
        // lambdas run after this function returns.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                // Shared between this generator and the late path below, hence Box.
                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile, instruction);
                // params[0] is the result JSValue register, params[1] the operand.
                mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));

                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out-of-line so the fast path stays compact.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // Repatching call: also passes the IC so the runtime
                            // can rewrite the inline code for the observed types.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        // Rejoin the fast path after the slow call.
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline path possible: always call the generic operation.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2193
    // Binary-operation counterpart of compileUnaryMathIC: lowers a binary
    // arithmetic node through a baseline-style math IC. Differences from the
    // unary version: two operands, SnippetOperands carrying the abstract result
    // types of the children, and two FP scratch registers for the Generator.
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Tell the generator what the abstract interpreter proved about each
        // operand so it can pick a tighter fast path.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // Tag registers must hold their canonical constants for JSValue boxing.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value; generator/late-path lambdas outlive this frame.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);


                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                // Shared between this generator and the late path below, hence Box.
                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile, instruction);
                // params[0] is the result register; params[1]/params[2] are left/right.
                mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
                    params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);

                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out-of-line so the fast path stays compact.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // Repatching call: also passes the IC so the runtime
                            // can rewrite the inline code for the observed types.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        // Rejoin the fast path after the slow call.
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline path possible: always call the generic operation.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2289
2290 void compileStrCat()
2291 {
2292 LValue result;
2293 if (m_node->child3()) {
2294 result = vmCall(
2295 Int64, m_out.operation(operationStrCat3), m_callFrame,
2296 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2297 lowJSValue(m_node->child2(), ManualOperandSpeculation),
2298 lowJSValue(m_node->child3(), ManualOperandSpeculation));
2299 } else {
2300 result = vmCall(
2301 Int64, m_out.operation(operationStrCat2), m_callFrame,
2302 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2303 lowJSValue(m_node->child2(), ManualOperandSpeculation));
2304 }
2305 setJSValue(result);
2306 }
2307
    // Lowers ArithAdd / ArithSub; the operation is selected from m_node->op()
    // and the representation from the node's binary use kind.
    void compileArithAddOrSub()
    {
        bool isSub = m_node->op() == ArithSub;
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            if (!shouldCheckOverflow(m_node->arithMode())) {
                // Wrapping semantics are acceptable: plain machine add/sub.
                setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
                break;
            }

            // Checked arithmetic: overflow triggers an OSR exit.
            CheckValue* result =
                isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
                && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
                // Both operands are proven int32-range, so the sum/difference
                // fits in int52 without any overflow check; use whichever int52
                // representation the first child already has.
                Int52Kind kind;
                LValue left = lowWhicheverInt52(m_node->child1(), kind);
                LValue right = lowInt52(m_node->child2(), kind);
                setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
                break;
            }

            // General int52 path: checked arithmetic with OSR exit on overflow.
            LValue left = lowInt52(m_node->child1());
            LValue right = lowInt52(m_node->child2());
            CheckValue* result =
                isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            LValue C1 = lowDouble(m_node->child1());
            LValue C2 = lowDouble(m_node->child2());

            setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
            break;
        }

        case UntypedUse: {
            // Untyped add never reaches this node (the crash below enforces it);
            // only untyped subtraction is lowered here, via a math IC.
            if (!isSub) {
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
            unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
            const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex).ptr();
            auto repatchingFunction = operationValueSubOptimize;
            auto nonRepatchingFunction = operationValueSub;
            compileBinaryMathIC<JITSubGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2376
2377 void compileArithClz32()
2378 {
2379 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2380 LValue operand = lowInt32(m_node->child1());
2381 setInt32(m_out.ctlz32(operand));
2382 return;
2383 }
2384 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2385 LValue argument = lowJSValue(m_node->child1());
2386 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
2387 setInt32(result);
2388 }
2389
    // Lowers ArithMul for Int32, Int52, and Double representations. Untyped
    // multiplication is handled by the math-IC path, not here.
    void compileArithMul()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            LValue result;

            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.mul(left, right);
            else {
                // Checked multiply: overflow triggers an OSR exit.
                CheckValue* speculation = m_out.speculateMul(left, right);
                blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
                result = speculation;
            }

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero product with a negative operand means the true result
                // is -0, which int32 cannot represent; OSR-exit in that case.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero32(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            // NOTE(review): the operands are deliberately lowered in opposite
            // Int52 representations (shifted vs. strict) so the product carries
            // the right scale — confirm against Int52Kind's definition.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), opposite(kind));

            CheckValue* result = m_out.speculateMul(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // Same -0 detection as the int32 case, on 64-bit values.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2462
2463 void compileValueDiv()
2464 {
2465 if (m_node->isBinaryUseKind(BigIntUse)) {
2466 LValue left = lowBigInt(m_node->child1());
2467 LValue right = lowBigInt(m_node->child2());
2468
2469 LValue result = vmCall(pointerType(), m_out.operation(operationDivBigInt), m_callFrame, left, right);
2470 setJSValue(result);
2471 return;
2472 }
2473
2474 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2475 }
2476
    // Lowers ArithDiv for Int32 and Double representations, inserting the
    // OSR-exit checks that integer division needs on top of the machine op.
    void compileArithDiv()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // 0 / negative yields -0 in JS, which int32 cannot represent:
                // exit if the numerator is zero and the denominator negative.
                LBasicBlock zeroNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.isZero32(numerator),
                    rarely(zeroNumerator), usually(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);

                speculate(
                    NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // (denominator + 1) >u 1 is false exactly when denominator is
                // 0 or -1 — the two values hardware division can trap on.
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                // Exit on division by zero, and on INT_MIN / -1 (the one
                // overflowing int32 division).
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.div(numerator, denominator);
                // The result must be exact: if result * denominator != numerator,
                // the true quotient is fractional, so exit to handle it as double.
                speculate(
                    Overflow, noValue(), 0,
                    m_out.notEqual(m_out.mul(result, denominator), numerator));
                setInt32(result);
            } else
                // "Chill" division: defined (non-trapping) for 0 and INT_MIN/-1.
                setInt32(m_out.chillDiv(numerator, denominator));

            break;
        }

        case DoubleRepUse: {
            setDouble(m_out.doubleDiv(
                lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2540
    // Lowers ArithMod for Int32 and Double representations. The int32 path
    // mirrors compileArithDiv's denominator checks, plus a -0 check on the
    // remainder.
    void compileArithMod()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            LValue remainder;
            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // (denominator + 1) >u 1 is false exactly when denominator is
                // 0 or -1 — the values hardware division can trap on.
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                // Exit on modulo by zero and on INT_MIN % -1 (trapping case).
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.mod(numerator, denominator);
                remainder = result;
            } else
                // "Chill" modulo: defined (non-trapping) for the unsafe cases.
                remainder = m_out.chillMod(numerator, denominator);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero remainder with a negative numerator means the true JS
                // result is -0, which int32 cannot represent; OSR-exit then.
                LBasicBlock negativeNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.lessThan(numerator, m_out.int32Zero),
                    unsure(negativeNumerator), unsure(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);

                speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            setInt32(remainder);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2602
    // Lowers ArithMin / ArithMax (selected via m_node->op()).
    void compileArithMinOrMax()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            // Integers: a simple compare-and-select suffices.
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            setInt32(
                m_out.select(
                    m_node->op() == ArithMin
                        ? m_out.lessThan(left, right)
                        : m_out.lessThan(right, left),
                    left, right));
            break;
        }

        case DoubleRepUse: {
            // Doubles need NaN handling: if the ordered comparison picking
            // `left` fails, we fall through and pick `right` only if the
            // reverse ordered comparison holds; when both fail (i.e. either
            // operand is NaN) the result is PNaN.
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());

            LBasicBlock notLessThan = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> results;

            // Fast path: left strictly wins the comparison.
            results.append(m_out.anchor(left));
            m_out.branch(
                m_node->op() == ArithMin
                    ? m_out.doubleLessThan(left, right)
                    : m_out.doubleGreaterThan(left, right),
                unsure(continuation), unsure(notLessThan));

            // Otherwise: right if the reverse ordered comparison holds, NaN if
            // the operands are unordered.
            LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
            results.append(m_out.anchor(m_out.select(
                m_node->op() == ArithMin
                    ? m_out.doubleGreaterThanOrEqual(left, right)
                    : m_out.doubleLessThanOrEqual(left, right),
                right, m_out.constDouble(PNaN))));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, results));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2653
    // Lowers ArithAbs (Math.abs) for Int32, Double, and untyped inputs.
    void compileArithAbs()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            // Branchless abs: mask is 0 for non-negative values and all-ones
            // (-1) for negative ones, so (value + mask) ^ mask leaves
            // non-negative values unchanged and negates negative ones.
            LValue mask = m_out.aShr(value, m_out.constInt32(31));
            LValue result = m_out.bitXor(mask, m_out.add(mask, value));

            // abs(INT_MIN) overflows back to a negative value; exit if so.
            if (shouldCheckOverflow(m_node->arithMode()))
                speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));

            setInt32(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
            break;
        }

        default: {
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
            LValue argument = lowJSValue(m_node->child1());
            LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
            setDouble(result);
            break;
        }
        }
    }
2684
2685 void compileArithUnary()
2686 {
2687 if (m_node->child1().useKind() == DoubleRepUse) {
2688 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2689 return;
2690 }
2691 LValue argument = lowJSValue(m_node->child1());
2692 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
2693 setDouble(result);
2694 }
2695
    // Lowers ArithPow. Small non-negative integer exponents use the powi
    // intrinsic directly; otherwise we emit an explicit CFG implementing the
    // JS special cases (NaN exponent, |base| == 1 with infinite exponent,
    // exponent == +/-0.5 via sqrt) before falling back to pow.
    void compileArithPow()
    {
        if (m_node->child2().useKind() == Int32Use)
            setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
        else {
            LValue base = lowDouble(m_node->child1());
            LValue exponent = lowDouble(m_node->child2());

            LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
            LBasicBlock integerExponentPowBlock = m_out.newBlock();
            LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
            LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
            LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
            LBasicBlock testExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
            LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
            LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
            LBasicBlock powBlock = m_out.newBlock();
            LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If the exponent round-trips through int32 unchanged, it is an
            // integer and may qualify for the fast powi path.
            LValue integerExponent = m_out.doubleToInt(exponent);
            LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
            LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
            m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));

            // Only small integer exponents use powi (unsigned compare also
            // rejects negative exponents).
            LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
            LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
            m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));

            m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
            ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
            m_out.jump(continuation);

            // If y is NaN, the result is NaN.
            m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
            LValue exponentIsNaN;
            if (provenType(m_node->child2()) & SpecDoubleNaN)
                exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
            else
                exponentIsNaN = m_out.booleanFalse;
            m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));

            // If abs(x) is 1 and y is +infinity, the result is NaN.
            // If abs(x) is 1 and y is -infinity, the result is NaN.

            // Test if base == 1.
            m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
            LValue absoluteBase = m_out.doubleAbs(base);
            LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
            m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));

            // Test if abs(y) == Infinity.
            m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
            LValue absoluteExponent = m_out.doubleAbs(exponent);
            LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));

            // If y == 0.5 or y == -0.5, handle it through SQRT.
            // We have to be careful with -0 and -Infinity.

            // Test if y == 0.5
            m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
            LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
            m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));

            // Handle x == -0.
            m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
            LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
            LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));

            // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
            m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
            ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
            m_out.jump(continuation);

            // The exponent is 0.5, the base is infinite, the result is always infinite.
            m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
            ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if y == -0.5
            m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
            LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
            m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));

            // Handle x == -0.
            m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
            LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));

            // Zero base with exponent -0.5: 1/sqrt(0) == Infinity.
            m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
            ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
            LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));

            // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
            m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
            LValue sqrtBase = m_out.doubleSqrt(base);
            ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
            m_out.jump(continuation);

            // The exponent is -0.5, the base is infinite, the result is always zero.
            m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
            ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
            m_out.jump(continuation);

            // General case: call the pow intrinsic.
            m_out.appendTo(powBlock, nanExceptionResultIsNaN);
            ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
            m_out.jump(continuation);

            m_out.appendTo(nanExceptionResultIsNaN, continuation);
            ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
            m_out.jump(continuation);

            // Merge all nine result producers.
            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
        }
    }
2832
    // Lowers ArithRandom (Math.random) by inlining the global object's
    // WeakRandom::advance() — an xorshift128+ step — and then scaling the
    // low 53 bits into a double in [0, 1).
    void compileArithRandom()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        // Inlined WeakRandom::advance().
        // uint64_t x = m_low;
        void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
        LValue low = m_out.load64(m_out.absolute(lowAddress));
        // uint64_t y = m_high;
        void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
        LValue high = m_out.load64(m_out.absolute(highAddress));
        // m_low = y;
        m_out.store64(high, m_out.absolute(lowAddress));

        // x ^= x << 23;
        LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);

        // x ^= x >> 17;
        LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);

        // x ^= y ^ (y >> 26);
        LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);

        // m_high = x;
        m_out.store64(phase3, m_out.absolute(highAddress));

        // return x + y;
        LValue random64 = m_out.add(phase3, high);

        // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
        LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));

        LValue double53Integer = m_out.intToDouble(random53);

        // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
        // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
        static const double scale = 1.0 / (1ULL << 53);

        // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
        // It just reduces the exp part of the given 53bit double integer.
        // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
        // Now we get 53bit precision random double value in [0, 1).
        LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));

        setDouble(result);
    }
2879
    // Lowers ArithRound (Math.round: round half towards +Infinity).
    void compileArithRound()
    {
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue result = nullptr;
            if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
                // Fast path: when -0 does not need to be distinguished,
                // floor(x + 0.5) implements round-half-up directly.
                LValue value = lowDouble(m_node->child1());
                result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
            } else {
                // General path: start from ceil(x) and step down by one when
                // the fractional distance ceil(x) - x exceeds 0.5. This keeps
                // the sign of zero intact (e.g. round(-0.4) stays -0).
                LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue value = lowDouble(m_node->child1());
                LValue integerValue = m_out.doubleCeil(value);
                ValueFromBlock integerValueResult = m_out.anchor(integerValue);

                LValue realPart = m_out.doubleSub(integerValue, value);

                // "OrUnordered" keeps NaN inputs on a defined path (NaN - NaN
                // is NaN either way).
                m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));

                LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
                LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
                ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);

                result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
            }

            if (producesInteger(m_node->arithRoundingMode())) {
                // Checked conversion; exits on values (incl. -0 if requested)
                // that do not fit int32.
                LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
                setInt32(integerValue);
            } else
                setDouble(result);
            return;
        }

        // Untyped input: go through the VM.
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
    }
2920
2921 void compileArithFloor()
2922 {
2923 if (m_node->child1().useKind() == DoubleRepUse) {
2924 LValue value = lowDouble(m_node->child1());
2925 LValue integerValue = m_out.doubleFloor(value);
2926 if (producesInteger(m_node->arithRoundingMode()))
2927 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2928 else
2929 setDouble(integerValue);
2930 return;
2931 }
2932 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2933 LValue argument = lowJSValue(m_node->child1());
2934 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2935 }
2936
2937 void compileArithCeil()
2938 {
2939 if (m_node->child1().useKind() == DoubleRepUse) {
2940 LValue value = lowDouble(m_node->child1());
2941 LValue integerValue = m_out.doubleCeil(value);
2942 if (producesInteger(m_node->arithRoundingMode()))
2943 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2944 else
2945 setDouble(integerValue);
2946 return;
2947 }
2948 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2949 LValue argument = lowJSValue(m_node->child1());
2950 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2951 }
2952
2953 void compileArithTrunc()
2954 {
2955 if (m_node->child1().useKind() == DoubleRepUse) {
2956 LValue value = lowDouble(m_node->child1());
2957 LValue result = m_out.doubleTrunc(value);
2958 if (producesInteger(m_node->arithRoundingMode()))
2959 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2960 else
2961 setDouble(result);
2962 return;
2963 }
2964 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2965 LValue argument = lowJSValue(m_node->child1());
2966 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
2967 }
2968
2969 void compileArithSqrt()
2970 {
2971 if (m_node->child1().useKind() == DoubleRepUse) {
2972 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2973 return;
2974 }
2975 LValue argument = lowJSValue(m_node->child1());
2976 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
2977 setDouble(result);
2978 }
2979
2980 void compileArithFRound()
2981 {
2982 if (m_node->child1().useKind() == DoubleRepUse) {
2983 setDouble(m_out.fround(lowDouble(m_node->child1())));
2984 return;
2985 }
2986 LValue argument = lowJSValue(m_node->child1());
2987 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
2988 setDouble(result);
2989 }
2990
    // Lowers ValueNegate (negation of an arbitrary JSValue) through a unary
    // math inline cache, feeding it the baseline ArithProfile and bytecode
    // instruction for profiling.
    void compileValueNegate()
    {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
        CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
        const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex).ptr();
        // The repatching function is used while the IC can still self-upgrade;
        // the non-repatching one once it has settled on the generic path.
        auto repatchingFunction = operationArithNegateOptimize;
        auto nonRepatchingFunction = operationArithNegate;
        compileUnaryMathIC<JITNegGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
    }
3002
    // Lowers ArithNegate for Int32, Int52, and Double operands, emitting the
    // overflow and negative-zero OSR-exit checks demanded by the node's
    // arith mode.
    void compileArithNegate()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            LValue result;
            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.neg(value);
            else if (!shouldCheckNegativeZero(m_node->arithMode())) {
                // Checked subtract 0 - value catches INT32_MIN overflow.
                CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
                blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
                result = check;
            } else {
                // (value & 0x7fffffff) == 0 is true exactly for 0 and
                // INT32_MIN: 0 would produce -0 and INT32_MIN would overflow,
                // so both must exit.
                speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
                result = m_out.neg(value);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
                // The value fits in int32 range, so negation cannot overflow
                // the 52-bit representation; only -0 needs a check.
                Int52Kind kind;
                LValue value = lowWhicheverInt52(m_node->child1(), kind);
                LValue result = m_out.neg(value);
                if (shouldCheckNegativeZero(m_node->arithMode()))
                    speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
                setInt52(result, kind);
                break;
            }

            // Full-range Int52: use a checked subtract for overflow.
            LValue value = lowInt52(m_node->child1());
            CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
            blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
            if (shouldCheckNegativeZero(m_node->arithMode()))
                speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            // Doubles negate without any checks; -0 is representable.
            setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
3055
3056 void compileValueBitNot()
3057 {
3058 if (m_node->child1().useKind() == BigIntUse) {
3059 LValue operand = lowBigInt(m_node->child1());
3060 LValue result = vmCall(pointerType(), m_out.operation(operationBitNotBigInt), m_callFrame, operand);
3061 setJSValue(result);
3062 return;
3063 }
3064
3065 LValue operand = lowJSValue(m_node->child1());
3066 LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
3067 setJSValue(result);
3068 }
3069
3070 void compileArithBitNot()
3071 {
3072 setInt32(m_out.bitNot(lowInt32(m_node->child1())));
3073 }
3074
3075 void compileValueBitAnd()
3076 {
3077 if (m_node->isBinaryUseKind(BigIntUse)) {
3078 LValue left = lowBigInt(m_node->child1());
3079 LValue right = lowBigInt(m_node->child2());
3080
3081 LValue result = vmCall(pointerType(), m_out.operation(operationBitAndBigInt), m_callFrame, left, right);
3082 setJSValue(result);
3083 return;
3084 }
3085
3086 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
3087 }
3088
3089 void compileArithBitAnd()
3090 {
3091 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3092 }
3093
3094 void compileValueBitOr()
3095 {
3096 if (m_node->isBinaryUseKind(BigIntUse)) {
3097 LValue left = lowBigInt(m_node->child1());
3098 LValue right = lowBigInt(m_node->child2());
3099
3100 LValue result = vmCall(pointerType(), m_out.operation(operationBitOrBigInt), m_callFrame, left, right);
3101 setJSValue(result);
3102 return;
3103 }
3104
3105 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
3106 }
3107
3108 void compileArithBitOr()
3109 {
3110 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3111 }
3112
3113 void compileValueBitXor()
3114 {
3115 if (m_node->isBinaryUseKind(BigIntUse)) {
3116 LValue left = lowBigInt(m_node->child1());
3117 LValue right = lowBigInt(m_node->child2());
3118
3119 LValue result = vmCall(pointerType(), m_out.operation(operationBitXorBigInt), m_callFrame, left, right);
3120 setJSValue(result);
3121 return;
3122 }
3123
3124 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
3125 }
3126
3127 void compileArithBitXor()
3128 {
3129 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3130 }
3131
3132 void compileBitRShift()
3133 {
3134 if (m_node->isBinaryUseKind(UntypedUse)) {
3135 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
3136 return;
3137 }
3138 setInt32(m_out.aShr(
3139 lowInt32(m_node->child1()),
3140 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3141 }
3142
3143 void compileBitLShift()
3144 {
3145 if (m_node->isBinaryUseKind(UntypedUse)) {
3146 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
3147 return;
3148 }
3149 setInt32(m_out.shl(
3150 lowInt32(m_node->child1()),
3151 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3152 }
3153
3154 void compileBitURShift()
3155 {
3156 if (m_node->isBinaryUseKind(UntypedUse)) {
3157 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
3158 return;
3159 }
3160 setInt32(m_out.lShr(
3161 lowInt32(m_node->child1()),
3162 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3163 }
3164
3165 void compileUInt32ToNumber()
3166 {
3167 LValue value = lowInt32(m_node->child1());
3168
3169 if (doesOverflow(m_node->arithMode())) {
3170 setStrictInt52(m_out.zeroExtPtr(value));
3171 return;
3172 }
3173
3174 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
3175 setInt32(value);
3176 }
3177
    // Lowers CheckStructure: speculate that child1's structure ID is in the
    // node's structure set. CellOrOtherUse additionally lets null/undefined
    // pass without a structure check.
    void compileCheckStructure()
    {
        // Constant bases indicate a snapshotted/cached object, so exits are
        // attributed to the cache rather than generic profiling.
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        switch (m_node->child1().useKind()) {
        case CellUse:
        case KnownCellUse: {
            LValue cell = lowCell(m_node->child1());

            checkStructure(
                m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            return;
        }

        case CellOrOtherUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            // Cell branch: same structure-set check as above.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            checkStructure(
                m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            m_out.jump(continuation);

            // Non-cell branch: must be "other" (null/undefined) to pass.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3232
    // Like CheckStructure, but the empty value (all-zero bits) is allowed to
    // pass. The empty-value bypass blocks are only created when abstract
    // interpretation says the input can actually be empty.
    void compileCheckStructureOrEmpty()
    {
        // See compileCheckStructure for the exit-kind distinction.
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        LValue cell = lowCell(m_node->child1());
        bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
        LBasicBlock notEmpty;
        LBasicBlock continuation;
        LBasicBlock lastNext;
        if (maySeeEmptyValue) {
            // Empty values skip the structure check entirely.
            notEmpty = m_out.newBlock();
            continuation = m_out.newBlock();
            m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
            lastNext = m_out.appendTo(notEmpty, continuation);
        }

        checkStructure(
            m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
            exitKind, m_node->structureSet(),
            [&] (RegisteredStructure structure) {
                return weakStructureID(structure);
            });

        if (maySeeEmptyValue) {
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
    }
3265
3266 void compileCheckCell()
3267 {
3268 LValue cell = lowCell(m_node->child1());
3269
3270 speculate(
3271 BadCell, jsValueValue(cell), m_node->child1().node(),
3272 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
3273 }
3274
    // CheckBadCell is a check that always fails: unconditionally terminate
    // this code path with a BadCell OSR exit.
    void compileCheckBadCell()
    {
        terminate(BadCell);
    }
3279
    // Lowers CheckNotEmpty: exit with TDZFailure when child1 is the empty
    // JSValue (all-zero bits), i.e. a temporal-dead-zone read.
    void compileCheckNotEmpty()
    {
        speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
    }
3284
    // Debug-only assertion that child1 is not the empty value. Emits a
    // patchpoint that hits a breakpoint if the value is empty; compiles to
    // nothing unless validation is enabled.
    void compileAssertNotEmpty()
    {
        if (!validationEnabled())
            return;

        LValue val = lowJSValue(m_node->child1());
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(val);
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg input = params[0].gpr();
                // Trap (rather than exit) on an empty value: this is a
                // compiler-invariant violation, not a speculation failure.
                CCallHelpers::Jump done = jit.branchIfNotEmpty(input);
                jit.breakpoint();
                done.link(&jit);
            });
    }
3302
3303 void compileCheckStringIdent()
3304 {
3305 UniquedStringImpl* uid = m_node->uidOperand();
3306 LValue stringImpl = lowStringIdent(m_node->child1());
3307 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
3308 }
3309
3310 void compileGetExecutable()
3311 {
3312 LValue cell = lowCell(m_node->child1());
3313 speculateFunction(m_node->child1(), cell);
3314 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
3315 }
3316
    // Lowers Arrayify/ArrayifyToStructure: if the object does not already
    // have the expected indexing shape (or exact structure), call the runtime
    // to convert its storage, then re-check and exit if conversion failed.
    void compileArrayify()
    {
        LValue cell = lowCell(m_node->child1());
        // child2, when present, is the index about to be accessed.
        LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;

        LBasicBlock unexpectedStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Arrayify checks the indexing shape; ArrayifyToStructure checks for
        // one exact structure ID.
        auto isUnexpectedArray = [&] (LValue cell) {
            if (m_node->op() == Arrayify)
                return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));

            ASSERT(m_node->op() == ArrayifyToStructure);
            return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
        };

        m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);

        if (property) {
            // Indices at or above MIN_SPARSE_ARRAY_INDEX would go sparse, so
            // converting to a dense shape would be pointless; exit instead.
            switch (m_node->arrayMode().type()) {
            case Array::Int32:
            case Array::Double:
            case Array::Contiguous:
                speculate(
                    Uncountable, noValue(), 0,
                    m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
                break;
            default:
                break;
            }
        }

        // Convert the object's storage to the requested shape.
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
            vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
            break;
        case Array::Double:
            vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
            break;
        case Array::Contiguous:
            vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
            break;
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            break;
        }

        // The conversion may not have produced the expected shape; exit then.
        speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
3375
    // Lowers PutStructure: a structure transition that only needs a 32-bit
    // structure-ID store, because indexing mode, inline type flags, and cell
    // type are asserted not to change across the transition.
    void compilePutStructure()
    {
        // Record the transition so GC/watchpoint machinery knows about it.
        m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);

        RegisteredStructure oldStructure = m_node->transition()->previous;
        RegisteredStructure newStructure = m_node->transition()->next;
        ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode());
        ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
        ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());

        LValue cell = lowCell(m_node->child1());
        m_out.store32(
            weakStructureID(newStructure),
            cell, m_heaps.JSCell_structureID);
    }
3391
    // Lowers GetById-style nodes (Get/TryGet/GetDirect) through the getById
    // inline cache. Untyped bases branch: cells use the IC, non-cells call
    // the generic runtime function.
    void compileGetById(AccessType type)
    {
        ASSERT(type == AccessType::Get || type == AccessType::TryGet || type == AccessType::GetDirect);
        switch (m_node->child1().useKind()) {
        case CellUse: {
            setJSValue(getById(lowCell(m_node->child1()), type));
            return;
        }

        case UntypedUse: {
            // This is pretty weird, since we duplicate the slow path both here and in the
            // code generated by the IC. We should investigate making this less bad.
            // https://bugs.webkit.org/show_bug.cgi?id=127830
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getById(value, type));
            m_out.jump(continuation);

            // Generic function matching the access type (get/try/direct).
            J_JITOperation_EJI getByIdFunction = appropriateGenericGetByIdFunction(type);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(getByIdFunction),
                m_callFrame, value,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3437
    // Lowers GetByIdWithThis. When both base and |this| are proven cells the
    // IC is used directly; otherwise we branch and only use the IC when both
    // turn out to be cells at runtime, falling back to the generic runtime
    // function when either is not.
    void compileGetByIdWithThis()
    {
        if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
            setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
        else {
            LValue base = lowJSValue(m_node->child1());
            LValue thisValue = lowJSValue(m_node->child2());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock thisValueCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);

            m_out.branch(
                isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));

            // Both cells: use the inline cache.
            m_out.appendTo(thisValueCellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
            m_out.jump(continuation);

            // Either operand is a non-cell: generic runtime path.
            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByIdWithThisGeneric),
                m_callFrame, base, thisValue,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }

    }
3475
3476 void compileGetByValWithThis()
3477 {
3478 LValue base = lowJSValue(m_node->child1());
3479 LValue thisValue = lowJSValue(m_node->child2());
3480 LValue subscript = lowJSValue(m_node->child3());
3481
3482 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
3483 setJSValue(result);
3484 }
3485
3486 void compilePutByIdWithThis()
3487 {
3488 LValue base = lowJSValue(m_node->child1());
3489 LValue thisValue = lowJSValue(m_node->child2());
3490 LValue value = lowJSValue(m_node->child3());
3491
3492 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3493 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
3494 }
3495
3496 void compilePutByValWithThis()
3497 {
3498 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3499 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3500 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3501 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3502
3503 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3504 m_callFrame, base, thisValue, property, value);
3505 }
3506
    // Lowers the Atomics read-modify-write family (add/and/or/xor/sub,
    // exchange, compareExchange, load, store). If the typed-array storage
    // edge is absent we take a fully generic runtime call; otherwise we emit
    // the matching B3 atomic op directly on the typed-array memory.
    void compileAtomicsReadModifyWrite()
    {
        TypedArrayType type = m_node->arrayMode().typedArrayType();
        unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
        Edge baseEdge = m_graph.child(m_node, 0);
        Edge indexEdge = m_graph.child(m_node, 1);
        Edge argEdges[maxNumExtraAtomicsArgs];
        for (unsigned i = numExtraArgs; i--;)
            argEdges[i] = m_graph.child(m_node, 2 + i);
        Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);

        // Maps the node op to its generic runtime entry point.
        auto operation = [&] () -> LValue {
            switch (m_node->op()) {
            case AtomicsAdd:
                return m_out.operation(operationAtomicsAdd);
            case AtomicsAnd:
                return m_out.operation(operationAtomicsAnd);
            case AtomicsCompareExchange:
                return m_out.operation(operationAtomicsCompareExchange);
            case AtomicsExchange:
                return m_out.operation(operationAtomicsExchange);
            case AtomicsLoad:
                return m_out.operation(operationAtomicsLoad);
            case AtomicsOr:
                return m_out.operation(operationAtomicsOr);
            case AtomicsStore:
                return m_out.operation(operationAtomicsStore);
            case AtomicsSub:
                return m_out.operation(operationAtomicsSub);
            case AtomicsXor:
                return m_out.operation(operationAtomicsXor);
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
        };

        // No storage edge: operands were not proven suitable for the inline
        // path, so call the generic runtime implementation with boxed values.
        if (!storageEdge) {
            Vector<LValue> args;
            args.append(m_callFrame);
            args.append(lowJSValue(baseEdge));
            args.append(lowJSValue(indexEdge));
            for (unsigned i = 0; i < numExtraArgs; ++i)
                args.append(lowJSValue(argEdges[i]));
            LValue result = vmCall(Int64, operation(), args);
            setJSValue(result);
            return;
        }

        LValue index = lowInt32(indexEdge);
        LValue args[2];
        for (unsigned i = numExtraArgs; i--;)
            args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
        LValue storage = lowStorage(storageEdge);

        TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
        Width width = widthForBytes(elementSize(type));

        LValue atomicValue;
        LValue result;

        // For signed sub-word element types, mask the raw result back down to
        // the element's bit width before it is re-signed/boxed downstream.
        auto sanitizeResult = [&] (LValue value) -> LValue {
            if (isSigned(type)) {
                switch (elementSize(type)) {
                case 1:
                    value = m_out.bitAnd(value, m_out.constInt32(0xff));
                    break;
                case 2:
                    value = m_out.bitAnd(value, m_out.constInt32(0xffff));
                    break;
                case 4:
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            return value;
        };

        switch (m_node->op()) {
        case AtomicsAdd:
            atomicValue = m_out.atomicXchgAdd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsAnd:
            atomicValue = m_out.atomicXchgAnd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsCompareExchange:
            atomicValue = m_out.atomicStrongCAS(args[0], args[1], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsExchange:
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsLoad:
            // Atomic load expressed as an exchange-add of zero.
            atomicValue = m_out.atomicXchgAdd(m_out.int32Zero, pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsOr:
            atomicValue = m_out.atomicXchgOr(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsStore:
            // Store returns the stored operand, not the previous value.
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = args[0];
            break;
        case AtomicsSub:
            atomicValue = m_out.atomicXchgSub(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsXor:
            atomicValue = m_out.atomicXchgXor(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        // Signify that the state against which the atomic operations are serialized is confined to just
        // the typed array storage, since that's as precise of an abstraction as we can have of shared
        // array buffer storage.
        m_heaps.decorateFencedAccess(&m_heaps.typedArrayProperties, atomicValue);

        setIntTypedArrayLoadResult(result, type);
    }
3635
    // Lowers AtomicsIsLockFree. With an int32 operand this is a switch that
    // returns true for byte sizes 1, 2, and 4 and false otherwise; anything
    // else defers to the runtime.
    void compileAtomicsIsLockFree()
    {
        if (m_node->child1().useKind() != Int32Use) {
            setJSValue(vmCall(Int64, m_out.operation(operationAtomicsIsLockFree), m_callFrame, lowJSValue(m_node->child1())));
            return;
        }

        LValue bytes = lowInt32(m_node->child1());

        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueCase);

        // Sizes 1, 2, 4 select the true case; everything else is the default.
        Vector<SwitchCase> cases;
        cases.append(SwitchCase(m_out.constInt32(1), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(2), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(4), trueCase, Weight()));
        m_out.switchInstruction(bytes, cases, falseCase, Weight());

        m_out.appendTo(trueCase, falseCase);
        ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);
        m_out.appendTo(falseCase, continuation);
        ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueValue, falseValue));
    }
3667
3668 void compileDefineDataProperty()
3669 {
3670 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3671 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
3672 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
3673 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3674 switch (propertyEdge.useKind()) {
3675 case StringUse: {
3676 LValue property = lowString(propertyEdge);
3677 vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
3678 break;
3679 }
3680 case StringIdentUse: {
3681 LValue property = lowStringIdent(propertyEdge);
3682 vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
3683 break;
3684 }
3685 case SymbolUse: {
3686 LValue property = lowSymbol(propertyEdge);
3687 vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
3688 break;
3689 }
3690 case UntypedUse: {
3691 LValue property = lowJSValue(propertyEdge);
3692 vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
3693 break;
3694 }
3695 default:
3696 RELEASE_ASSERT_NOT_REACHED();
3697 }
3698 }
3699
3700 void compileDefineAccessorProperty()
3701 {
3702 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3703 LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
3704 LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
3705 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
3706 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3707 switch (propertyEdge.useKind()) {
3708 case StringUse: {
3709 LValue property = lowString(propertyEdge);
3710 vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
3711 break;
3712 }
3713 case StringIdentUse: {
3714 LValue property = lowStringIdent(propertyEdge);
3715 vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
3716 break;
3717 }
3718 case SymbolUse: {
3719 LValue property = lowSymbol(propertyEdge);
3720 vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
3721 break;
3722 }
3723 case UntypedUse: {
3724 LValue property = lowJSValue(propertyEdge);
3725 vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
3726 break;
3727 }
3728 default:
3729 RELEASE_ASSERT_NOT_REACHED();
3730 }
3731 }
3732
    // Lowers PutById/PutByIdDirect through a patchpoint-hosted inline cache.
    // The patchpoint embeds a JITPutByIdGenerator fast path, with a late slow
    // path that calls the generator's slow-path function and links back.
    void compilePutById()
    {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());

        Node* node = m_node;
        LValue base = lowCell(node->child1());
        LValue value = lowJSValue(node->child2());
        auto uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(value);
        // The IC's generated code relies on the tag registers holding their
        // canonical values.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // JS setter call ICs generated by the PutById IC will need this.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITPutByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
                    node->op() == PutByIdDirect ? Direct : NotDirect);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // Slow path: call the generator's slow-path function, then
                // jump back past the fast path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });
    }
3802
3803 void compileGetButterfly()
3804 {
3805 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly);
3806 setStorage(butterfly);
3807 }
3808
    // Lowers ConstantStoragePointer: the storage address is baked into the
    // node at compile time, so just materialize the constant.
    void compileConstantStoragePointer()
    {
        setStorage(m_out.constIntPtr(m_node->storagePointer()));
    }
3813
    // Lowers GetIndexedPropertyStorage. For strings, resolves rope strings
    // via the runtime and returns the character data pointer; for typed
    // arrays, loads the (caged) backing vector.
    void compileGetIndexedPropertyStorage()
    {
        LValue cell = lowCell(m_node->child1());

        if (m_node->arrayMode().type() == Array::String) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);

            // Rope strings have no contiguous buffer yet; resolve them first.
            m_out.branch(isRopeString(cell, m_node->child1()), rarely(slowPath), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);

            ValueFromBlock slowResult = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));

            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            // Either way we now have a StringImpl; hand back its data pointer.
            setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
            return;
        }

        DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()), m_node->arrayMode().typedArrayType());
        LValue vector = m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector);
        // Gigacage check confines the vector pointer to the primitive cage.
        setStorage(caged(Gigacage::Primitive, vector));
    }
3844
3845 void compileCheckArray()
3846 {
3847 Edge edge = m_node->child1();
3848 LValue cell = lowCell(edge);
3849
3850 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
3851 return;
3852
3853 speculate(
3854 BadIndexingType, jsValueValue(cell), 0,
3855 m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));
3856 }
3857
    // Lowers GetTypedArrayByteOffset: computes the view's byte offset into its
    // backing ArrayBuffer. Non-wasteful views always have offset 0. A wasteful
    // view's offset is (vector - buffer data pointer); a null vector (e.g. a
    // zero-length/detached view) flows 0 through the nullVectorOut anchor.
    void compileGetTypedArrayByteOffset()
    {
        LValue basePtr = lowCell(m_node->child1());

        LBasicBlock simpleCase = m_out.newBlock();
        LBasicBlock wastefulCase = m_out.newBlock();
        LBasicBlock notNull = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Dispatch on the view's mode: anything but WastefulTypedArray has a
        // trivially-zero byte offset.
        LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
        m_out.branch(
            m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(simpleCase), unsure(wastefulCase));

        LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);

        ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));

        m_out.jump(continuation);

        m_out.appendTo(wastefulCase, notNull);

        // If the vector is null, the anchored (null, i.e. zero) value itself
        // becomes the result via the phi below.
        LValue vector = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
        ValueFromBlock nullVectorOut = m_out.anchor(vector);
        m_out.branch(vector, unsure(notNull), unsure(continuation));

        m_out.appendTo(notNull, continuation);

        // Walk butterfly -> ArrayBuffer; both pointers the attacker could
        // influence are run through their respective cages.
        LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly));
        LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);

        LValue vectorPtr = caged(Gigacage::Primitive, vector);

        // FIXME: This needs caging.
        // https://bugs.webkit.org/show_bug.cgi?id=175515
        LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);

        ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);

        setInt32(m_out.castToInt32(m_out.phi(pointerType(), simpleOut, nullVectorOut, wastefulOut)));
    }
3902
    // Lowers GetPrototypeOf. For Array/Function/FinalObject uses it reads the
    // prototype inline: if abstract interpretation pins the structure set to
    // all-mono-proto (prototype stored in the Structure) or all-poly-proto
    // (prototype stored in a known inline-storage slot), a single load
    // suffices; otherwise it branches on whether Structure::m_prototype is
    // empty. Other use kinds fall back to VM calls.
    void compileGetPrototypeOf()
    {
        switch (m_node->child1().useKind()) {
        case ArrayUse:
        case FunctionUse:
        case FinalObjectUse: {
            LValue object = lowCell(m_node->child1());
            switch (m_node->child1().useKind()) {
            case ArrayUse:
                speculateArray(m_node->child1(), object);
                break;
            case FunctionUse:
                speculateFunction(m_node->child1(), object);
                break;
            case FinalObjectUse:
                speculateFinalObject(m_node->child1(), object);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            LValue structure = loadStructure(object);

            // If the value is known to be an object with a finite structure
            // set, classify every structure as mono- or poly-proto.
            AbstractValue& value = m_state.forNode(m_node->child1());
            if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
                bool hasPolyProto = false;
                bool hasMonoProto = false;
                value.m_structure.forEach([&] (RegisteredStructure structure) {
                    if (structure->hasPolyProto())
                        hasPolyProto = true;
                    else
                        hasMonoProto = true;
                });

                // All mono-proto: the prototype lives in the Structure.
                if (hasMonoProto && !hasPolyProto) {
                    setJSValue(m_out.load64(structure, m_heaps.Structure_prototype));
                    return;
                }

                // All poly-proto: the prototype lives at a known offset in the
                // object's inline storage.
                if (hasPolyProto && !hasMonoProto) {
                    setJSValue(m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
                    return;
                }
            }

            // Mixed or unknown: test Structure::m_prototype at runtime; zero
            // means poly-proto, so read the inline-storage slot instead.
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock loadPolyProto = m_out.newBlock();

            LValue prototypeBits = m_out.load64(structure, m_heaps.Structure_prototype);
            ValueFromBlock directPrototype = m_out.anchor(prototypeBits);
            m_out.branch(m_out.isZero64(prototypeBits), unsure(loadPolyProto), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(loadPolyProto, continuation);
            ValueFromBlock polyProto = m_out.anchor(
                m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, directPrototype, polyProto));
            return;
        }
        case ObjectUse: {
            setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOfObject), m_callFrame, lowObject(m_node->child1())));
            return;
        }
        default: {
            setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOf), m_callFrame, lowJSValue(m_node->child1())));
            return;
        }
        }
    }
3975
    // Lowers GetArrayLength for every array mode: butterfly public length for
    // contiguous shapes, ArrayStorage public length (with a negative-length
    // guard), string length (rope vs. resolved), arguments-object lengths
    // (guarded against exotic overrides), and typed-array view length.
    void compileGetArrayLength()
    {
        switch (m_node->arrayMode().type()) {
        case Array::Undecided:
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
            return;
        }

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            // ArrayStorage lengths can exceed INT32_MAX as unsigned; bail out
            // rather than return a negative int32.
            LValue length = m_out.load32(lowStorage(m_node->child2()), m_heaps.ArrayStorage_publicLength);
            speculate(Uncountable, noValue(), nullptr, m_out.lessThan(length, m_out.int32Zero));
            setInt32(length);
            return;
        }

        case Array::String: {
            LValue string = lowCell(m_node->child1());

            LBasicBlock ropePath = m_out.newBlock();
            LBasicBlock nonRopePath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isRopeString(string, m_node->child1()), rarely(ropePath), usually(nonRopePath));

            // Ropes keep their length on the JSRopeString itself; resolved
            // strings keep it on the StringImpl.
            LBasicBlock lastNext = m_out.appendTo(ropePath, nonRopePath);
            ValueFromBlock ropeLength = m_out.anchor(m_out.load32NonNegative(string, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            m_out.appendTo(nonRopePath, continuation);
            ValueFromBlock nonRopeLength = m_out.anchor(m_out.load32NonNegative(m_out.loadPtr(string, m_heaps.JSString_value), m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, ropeLength, nonRopeLength));
            return;
        }

        case Array::DirectArguments: {
            // Only valid while no argument has been "mapped" (captured); the
            // fast length is untrustworthy otherwise.
            LValue arguments = lowCell(m_node->child1());
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
            setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
            return;
        }

        case Array::ScopedArguments: {
            // Only valid while length/callee have not been overridden.
            LValue arguments = lowCell(m_node->child1());
            LValue storage = m_out.loadPtr(arguments, m_heaps.ScopedArguments_storage);
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notZero32(m_out.load8ZeroExt32(storage, m_heaps.ScopedArguments_Storage_overrodeThings)));
            setInt32(m_out.load32NonNegative(storage, m_heaps.ScopedArguments_Storage_totalLength));
            return;
        }

        default:
            if (m_node->arrayMode().isSomeTypedArrayView()) {
                setInt32(
                    m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
                return;
            }

            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
4047
4048 void compileGetVectorLength()
4049 {
4050 switch (m_node->arrayMode().type()) {
4051 case Array::ArrayStorage:
4052 case Array::SlowPutArrayStorage:
4053 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.ArrayStorage_vectorLength));
4054 return;
4055 default:
4056 return;
4057 }
4058 }
4059
4060 void compileCheckInBounds()
4061 {
4062 speculate(
4063 OutOfBounds, noValue(), 0,
4064 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
4065
4066 // Even though we claim to have JSValue result, no user of us should
4067 // depend on our value. Users of this node just need to maintain that
4068 // we dominate them.
4069 }
4070
    // Lowers GetByVal for every array mode. The common shape for indexed
    // shapes is: in-bounds mode does a direct load with a hole check; the
    // out-of-bounds-tolerant variants branch to a vmCall slow path on an
    // out-of-bounds index or a hole, then phi the fast and slow results.
    void compileGetByVal()
    {
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

            LValue base = lowCell(m_graph.varArgChild(m_node, 0));

            if (m_node->arrayMode().isInBounds()) {
                // An encoded-zero slot is a hole. SaneChain means a hole reads
                // as undefined; otherwise a hole triggers an OSR exit.
                LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
                LValue isHole = m_out.isZero64(result);
                if (m_node->arrayMode().isSaneChain()) {
                    DFG_ASSERT(
                        m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous, m_node->arrayMode().type());
                    result = m_out.select(
                        isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
                } else
                    speculate(LoadFromHole, noValue(), 0, isHole);
                setJSValue(result);
                return;
            }

            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Out-of-bounds index goes straight to the slow path.
            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(fastCase));

            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);

            // In-bounds but a hole also goes to the slow path.
            LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            m_out.branch(
                m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::Double: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

            if (m_node->arrayMode().isInBounds()) {
                // In a double butterfly, NaN (self-unequal) marks a hole.
                LValue result = m_out.loadDouble(
                    baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));

                if (!m_node->arrayMode().isSaneChain()) {
                    speculate(
                        LoadFromHole, noValue(), 0,
                        m_out.doubleNotEqualOrUnordered(result, result));
                }
                setDouble(result);
                break;
            }

            LBasicBlock inBounds = m_out.newBlock();
            LBasicBlock boxPath = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(inBounds));

            // In bounds: a NaN (hole) falls to the slow path; a real double
            // gets boxed into a JSValue.
            LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
            LValue doubleValue = m_out.loadDouble(
                baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            m_out.branch(
                m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
                rarely(slowCase), usually(boxPath));

            m_out.appendTo(boxPath, slowCase);
            ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::Undecided: {
            // No storage was ever allocated: any non-negative index yields
            // undefined; a negative index exits.
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

            speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
            setJSValue(m_out.constInt64(ValueUndefined));
            return;
        }

        case Array::DirectArguments: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

            // The fast storage is only trustworthy while no argument has been
            // "mapped" (captured by a scope).
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));

            LValue length = m_out.load32NonNegative(base, m_heaps.DirectArguments_length);
            auto isOutOfBounds = m_out.aboveOrEqual(index, length);
            if (m_node->arrayMode().isInBounds()) {
                speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
                TypedPointer address = m_out.baseIndex(
                    m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
                setJSValue(m_out.load64(address));
                return;
            }

            LBasicBlock inBounds = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBounds));

            LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
            TypedPointer address = m_out.baseIndex(
                m_heaps.DirectArguments_storage,
                base,
                m_out.zeroExt(index, pointerType()));
            ValueFromBlock fastResult = m_out.anchor(m_out.load64(address));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::ScopedArguments: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

            LValue storage = m_out.loadPtr(base, m_heaps.ScopedArguments_storage);
            LValue totalLength = m_out.load32NonNegative(
                storage, m_heaps.ScopedArguments_Storage_totalLength);
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.aboveOrEqual(index, totalLength));

            LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
            LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);

            // Index < namedLength lives in the scope (via a ScopeOffset
            // indirection); indices past that live in overflow storage.
            LBasicBlock namedCase = m_out.newBlock();
            LBasicBlock overflowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));

            LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);

            LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
            LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);

            TypedPointer address = m_out.baseIndex(
                m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
            LValue scopeOffset = m_out.load32(address);

            // An invalid scope offset means the argument was deleted/exotic.
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));

            address = m_out.baseIndex(
                m_heaps.JSLexicalEnvironment_variables, scope, m_out.zeroExtPtr(scopeOffset));
            ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
            m_out.jump(continuation);

            m_out.appendTo(overflowCase, continuation);

            address = m_out.baseIndex(
                m_heaps.ScopedArguments_Storage_storage, storage,
                m_out.zeroExtPtr(m_out.sub(index, namedLength)));
            LValue overflowValue = m_out.load64(address);
            speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
            ValueFromBlock overflowResult = m_out.anchor(overflowValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            LValue result = m_out.phi(Int64, namedResult, overflowResult);
            // Mask the result using the index/bound relation — presumably a
            // mitigation for speculative out-of-bounds reads; see
            // preciseIndexMask32's definition to confirm.
            result = preciseIndexMask32(result, index, totalLength);

            setJSValue(result);
            return;
        }

        case Array::Generic: {
            // Fully generic access: pick the most specific VM operation the
            // use kinds allow (object+string, object+symbol, or fully generic).
            if (m_graph.varArgChild(m_node, 0).useKind() == ObjectUse) {
                if (m_graph.varArgChild(m_node, 1).useKind() == StringUse) {
                    setJSValue(vmCall(
                        Int64, m_out.operation(operationGetByValObjectString), m_callFrame,
                        lowObject(m_graph.varArgChild(m_node, 0)), lowString(m_graph.varArgChild(m_node, 1))));
                    return;
                }

                if (m_graph.varArgChild(m_node, 1).useKind() == SymbolUse) {
                    setJSValue(vmCall(
                        Int64, m_out.operation(operationGetByValObjectSymbol), m_callFrame,
                        lowObject(m_graph.varArgChild(m_node, 0)), lowSymbol(m_graph.varArgChild(m_node, 1))));
                    return;
                }
            }
            setJSValue(vmCall(
                Int64, m_out.operation(operationGetByVal), m_callFrame,
                lowJSValue(m_graph.varArgChild(m_node, 0)), lowJSValue(m_graph.varArgChild(m_node, 1))));
            return;
        }

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            IndexedAbstractHeap& heap = m_heaps.ArrayStorage_vector;

            if (m_node->arrayMode().isInBounds()) {
                LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
                speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
                setJSValue(result);
                break;
            }

            LBasicBlock inBounds = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Bounds are checked against the vector length, not the public
            // length, since ArrayStorage can have values past publicLength.
            m_out.branch(
                m_out.aboveOrEqual(index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
                rarely(slowCase), usually(inBounds));

            LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            ValueFromBlock fastResult = m_out.anchor(result);
            m_out.branch(
                m_out.isZero64(result),
                rarely(slowCase), usually(continuation));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::String: {
            compileStringCharAt();
            return;
        }

        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array: {
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            TypedArrayType type = m_node->arrayMode().typedArrayType();
            ASSERT(isTypedView(type));
            {
                TypedPointer pointer = pointerIntoTypedArray(storage, index, type);

                if (isInt(type)) {
                    LValue result = loadFromIntTypedArray(pointer, type);
                    bool canSpeculate = true;
                    setIntTypedArrayLoadResult(result, type, canSpeculate);
                    return;
                }

                ASSERT(isFloat(type));

                // Float loads are widened to double for the DFG's DoubleRep.
                LValue result;
                switch (type) {
                case TypeFloat32:
                    result = m_out.floatToDouble(m_out.loadFloat(pointer));
                    break;
                case TypeFloat64:
                    result = m_out.loadDouble(pointer);
                    break;
                default:
                    DFG_CRASH(m_graph, m_node, "Bad typed array type");
                }

                setDouble(result);
                return;
            }
        }

        case Array::AnyTypedArray:
        case Array::ForceExit:
        case Array::SelectUsingArguments:
        case Array::SelectUsingPredictions:
        case Array::Unprofiled:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
4402
    // Lowers GetMyArgumentByVal / GetMyArgumentByValOutOfBounds: reads an
    // argument of the (possibly inlined) caller frame directly off the stack.
    // The argument count is a constant for non-varargs inline frames,
    // otherwise it is loaded from the frame's argument-count slot. The
    // out-of-bounds variant yields undefined instead of exiting.
    void compileGetMyArgumentByVal()
    {
        InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();

        LValue originalIndex = lowInt32(m_node->child2());

        LValue numberOfArgsIncludingThis;
        if (inlineCallFrame && !inlineCallFrame->isVarargs())
            numberOfArgsIncludingThis = m_out.constInt32(inlineCallFrame->argumentCountIncludingThis);
        else {
            VirtualRegister argumentCountRegister = AssemblyHelpers::argumentCount(inlineCallFrame);
            numberOfArgsIncludingThis = m_out.load32(payloadFor(argumentCountRegister));
        }

        LValue numberOfArgs = m_out.sub(numberOfArgsIncludingThis, m_out.int32One);
        LValue indexToCheck = originalIndex;
        if (m_node->numberOfArgumentsToSkip()) {
            // Skipped arguments (e.g. rest parameters) shift the index; the
            // add must not overflow int32.
            CheckValue* check = m_out.speculateAdd(indexToCheck, m_out.constInt32(m_node->numberOfArgumentsToSkip()));
            blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
            indexToCheck = check;
        }

        LValue isOutOfBounds = m_out.aboveOrEqual(indexToCheck, numberOfArgs);
        LBasicBlock continuation = nullptr;
        LBasicBlock lastNext = nullptr;
        ValueFromBlock slowResult;
        if (m_node->op() == GetMyArgumentByValOutOfBounds) {
            // Out-of-bounds tolerant: anchor undefined and bypass the load.
            LBasicBlock normalCase = m_out.newBlock();
            continuation = m_out.newBlock();

            slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
            m_out.branch(isOutOfBounds, unsure(continuation), unsure(normalCase));

            lastNext = m_out.appendTo(normalCase, continuation);
        } else
            speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);

        // +1 skips the |this| slot when indexing the argument registers.
        LValue index = m_out.add(indexToCheck, m_out.int32One);

        TypedPointer base;
        if (inlineCallFrame) {
            // A non-varargs inline frame with only |this| has no argument
            // slots at all; base stays null and we produce undefined below.
            if (inlineCallFrame->argumentCountIncludingThis > 1)
                base = addressFor(inlineCallFrame->argumentsWithFixup[0].virtualRegister());
        } else
            base = addressFor(virtualRegisterForArgument(0));

        LValue result;
        if (base) {
            LValue pointer = m_out.baseIndex(
                base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
            result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
            // Mask the loaded value with the index/bound relation — presumably
            // a mitigation for speculative out-of-bounds reads; confirm against
            // preciseIndexMask32's definition.
            result = preciseIndexMask32(result, indexToCheck, numberOfArgs);
        } else
            result = m_out.constInt64(JSValue::encode(jsUndefined()));

        if (m_node->op() == GetMyArgumentByValOutOfBounds) {
            ValueFromBlock normalResult = m_out.anchor(result);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            result = m_out.phi(Int64, slowResult, normalResult);
        }

        setJSValue(result);
    }
4468
    // Lowers PutByVal / PutByValDirect / PutByValAlias for every array mode.
    // Generic mode dispatches to the most specific VM operation the use kinds
    // and strict-mode flag allow. Indexed modes store directly, with hole /
    // out-of-bounds handling that depends on the mode: PutByValAlias skips all
    // checks, in-bounds modes speculate, and out-of-bounds-tolerant modes fall
    // back to a beyond-array-bounds VM call.
    void compilePutByVal()
    {
        Edge child1 = m_graph.varArgChild(m_node, 0); // base
        Edge child2 = m_graph.varArgChild(m_node, 1); // property/index
        Edge child3 = m_graph.varArgChild(m_node, 2); // value
        Edge child4 = m_graph.varArgChild(m_node, 3); // storage
        Edge child5 = m_graph.varArgChild(m_node, 4); // typed-array length

        ArrayMode arrayMode = m_node->arrayMode().modeForPut();
        switch (arrayMode.type()) {
        case Array::Generic: {
            if (child1.useKind() == CellUse) {
                // Cell base with a string or symbol key: use the specialized
                // operations, picked by direct-ness and strictness.
                V_JITOperation_ECCJ operation = nullptr;
                if (child2.useKind() == StringUse) {
                    if (m_node->op() == PutByValDirect) {
                        if (m_graph.isStrictModeFor(m_node->origin.semantic))
                            operation = operationPutByValDirectCellStringStrict;
                        else
                            operation = operationPutByValDirectCellStringNonStrict;
                    } else {
                        if (m_graph.isStrictModeFor(m_node->origin.semantic))
                            operation = operationPutByValCellStringStrict;
                        else
                            operation = operationPutByValCellStringNonStrict;
                    }
                    vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowString(child2), lowJSValue(child3));
                    return;
                }

                if (child2.useKind() == SymbolUse) {
                    if (m_node->op() == PutByValDirect) {
                        if (m_graph.isStrictModeFor(m_node->origin.semantic))
                            operation = operationPutByValDirectCellSymbolStrict;
                        else
                            operation = operationPutByValDirectCellSymbolNonStrict;
                    } else {
                        if (m_graph.isStrictModeFor(m_node->origin.semantic))
                            operation = operationPutByValCellSymbolStrict;
                        else
                            operation = operationPutByValCellSymbolNonStrict;
                    }
                    vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowSymbol(child2), lowJSValue(child3));
                    return;
                }
            }

            // Fully generic base/key: all-JSValue operation.
            V_JITOperation_EJJJ operation;
            if (m_node->op() == PutByValDirect) {
                if (m_graph.isStrictModeFor(m_node->origin.semantic))
                    operation = operationPutByValDirectStrict;
                else
                    operation = operationPutByValDirectNonStrict;
            } else {
                if (m_graph.isStrictModeFor(m_node->origin.semantic))
                    operation = operationPutByValStrict;
                else
                    operation = operationPutByValNonStrict;
            }

            vmCall(
                Void, m_out.operation(operation), m_callFrame,
                lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
            return;
        }

        default:
            break;
        }

        LValue base = lowCell(child1);
        LValue index = lowInt32(child2);
        LValue storage = lowStorage(child4);

        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);

            switch (arrayMode.type()) {
            case Array::Int32:
            case Array::Contiguous: {
                LValue value = lowJSValue(child3, ManualOperandSpeculation);

                // Int32 butterflies only admit int32 values; enforce here
                // since we lowered with ManualOperandSpeculation.
                if (arrayMode.type() == Array::Int32)
                    FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32Only, isNotInt32(value));

                TypedPointer elementPointer = m_out.baseIndex(
                    arrayMode.type() == Array::Int32 ?
                    m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
                    storage, m_out.zeroExtPtr(index), provenValue(child2));

                // PutByValAlias is known in-bounds and hole-free: plain store.
                if (m_node->op() == PutByValAlias) {
                    m_out.store64(value, elementPointer);
                    break;
                }

                // Emits the bounds check and, for out-of-bounds modes, the
                // slow call that may grow the array; control rejoins below.
                contiguousPutByValOutOfBounds(
                    m_graph.isStrictModeFor(m_node->origin.semantic)
                        ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                        : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
                    base, storage, index, value, continuation);

                m_out.store64(value, elementPointer);
                break;
            }

            case Array::Double: {
                LValue value = lowDouble(child3);

                // NaN encodes a hole in double storage, so only "real"
                // doubles may be stored.
                FTL_TYPE_CHECK(
                    doubleValue(value), child3, SpecDoubleReal,
                    m_out.doubleNotEqualOrUnordered(value, value));

                TypedPointer elementPointer = m_out.baseIndex(
                    m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
                    provenValue(child2));

                if (m_node->op() == PutByValAlias) {
                    m_out.storeDouble(value, elementPointer);
                    break;
                }

                contiguousPutByValOutOfBounds(
                    m_graph.isStrictModeFor(m_node->origin.semantic)
                        ? (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
                        : (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
                    base, storage, index, value, continuation);

                m_out.storeDouble(value, elementPointer);
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad array type");
            }

            m_out.jump(continuation);
            m_out.appendTo(continuation, outerLastNext);
            return;
        }

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            LValue value = lowJSValue(child3);

            TypedPointer elementPointer = m_out.baseIndex(
                m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(index),
                provenValue(child2));

            if (m_node->op() == PutByValAlias) {
                m_out.store64(value, elementPointer);
                return;
            }

            // In-bounds mode: storing into a hole would need the numValues /
            // publicLength bookkeeping below, so exit instead.
            if (arrayMode.isInBounds()) {
                speculate(StoreToHole, noValue(), 0, m_out.isZero64(m_out.load64(elementPointer)));
                m_out.store64(value, elementPointer);
                return;
            }

            LValue isOutOfBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength));

            auto slowPathFunction = m_graph.isStrictModeFor(m_node->origin.semantic)
                ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict);
            if (!arrayMode.isOutOfBounds()) {
                // Mode tolerates holes but not out-of-bounds: exit on OOB and
                // statically fold the branch below to the in-bounds side.
                speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
                isOutOfBounds = m_out.booleanFalse;
            }

            LBasicBlock inBoundCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock holeCase = m_out.newBlock();
            LBasicBlock doStoreCase = m_out.newBlock();
            LBasicBlock lengthUpdateCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBoundCase));

            LBasicBlock lastNext = m_out.appendTo(slowCase, inBoundCase);
            vmCall(
                Void, m_out.operation(slowPathFunction),
                m_callFrame, base, index, value);
            m_out.jump(continuation);


            if (arrayMode.isSlowPut()) {
                // SlowPut: a hole may be intercepted (e.g. by a setter on the
                // prototype chain), so holes take the generic slow call.
                m_out.appendTo(inBoundCase, doStoreCase);
                m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(slowCase), usually(doStoreCase));
            } else {
                // Plain ArrayStorage: filling a hole bumps numValuesInVector
                // and, if needed, extends publicLength to index + 1.
                m_out.appendTo(inBoundCase, holeCase);
                m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(holeCase), usually(doStoreCase));

                m_out.appendTo(holeCase, lengthUpdateCase);
                m_out.store32(
                    m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                    storage, m_heaps.ArrayStorage_numValuesInVector);
                m_out.branch(
                    m_out.below(
                        index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_publicLength)),
                    unsure(doStoreCase), unsure(lengthUpdateCase));

                m_out.appendTo(lengthUpdateCase, doStoreCase);
                m_out.store32(
                    m_out.add(index, m_out.int32One),
                    storage, m_heaps.ArrayStorage_publicLength);
                m_out.jump(doStoreCase);
            }

            m_out.appendTo(doStoreCase, continuation);
            m_out.store64(value, elementPointer);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return;
        }

        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array: {
            TypedArrayType type = arrayMode.typedArrayType();

            ASSERT(isTypedView(type));
            {
                // Element address = storage + (index << logElementSize).
                TypedPointer pointer = TypedPointer(
                    m_heaps.typedArrayProperties,
                    m_out.add(
                        storage,
                        m_out.shl(
                            m_out.zeroExt(index, pointerType()),
                            m_out.constIntPtr(logElementSize(type)))));

                LValue valueToStore;

                if (isInt(type)) {
                    LValue intValue = getIntTypedArrayStoreOperand(child3, isClamped(type));

                    valueToStore = intValue;
                } else /* !isInt(type) */ {
                    LValue value = lowDouble(child3);
                    switch (type) {
                    case TypeFloat32:
                        valueToStore = m_out.doubleToFloat(value);
                        break;
                    case TypeFloat64:
                        valueToStore = value;
                        break;
                    default:
                        DFG_CRASH(m_graph, m_node, "Bad typed array type");
                    }
                }

                if (arrayMode.isInBounds() || m_node->op() == PutByValAlias)
                    m_out.store(valueToStore, pointer, storeType(type));
                else {
                    // Out-of-bounds typed-array stores are silently dropped,
                    // but a store to a neutered view must still exit.
                    LBasicBlock isInBounds = m_out.newBlock();
                    LBasicBlock isOutOfBounds = m_out.newBlock();
                    LBasicBlock continuation = m_out.newBlock();

                    m_out.branch(
                        m_out.aboveOrEqual(index, lowInt32(child5)),
                        unsure(isOutOfBounds), unsure(isInBounds));

                    LBasicBlock lastNext = m_out.appendTo(isInBounds, isOutOfBounds);
                    m_out.store(valueToStore, pointer, storeType(type));
                    m_out.jump(continuation);

                    m_out.appendTo(isOutOfBounds, continuation);
                    speculateTypedArrayIsNotNeutered(base);
                    m_out.jump(continuation);

                    m_out.appendTo(continuation, lastNext);
                }

                return;
            }
        }

        case Array::AnyTypedArray:
        case Array::String:
        case Array::DirectArguments:
        case Array::ForceExit:
        case Array::Generic:
        case Array::ScopedArguments:
        case Array::SelectUsingArguments:
        case Array::SelectUsingPredictions:
        case Array::Undecided:
        case Array::Unprofiled:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            break;
        }
    }
4770
4771 void compilePutAccessorById()
4772 {
4773 LValue base = lowCell(m_node->child1());
4774 LValue accessor = lowCell(m_node->child2());
4775 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4776 vmCall(
4777 Void,
4778 m_out.operation(m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById),
4779 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor);
4780 }
4781
4782 void compilePutGetterSetterById()
4783 {
4784 LValue base = lowCell(m_node->child1());
4785 LValue getter = lowJSValue(m_node->child2());
4786 LValue setter = lowJSValue(m_node->child3());
4787 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4788 vmCall(
4789 Void, m_out.operation(operationPutGetterSetter),
4790 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter);
4791
4792 }
4793
4794 void compilePutAccessorByVal()
4795 {
4796 LValue base = lowCell(m_node->child1());
4797 LValue subscript = lowJSValue(m_node->child2());
4798 LValue accessor = lowCell(m_node->child3());
4799 vmCall(
4800 Void,
4801 m_out.operation(m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal),
4802 m_callFrame, base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor);
4803 }
4804
4805 void compileDeleteById()
4806 {
4807 LValue base = lowJSValue(m_node->child1());
4808 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4809 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteById), m_callFrame, base, m_out.constIntPtr(uid))));
4810 }
4811
4812 void compileDeleteByVal()
4813 {
4814 LValue base = lowJSValue(m_node->child1());
4815 LValue subscript = lowJSValue(m_node->child2());
4816 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteByVal), m_callFrame, base, subscript)));
4817 }
4818
    void compileArrayPush()
    {
        // Lowers ArrayPush. Var-arg children: [0] = storage (butterfly),
        // [1] = the base array cell, [2..] = the values to push. The result is
        // the new length, boxed as an Int32 JSValue.
        LValue base = lowCell(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 0));
        unsigned elementOffset = 2;
        unsigned elementCount = m_node->numChildren() - elementOffset;

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous:
        case Array::Double: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            if (elementCount == 1) {
                // Single-element push: store at publicLength and bump it, if the
                // vector has room; otherwise call out to the VM.
                LValue value;
                Output::StoreType storeType;

                Edge& element = m_graph.varArgChild(m_node, elementOffset);
                speculate(element);
                if (m_node->arrayMode().type() != Array::Double) {
                    // Int32/Contiguous store the boxed 64-bit JSValue.
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    // Double arrays store the raw double.
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // Out of vector capacity -> slow path (reallocation etc. in the VM).
                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
                    unsure(slowPath), unsure(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store(
                    value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType);
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                LValue operation;
                if (m_node->arrayMode().type() != Array::Double)
                    operation = m_out.operation(operationArrayPush);
                else
                    operation = m_out.operation(operationArrayPushDouble);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, operation, m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element push. Speculate on every element up front so that
            // no OSR exit can happen after we have started mutating state.
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge element = m_graph.varArgChild(m_node, elementIndex + elementOffset);
                speculate(element);
            }

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength));

            m_out.branch(beyondVectorLength, unsure(slowPath), unsure(fastPath));

            // Fast path: room in the vector. Bump the length now and write the
            // elements directly into the butterfly in the shared `setup` block.
            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight));
            m_out.jump(setup);

            // Slow path: stage the elements in a VM scratch buffer instead, and
            // let operationArrayPush(Double)Multiple append them.
            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            static_assert(sizeof(EncodedJSValue) == sizeof(double), "");
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            // Mark the scratch buffer active while values live in it; cleared
            // again after the slow call below.
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            // `buffer` is either the butterfly slot for prevLength (fast) or the
            // scratch buffer (slow); the element stores are shared.
            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value;
                Output::StoreType storeType;
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                m_out.store(value, m_out.baseIndex(heap, buffer, m_out.constInt32(elementIndex), jsNumber(elementIndex)), storeType);
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, unsure(slowCallPath), unsure(continuation));

            m_out.appendTo(slowCallPath, continuation);
            LValue operation;
            if (m_node->arrayMode().type() != Array::Double)
                operation = m_out.operation(operationArrayPushMultiple);
            else
                operation = m_out.operation(operationArrayPushDoubleMultiple);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            // Release the scratch buffer now that the VM has consumed it.
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            // This ensures that the result of ArrayPush is Int32 in AI.
            int32_t largestPositiveInt32Length = 0x7fffffff - elementCount;

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);
            // Refuse to handle bizarre lengths.
            speculate(Uncountable, noValue(), nullptr, m_out.above(prevLength, m_out.constInt32(largestPositiveInt32Length)));

            if (elementCount == 1) {
                // Single-element push into ArrayStorage: also maintains
                // numValuesInVector alongside publicLength.
                Edge& element = m_graph.varArgChild(m_node, elementOffset);

                LValue value = lowJSValue(element);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowPath), usually(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store64(
                    value, m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(prevLength)));
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
                m_out.store32(
                    m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                    storage, m_heaps.ArrayStorage_numValuesInVector);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, m_out.operation(operationArrayPush), m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element push into ArrayStorage: same shape as the
            // Int32/Contiguous/Double multi-element path above, but with the
            // ArrayStorage heaps and vector offset.
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength));

            m_out.branch(beyondVectorLength, rarely(slowPath), usually(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.constInt32(elementCount)),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight, ArrayStorage::vectorOffset()));
            m_out.jump(setup);

            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            // Mark the scratch buffer active while values live in it.
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value = lowJSValue(element);
                m_out.store64(value, m_out.baseIndex(m_heaps.ArrayStorage_vector.atAnyIndex(), buffer, m_out.constIntPtr(elementIndex), ScaleEight));
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, rarely(slowCallPath), usually(continuation));

            m_out.appendTo(slowCallPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationArrayPushMultiple), m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            // Release the scratch buffer now that the VM has consumed it.
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5048
5049 std::pair<LValue, LValue> populateSliceRange(LValue start, LValue end, LValue length)
5050 {
5051 // end can be nullptr.
5052 ASSERT(start);
5053 ASSERT(length);
5054
5055 auto pickIndex = [&] (LValue index) {
5056 return m_out.select(m_out.greaterThanOrEqual(index, m_out.int32Zero),
5057 m_out.select(m_out.above(index, length), length, index),
5058 m_out.select(m_out.lessThan(m_out.add(length, index), m_out.int32Zero), m_out.int32Zero, m_out.add(length, index)));
5059 };
5060
5061 LValue endBoundary = length;
5062 if (end)
5063 endBoundary = pickIndex(end);
5064 LValue startIndex = pickIndex(start);
5065 return std::make_pair(startIndex, endBoundary);
5066 }
5067
    void compileArraySlice()
    {
        // Lowers ArraySlice. Var-arg children: [0] = the array cell, optional
        // [1]/[2] = start/end indices, last child = the source storage
        // (butterfly). Allocates a fresh array and copies the range into it.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        LValue sourceStorage = lowStorage(m_graph.varArgChild(m_node, m_node->numChildren() - 1));
        LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);

        LValue startIndex = nullptr;
        LValue resultLength = nullptr;
        if (m_node->numChildren() == 2) {
            // No explicit range: slice the entire array.
            startIndex = m_out.constInt32(0);
            resultLength = inputLength;
        } else {
            LValue start = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue end = nullptr;
            if (m_node->numChildren() != 3)
                end = lowInt32(m_graph.varArgChild(m_node, 2));

            // Clamp start/end to [0, inputLength], handling negative indices.
            auto range = populateSliceRange(start, end, inputLength);
            startIndex = range.first;
            LValue endBoundary = range.second;

            // An inverted range yields an empty result.
            resultLength = m_out.select(m_out.belowOrEqual(startIndex, endBoundary),
                m_out.sub(endBoundary, startIndex),
                m_out.constInt32(0));
        }

        ArrayValues arrayResult;
        {
            LValue indexingType = m_out.load8ZeroExt32(lowCell(m_graph.varArgChild(m_node, 0)), m_heaps.JSCell_indexingTypeAndMisc);
            // We can ignore the writability of the cell since we won't write to the source.
            indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllWritableArrayTypesAndHistory));
            // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
            // to ensure the incoming array is one of the original array structures
            // with one of the following indexing shapes: Int32, Contiguous, Double.
            LValue structure = m_out.select(
                m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
                weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
                m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
            arrayResult = allocateJSArray(resultLength, resultLength, structure, indexingType, false, false);
        }

        // Copy loop: raw 64-bit loads/stores work for all three shapes, since
        // Int32/Contiguous hold boxed JSValues and Double holds raw doubles,
        // all 8 bytes wide (ScaleEight).
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        resultLength = m_out.zeroExtPtr(resultLength);
        ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
        ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));

        // Skip the loop entirely when the result is empty.
        m_out.branch(
            m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loop, continuation);
        LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
        LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
        LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
        m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
        LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
        m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
        m_out.branch(
            m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);

        // Fence before the new array escapes; see mutatorFence() for why this
        // is required after fast-path allocation and initialization.
        mutatorFence();
        setJSValue(arrayResult.array);
    }
5138
    void compileArrayIndexOf()
    {
        // Lowers ArrayIndexOf. Var-arg children: [1] = the search element;
        // the storage edge is the last child ([2] without a fromIndex, [3]
        // with one). Result is the found index as Int32, or -1.
        LValue storage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
        LValue length = m_out.load32(storage, m_heaps.Butterfly_publicLength);

        LValue startIndex;
        if (m_node->numChildren() == 4) {
            // Clamp the explicit fromIndex to [0, length]; negative values
            // count back from the end (same clamping as populateSliceRange).
            startIndex = lowInt32(m_graph.varArgChild(m_node, 2));
            startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.int32Zero),
                m_out.select(m_out.above(startIndex, length), length, startIndex),
                m_out.select(m_out.lessThan(m_out.add(length, startIndex), m_out.int32Zero), m_out.int32Zero, m_out.add(length, startIndex)));
        } else
            startIndex = m_out.int32Zero;

        Edge& searchElementEdge = m_graph.varArgChild(m_node, 1);
        switch (searchElementEdge.useKind()) {
        case Int32Use:
        case ObjectUse:
        case SymbolUse:
        case OtherUse:
        case DoubleRepUse: {
            // These use kinds allow an inline linear scan: one comparison per
            // element, chosen per use kind below.
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock loopNext = m_out.newBlock();
            LBasicBlock notFound = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Lower the search element according to its use kind; the ASSERTs
            // document which array shape each use kind is paired with.
            LValue searchElement;
            switch (searchElementEdge.useKind()) {
            case Int32Use:
                ASSERT(m_node->arrayMode().type() == Array::Int32);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case ObjectUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowObject(searchElementEdge);
                break;
            case SymbolUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowSymbol(searchElementEdge);
                break;
            case OtherUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case DoubleRepUse:
                ASSERT(m_node->arrayMode().type() == Array::Double);
                searchElement = lowDouble(searchElementEdge);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            // Widen the loop counters to pointer width for indexing.
            startIndex = m_out.zeroExtPtr(startIndex);
            length = m_out.zeroExtPtr(length);

            ValueFromBlock initialStartIndex = m_out.anchor(startIndex);
            m_out.jump(loopHeader);

            LBasicBlock lastNext = m_out.appendTo(loopHeader, loopBody);
            LValue index = m_out.phi(pointerType(), initialStartIndex);
            m_out.branch(m_out.notEqual(index, length), unsure(loopBody), unsure(notFound));

            m_out.appendTo(loopBody, loopNext);
            ValueFromBlock foundResult = m_out.anchor(index);
            switch (searchElementEdge.useKind()) {
            case Int32Use: {
                // Empty value is ignored because of TagTypeNumber.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedInt32Properties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case ObjectUse:
            case SymbolUse:
            case OtherUse: {
                // Empty value never matches against non-empty JS values.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case DoubleRepUse: {
                // Empty value is ignored because of NaN.
                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, storage, index));
                m_out.branch(m_out.doubleEqual(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            m_out.appendTo(loopNext, notFound);
            LValue nextIndex = m_out.add(index, m_out.intPtrOne);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.jump(loopHeader);

            m_out.appendTo(notFound, continuation);
            ValueFromBlock notFoundResult = m_out.anchor(m_out.constIntPtr(-1));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.castToInt32(m_out.phi(pointerType(), notFoundResult, foundResult)));
            break;
        }

        case StringUse:
            // String comparison is done by the VM.
            ASSERT(m_node->arrayMode().type() == Array::Contiguous);
            setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfString), m_callFrame, storage, lowString(searchElementEdge), startIndex));
            break;

        case UntypedUse:
            // Fully generic search element: pick the VM helper by array shape.
            switch (m_node->arrayMode().type()) {
            case Array::Double:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueDouble), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            case Array::Int32:
            case Array::Contiguous:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueInt32OrContiguous), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5272
5273
    void compileArrayPop()
    {
        // Lowers ArrayPop. Result is the popped value, or undefined for an
        // empty array. child1 = the array cell, child2 = its storage.
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child2());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            // First result: undefined for the empty-array case.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));

            // Fast case: shrink the length and clear the vacated slot. If the
            // slot held a hole (zero / NaN depending on shape), fall through to
            // the slow case to recover.
            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
            if (m_node->arrayMode().type() != Array::Double) {
                LValue result = m_out.load64(pointer);
                m_out.store64(m_out.int64Zero, pointer);
                results.append(m_out.anchor(result));
                // Zero means the slot was a hole.
                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));
            } else {
                LValue result = m_out.loadDouble(pointer);
                // Double arrays use PNaN as the hole marker.
                m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
                results.append(m_out.anchor(boxDouble(result)));
                // NaN != NaN, so this detects the hole case.
                m_out.branch(
                    m_out.doubleEqual(result, result),
                    usually(continuation), rarely(slowCase));
            }

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        case Array::ArrayStorage: {
            LBasicBlock vectorLengthCheckCase = m_out.newBlock();
            LBasicBlock popCheckCase = m_out.newBlock();
            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);

            // First result: undefined for the empty-array case.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(vectorLengthCheckCase));

            // The last index must be inside the vector; otherwise let the VM
            // handle it.
            LBasicBlock lastNext = m_out.appendTo(vectorLengthCheckCase, popCheckCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.branch(
                m_out.aboveOrEqual(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)), rarely(slowCase), usually(popCheckCase));

            // A zero slot is a hole; holes also go to the slow case.
            m_out.appendTo(popCheckCase, fastCase);
            TypedPointer pointer = m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(newLength));
            LValue result = m_out.load64(pointer);
            m_out.branch(m_out.notZero64(result), usually(fastCase), rarely(slowCase));

            // Fast case: shrink length, clear the slot, and keep
            // numValuesInVector in sync.
            m_out.appendTo(fastCase, slowCase);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store64(m_out.int64Zero, pointer);
            m_out.store32(
                m_out.sub(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            results.append(m_out.anchor(result));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPop), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5373
5374 void compilePushWithScope()
5375 {
5376 LValue parentScope = lowCell(m_node->child1());
5377 auto objectEdge = m_node->child2();
5378 if (objectEdge.useKind() == ObjectUse) {
5379 LValue object = lowNonNullObject(objectEdge);
5380 LValue result = vmCall(Int64, m_out.operation(operationPushWithScopeObject), m_callFrame, parentScope, object);
5381 setJSValue(result);
5382 } else {
5383 ASSERT(objectEdge.useKind() == UntypedUse);
5384 LValue object = lowJSValue(m_node->child2());
5385 LValue result = vmCall(Int64, m_out.operation(operationPushWithScope), m_callFrame, parentScope, object);
5386 setJSValue(result);
5387 }
5388 }
5389
    void compileCreateActivation()
    {
        // Lowers CreateActivation: materializes a JSLexicalEnvironment for the
        // given scope and symbol table, with every variable slot preset to
        // initializationValue (undefined or the TDZ value).
        LValue scope = lowCell(m_node->child1());
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
        JSValue initializationValue = m_node->initializationValueForActivation();
        ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
        if (table->singletonScope()->isStillValid()) {
            // While the singleton watchpoint is valid we must go through the
            // VM so the singleton bookkeeping stays correct; no inline path.
            LValue callResult = vmCall(
                Int64,
                m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
                scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
            setJSValue(callResult);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Inline allocation; jumps to slowPath if the allocator fails.
        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the activation, so the
        // activation must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

        // Preset every variable slot to the initialization value.
        for (unsigned i = 0; i < table->scopeSize(); ++i) {
            m_out.store64(
                m_out.constInt64(JSValue::encode(initializationValue)),
                fastObject, m_heaps.JSLexicalEnvironment_variables[i]);
        }

        // Fence before the activation escapes; see mutatorFence().
        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: lazily-generated call to the same runtime operation used
        // by the singleton case above.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
            },
            scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5447
    void compileNewFunction()
    {
        // Lowers NewFunction and its generator/async variants: allocates the
        // appropriate JSFunction subclass for `executable` closed over `scope`.
        ASSERT(m_node->op() == NewFunction || m_node->op() == NewGeneratorFunction || m_node->op() == NewAsyncGeneratorFunction || m_node->op() == NewAsyncFunction);
        bool isGeneratorFunction = m_node->op() == NewGeneratorFunction;
        bool isAsyncFunction = m_node->op() == NewAsyncFunction;
        bool isAsyncGeneratorFunction = m_node->op() == NewAsyncGeneratorFunction;

        LValue scope = lowCell(m_node->child1());

        FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
        if (executable->singletonFunction()->isStillValid()) {
            // While the singleton watchpoint is valid we must go through the
            // VM; pick the operation matching the node's opcode.
            LValue callResult =
                isGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncFunction ? vmCall(Int64, m_out.operation(operationNewAsyncFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewAsyncGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                vmCall(Int64, m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
            setJSValue(callResult);
            return;
        }

        // Select the structure for the function object being created.
        RegisteredStructure structure = m_graph.registerStructure(
            [&] () {
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                switch (m_node->op()) {
                case NewGeneratorFunction:
                    return globalObject->generatorFunctionStructure();
                case NewAsyncFunction:
                    return globalObject->asyncFunctionStructure();
                case NewAsyncGeneratorFunction:
                    return globalObject->asyncGeneratorFunctionStructure();
                case NewFunction:
                    return JSFunction::selectStructureForNewFuncExp(globalObject, m_node->castOperand<FunctionExecutable*>());
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            }());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Inline allocation of the matching JSFunction subclass; jumps to
        // slowPath if the allocator fails.
        LValue fastObject =
            isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncFunction ? allocateObject<JSAsyncFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncGeneratorFunction ? allocateObject<JSAsyncGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath);


        // We don't need memory barriers since we just fast-created the function, so it
        // must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);

        // Fence before the function escapes; see mutatorFence().
        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Slow path: lazily-generated call to the "invalidated reallocation
        // watchpoint" variant matching the node's opcode.
        Vector<LValue> slowPathArguments;
        slowPathArguments.append(scope);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                auto* operation = operationNewFunctionWithInvalidatedReallocationWatchpoint;
                if (isGeneratorFunction)
                    operation = operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncFunction)
                    operation = operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncGeneratorFunction)
                    operation = operationNewAsyncGeneratorFunctionWithInvalidatedReallocationWatchpoint;

                return createLazyCallGenerator(vm, operation,
                    locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(executable));
            },
            slowPathArguments);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5534
    void compileCreateDirectArguments()
    {
        // Materializes a DirectArguments object for the current (possibly inlined)
        // frame and copies the passed arguments into its inline storage.
        // FIXME: A more effective way of dealing with the argument count and callee is to have
        // them be explicit arguments to this node.
        // https://bugs.webkit.org/show_bug.cgi?id=142207

        RegisteredStructure structure =
            m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure());

        // numParameters() counts 'this', so subtract one for the declared argument
        // count; the object must provide at least this much storage.
        unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        ArgumentsLength length = getArgumentsLength();

        LValue fastObject;
        if (length.isKnown) {
            // Statically-known argument count: fixed-size inline allocation.
            fastObject = allocateObject<DirectArguments>(
                DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
                m_out.intPtrZero, slowPath);
        } else {
            // Dynamic count: size = storageOffset + 8 * length (shl by 3), clamped
            // from below by the minimum-capacity allocation size.
            LValue size = m_out.add(
                m_out.shl(length.value, m_out.constInt32(3)),
                m_out.constInt32(DirectArguments::storageOffset()));

            size = m_out.select(
                m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));

            fastObject = allocateVariableSizedObject<DirectArguments>(
                m_out.zeroExtPtr(size), structure, m_out.intPtrZero, slowPath);
        }

        // Initialize the header fields; mapped/modified descriptors start null.
        m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
        m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        // Allocation failure: call the runtime, passing the dynamic length in a
        // register and structure/minCapacity as immediates.
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateDirectArguments, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImm32(minCapacity));
            }, length.value);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);

        if (length.isKnown) {
            // Known count: straight-line copy of each argument slot.
            VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
            for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
                m_out.store64(
                    m_out.load64(addressFor(start + i)),
                    result, m_heaps.DirectArguments_storage[i]);
            }
        } else {
            // Unknown count: copy in a loop counting the index down toward zero.
            LValue stackBase = getArgumentsStart();

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock end = m_out.newBlock();

            ValueFromBlock originalLength;
            if (minCapacity) {
                // minCapacity > 0 guarantees at least one iteration, so the loop
                // can be entered unconditionally.
                LValue capacity = m_out.select(
                    m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                    length.value,
                    m_out.constInt32(minCapacity));
                LValue originalLengthValue = m_out.zeroExtPtr(capacity);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.jump(loop);
            } else {
                // Length may be zero: skip the loop entirely in that case.
                LValue originalLengthValue = m_out.zeroExtPtr(length.value);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop));
            }

            lastNext = m_out.appendTo(loop, end);
            // previousIndex runs length..1; index = previousIndex - 1 is the slot
            // copied this iteration.
            LValue previousIndex = m_out.phi(pointerType(), originalLength);
            LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
            m_out.store64(
                m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
                m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
            ValueFromBlock nextIndex = m_out.anchor(index);
            m_out.addIncomingToPhi(previousIndex, nextIndex);
            m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));

            m_out.appendTo(end, lastNext);
        }

        mutatorFence();

        setJSValue(result);
    }
5641
5642 void compileCreateScopedArguments()
5643 {
5644 LValue scope = lowCell(m_node->child1());
5645
5646 LValue result = vmCall(
5647 Int64, m_out.operation(operationCreateScopedArguments), m_callFrame,
5648 weakPointer(
5649 m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()),
5650 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
5651
5652 setJSValue(result);
5653 }
5654
5655 void compileCreateClonedArguments()
5656 {
5657 LValue result = vmCall(
5658 Int64, m_out.operation(operationCreateClonedArguments), m_callFrame,
5659 weakPointer(
5660 m_graph.globalObjectFor(m_node->origin.semantic)->clonedArgumentsStructure()),
5661 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
5662
5663 setJSValue(result);
5664 }
5665
    void compileCreateRest()
    {
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            // Fast path (guarded by the having-a-bad-time watchpoint): allocate an
            // uninitialized contiguous array and copy the rest arguments into it
            // with a down-counting loop.
            LBasicBlock continuation = m_out.newBlock();
            LValue arrayLength = lowInt32(m_node->child1());
            LBasicBlock loopStart = m_out.newBlock();
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            RegisteredStructure structure = m_graph.registerStructure(globalObject->originalRestParameterStructure());
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
            LValue array = arrayValues.array;
            LValue butterfly = arrayValues.butterfly;
            ValueFromBlock startLength = m_out.anchor(arrayLength);
            // The first rest argument lives numberOfArgumentsToSkip register slots
            // past the start of the argument region.
            LValue argumentRegion = m_out.add(getArgumentsStart(), m_out.constInt64(sizeof(Register) * m_node->numberOfArgumentsToSkip()));
            m_out.branch(m_out.equal(arrayLength, m_out.constInt32(0)),
                unsure(continuation), unsure(loopStart));

            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
            // Walk the offset from arrayLength - 1 down to 0, moving one argument
            // per iteration.
            LValue phiOffset = m_out.phi(Int32, startLength);
            LValue currentOffset = m_out.sub(phiOffset, m_out.int32One);
            m_out.addIncomingToPhi(phiOffset, m_out.anchor(currentOffset));
            LValue loadedValue = m_out.load64(m_out.baseIndex(m_heaps.variables, argumentRegion, m_out.zeroExtPtr(currentOffset)));
            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
            m_out.store64(loadedValue, m_out.baseIndex(heap, butterfly, m_out.zeroExtPtr(currentOffset)));
            m_out.branch(m_out.equal(currentOffset, m_out.constInt32(0)), unsure(continuation), unsure(loopStart));

            m_out.appendTo(continuation, lastNext);
            // The array was just allocated, so a mutator fence (no write barrier)
            // suffices before publishing it.
            mutatorFence();
            setJSValue(array);
            return;
        }

        // Slow path: the runtime builds the rest array from the stack region.
        LValue arrayLength = lowInt32(m_node->child1());
        LValue argumentStart = getArgumentsStart();
        LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        setJSValue(vmCall(
            Int64, m_out.operation(operationCreateRest), m_callFrame, argumentStart, numberOfArgumentsToSkip, arrayLength));
    }
5703
5704 void compileGetRestLength()
5705 {
5706 LBasicBlock nonZeroLength = m_out.newBlock();
5707 LBasicBlock continuation = m_out.newBlock();
5708
5709 ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0));
5710
5711 LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
5712 LValue argumentsLength = getArgumentsLength().value;
5713 m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip),
5714 unsure(nonZeroLength), unsure(continuation));
5715
5716 LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation);
5717 ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip));
5718 m_out.jump(continuation);
5719
5720 m_out.appendTo(continuation, lastNext);
5721 setInt32(m_out.phi(Int32, zeroLengthResult, nonZeroLengthResult));
5722 }
5723
    void compileObjectKeys()
    {
        switch (m_node->child1().useKind()) {
        case ObjectUse: {
            if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
                // Fast path: reuse the own-keys cache hanging off the structure's
                // rare data when it exists, falling back to the runtime otherwise.
                LBasicBlock notNullCase = m_out.newBlock();
                LBasicBlock rareDataCase = m_out.newBlock();
                LBasicBlock useCacheCase = m_out.newBlock();
                LBasicBlock slowButArrayBufferCase = m_out.newBlock();
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue object = lowObject(m_node->child1());
                LValue structure = loadStructure(object);
                LValue previousOrRareData = m_out.loadPtr(structure, m_heaps.Structure_previousOrRareData);
                m_out.branch(m_out.notNull(previousOrRareData), unsure(notNullCase), unsure(slowCase));

                LBasicBlock lastNext = m_out.appendTo(notNullCase, rareDataCase);
                // previousOrRareData holds rare data only when the pointee is not
                // itself a Structure; check its structure ID to distinguish.
                m_out.branch(
                    m_out.notEqual(m_out.load32(previousOrRareData, m_heaps.JSCell_structureID), m_out.constInt32(m_graph.m_vm.structureStructure->structureID())),
                    unsure(rareDataCase), unsure(slowCase));

                m_out.appendTo(rareDataCase, useCacheCase);
                // Null and the sentinel value (== 1) both mean "no usable cache",
                // so one unsigned <= sentinel comparison rejects both.
                ASSERT(bitwise_cast<uintptr_t>(StructureRareData::cachedOwnKeysSentinel()) == 1);
                LValue cachedOwnKeys = m_out.loadPtr(previousOrRareData, m_heaps.StructureRareData_cachedOwnKeys);
                m_out.branch(m_out.belowOrEqual(cachedOwnKeys, m_out.constIntPtr(bitwise_cast<void*>(StructureRareData::cachedOwnKeysSentinel()))), unsure(slowCase), unsure(useCacheCase));

                m_out.appendTo(useCacheCase, slowButArrayBufferCase);
                // Cache hit: allocate a copy-on-write array whose butterfly points
                // directly into the cached immutable butterfly's data.
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                RegisteredStructure arrayStructure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));
                LValue fastArray = allocateObject<JSArray>(arrayStructure, m_out.addPtr(cachedOwnKeys, JSImmutableButterfly::offsetOfData()), slowButArrayBufferCase);
                ValueFromBlock fastResult = m_out.anchor(fastArray);
                m_out.jump(continuation);

                m_out.appendTo(slowButArrayBufferCase, slowCase);
                // Inline allocation failed but the cached buffer is still good:
                // let the runtime build the array from it.
                LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(arrayStructure), cachedOwnKeys);
                ValueFromBlock slowButArrayBufferResult = m_out.anchor(slowArray);
                m_out.jump(continuation);

                m_out.appendTo(slowCase, continuation);
                // No cache: full runtime Object.keys on the object.
                VM& vm = this->vm();
                LValue slowResultValue = lazySlowPath(
                    [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                        return createLazyCallGenerator(vm,
                            operationObjectKeysObject, locations[0].directGPR(), locations[1].directGPR());
                    },
                    object);
                ValueFromBlock slowResult = m_out.anchor(slowResultValue);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(pointerType(), fastResult, slowButArrayBufferResult, slowResult));
                break;
            }
            // Not watching the watchpoint: always call the runtime.
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeysObject), m_callFrame, lowObject(m_node->child1())));
            break;
        }
        case UntypedUse:
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeys), m_callFrame, lowJSValue(m_node->child1())));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5789
5790 void compileObjectCreate()
5791 {
5792 switch (m_node->child1().useKind()) {
5793 case ObjectUse:
5794 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreateObject), m_callFrame, lowObject(m_node->child1())));
5795 break;
5796 case UntypedUse:
5797 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreate), m_callFrame, lowJSValue(m_node->child1())));
5798 break;
5799 default:
5800 RELEASE_ASSERT_NOT_REACHED();
5801 break;
5802 }
5803 }
5804
5805 void compileNewObject()
5806 {
5807 setJSValue(allocateObject(m_node->structure()));
5808 mutatorFence();
5809 }
5810
5811 void compileNewStringObject()
5812 {
5813 RegisteredStructure structure = m_node->structure();
5814 LValue string = lowString(m_node->child1());
5815
5816 LBasicBlock slowCase = m_out.newBlock();
5817 LBasicBlock continuation = m_out.newBlock();
5818
5819 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);
5820
5821 LValue fastResultValue = allocateObject<StringObject>(structure, m_out.intPtrZero, slowCase);
5822 m_out.storePtr(m_out.constIntPtr(StringObject::info()), fastResultValue, m_heaps.JSDestructibleObject_classInfo);
5823 m_out.store64(string, fastResultValue, m_heaps.JSWrapperObject_internalValue);
5824 mutatorFence();
5825 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
5826 m_out.jump(continuation);
5827
5828 m_out.appendTo(slowCase, continuation);
5829 VM& vm = this->vm();
5830 LValue slowResultValue = lazySlowPath(
5831 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
5832 return createLazyCallGenerator(vm,
5833 operationNewStringObject, locations[0].directGPR(), locations[1].directGPR(),
5834 CCallHelpers::TrustedImmPtr(structure.get()));
5835 },
5836 string);
5837 ValueFromBlock slowResult = m_out.anchor(slowResultValue);
5838 m_out.jump(continuation);
5839
5840 m_out.appendTo(continuation, lastNext);
5841 setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
5842 }
5843
5844 void compileNewSymbol()
5845 {
5846 if (!m_node->child1()) {
5847 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbol), m_callFrame));
5848 return;
5849 }
5850 ASSERT(m_node->child1().useKind() == KnownStringUse);
5851 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbolWithDescription), m_callFrame, lowString(m_node->child1())));
5852 }
5853
    void compileNewArray()
    {
        // First speculate appropriately on all of the children. Do this unconditionally up here
        // because some of the slow paths may otherwise forget to do it. It's sort of arguable
        // that doing the speculations up here might be unprofitable for RA - so we can consider
        // sinking this to below the allocation fast path if we find that this has a lot of
        // register pressure.
        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
            speculate(m_graph.varArgChild(m_node, operandIndex));

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            // Fast path: allocate the array inline and store each child directly
            // into the butterfly. Children were already speculated on above, hence
            // ManualOperandSpeculation below.
            unsigned numElements = m_node->numChildren();
            unsigned vectorLengthHint = m_node->vectorLengthHint();
            ASSERT(vectorLengthHint >= numElements);

            ArrayValues arrayValues =
                allocateUninitializedContiguousJSArray(numElements, vectorLengthHint, structure);

            for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
                Edge edge = m_graph.varArgChild(m_node, operandIndex);

                switch (m_node->indexingType()) {
                case ALL_BLANK_INDEXING_TYPES:
                case ALL_UNDECIDED_INDEXING_TYPES:
                    DFG_CRASH(m_graph, m_node, "Bad indexing type");
                    break;

                case ALL_DOUBLE_INDEXING_TYPES:
                    m_out.storeDouble(
                        lowDouble(edge),
                        arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
                    break;

                case ALL_INT32_INDEXING_TYPES:
                case ALL_CONTIGUOUS_INDEXING_TYPES:
                    m_out.store64(
                        lowJSValue(edge, ManualOperandSpeculation),
                        arrayValues.butterfly,
                        m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
                    break;

                default:
                    DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
                    break;
                }
            }

            setJSValue(arrayValues.array);
            // Brand-new object: a mutator fence (no write barrier) is sufficient.
            mutatorFence();
            return;
        }

        if (!m_node->numChildren()) {
            setJSValue(vmCall(
                Int64, m_out.operation(operationNewEmptyArray), m_callFrame,
                weakStructure(structure)));
            return;
        }

        // Slow path: spill all elements into a scratch buffer and have the
        // runtime construct the array from it.
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
            Edge edge = m_graph.varArgChild(m_node, operandIndex);
            LValue valueToStore;
            switch (m_node->indexingType()) {
            case ALL_DOUBLE_INDEXING_TYPES:
                valueToStore = boxDouble(lowDouble(edge));
                break;
            default:
                valueToStore = lowJSValue(edge, ManualOperandSpeculation);
                break;
            }
            m_out.store64(valueToStore, m_out.absolute(buffer + operandIndex));
        }

        // Mark the scratch buffer active while the call is in flight (cleared
        // again below) — presumably so its contents stay visible to GC; confirm
        // against ScratchBuffer's contract.
        m_out.storePtr(
            m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        LValue result = vmCall(
            Int64, m_out.operation(operationNewArray), m_callFrame,
            weakStructure(structure), m_out.constIntPtr(buffer),
            m_out.constIntPtr(m_node->numChildren()));

        m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
5948
    void compileNewArrayWithSpread()
    {
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            // Fast path: compute the final length up front (speculating against
            // int32 overflow), allocate one contiguous array, then copy each
            // child — plain elements, phantom array buffers, phantom rest
            // arguments, and fixed arrays — into place.
            CheckedInt32 startLength = 0;
            BitVector* bitVector = m_node->bitVector();
            // Spread lengths are loaded once per inline call frame and reused.
            HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

            // Statically-known contribution: one element per non-spread child,
            // plus the known lengths of phantom array buffers.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (!bitVector->get(i))
                    ++startLength;
                else {
                    Edge& child = m_graph.varArgChild(m_node, i);
                    if (child->op() == PhantomSpread && child->child1()->op() == PhantomNewArrayBuffer)
                        startLength += child->child1()->castOperand<JSImmutableButterfly*>()->length();
                }
            }

            if (startLength.hasOverflowed()) {
                terminate(Overflow);
                return;
            }

            LValue length = m_out.constInt32(startLength.unsafeGet());

            // Dynamic contribution: add rest-spread and fixed-array lengths,
            // speculating that the running total never overflows.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (bitVector->get(i)) {
                    Edge use = m_graph.varArgChild(m_node, i);
                    CheckValue* lengthCheck = nullptr;
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomCreateRest) {
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
                            LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                                return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
                            }).iterator->value;
                            lengthCheck = m_out.speculateAdd(length, spreadLength);
                        }
                    } else {
                        LValue fixedArray = lowCell(use);
                        lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
                    }

                    if (lengthCheck) {
                        blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
                        length = lengthCheck;
                    }
                }
            }

            // Lengths at or above this threshold would need ArrayStorage, which
            // this contiguous fast path cannot produce; bail via speculation.
            LValue exceedsMaxAllowedLength = m_out.aboveOrEqual(length, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
            blessSpeculation(m_out.speculate(exceedsMaxAllowedLength), Overflow, noValue(), nullptr, m_origin);

            RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
            LValue result = arrayValues.array;
            LValue storage = arrayValues.butterfly;
            // Running write cursor into the new array's butterfly.
            LValue index = m_out.constIntPtr(0);

            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                Edge use = m_graph.varArgChild(m_node, i);
                if (bitVector->get(i)) {
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomNewArrayBuffer) {
                            // Constant buffer: emit straight-line stores of the
                            // known element values at fixed offsets from index.
                            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                            auto* array = use->child1()->castOperand<JSImmutableButterfly*>();
                            for (unsigned i = 0; i < array->length(); ++i) {
                                // Because resulted array from NewArrayWithSpread is always contiguous, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                m_out.store64(m_out.constInt64(value), m_out.baseIndex(heap, storage, index, JSValue(), (Checked<int32_t>(sizeof(JSValue)) * i).unsafeGet()));
                            }
                            index = m_out.add(index, m_out.constIntPtr(array->length()));
                        } else {
                            RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();

                            // Rest spread: copy the cached spread length's worth
                            // of arguments from the stack in an up-counting loop.
                            LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
                            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);

                            LBasicBlock loopStart = m_out.newBlock();
                            LBasicBlock continuation = m_out.newBlock();

                            ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
                            ValueFromBlock arrayIndexStart = m_out.anchor(index);
                            ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                            // Skip the loop entirely when the spread is empty.
                            m_out.branch(
                                m_out.isZero64(length),
                                unsure(continuation), unsure(loopStart));

                            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                            // Two phis advance in lockstep: the destination
                            // cursor and the source (argument) index.
                            LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                            LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);

                            LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
                            m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                            LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                            LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
                            ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
                            m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                            m_out.branch(
                                m_out.below(nextLoadIndex, length),
                                unsure(loopStart), unsure(continuation));

                            m_out.appendTo(continuation, lastNext);
                            // Cursor after the loop: the starting index (empty
                            // spread) or the loop's final index.
                            index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                        }
                    } else {
                        // Materialized spread (JSFixedArray): copy its elements
                        // with the same up-counting loop shape as above.
                        LBasicBlock loopStart = m_out.newBlock();
                        LBasicBlock continuation = m_out.newBlock();

                        LValue fixedArray = lowCell(use);

                        ValueFromBlock fixedIndexStart = m_out.anchor(m_out.constIntPtr(0));
                        ValueFromBlock arrayIndexStart = m_out.anchor(index);
                        ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                        LValue fixedArraySize = m_out.zeroExtPtr(m_out.load32(fixedArray, m_heaps.JSFixedArray_size));

                        m_out.branch(
                            m_out.isZero64(fixedArraySize),
                            unsure(continuation), unsure(loopStart));

                        LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                        LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                        LValue fixedArrayIndex = m_out.phi(pointerType(), fixedIndexStart);

                        LValue item = m_out.load64(m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, fixedArrayIndex));
                        m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                        LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                        LValue nextFixedArrayIndex = m_out.add(fixedArrayIndex, m_out.constIntPtr(1));
                        ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                        m_out.addIncomingToPhi(fixedArrayIndex, m_out.anchor(nextFixedArrayIndex));
                        m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                        m_out.branch(
                            m_out.below(nextFixedArrayIndex, fixedArraySize),
                            unsure(loopStart), unsure(continuation));

                        m_out.appendTo(continuation, lastNext);
                        index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                    }
                } else {
                    // Non-spread child: one element, then advance the cursor.
                    IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                    LValue item = lowJSValue(use);
                    m_out.store64(item, m_out.baseIndex(heap, storage, index));
                    index = m_out.add(index, m_out.constIntPtr(1));
                }
            }

            mutatorFence();
            setJSValue(result);
            return;
        }

        // Slow path: spill children (cells for spreads, JSValues otherwise) into
        // a scratch buffer and let the runtime assemble the array.
        ASSERT(m_node->numChildren());
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
        BitVector* bitVector = m_node->bitVector();
        for (unsigned i = 0; i < m_node->numChildren(); ++i) {
            Edge use = m_graph.m_varArgChildren[m_node->firstChild() + i];
            LValue value;
            if (bitVector->get(i))
                value = lowCell(use);
            else
                value = lowJSValue(use);
            m_out.store64(value, m_out.absolute(&buffer[i]));
        }

        // Mark the scratch buffer active around the call (cleared after).
        m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
        LValue result = vmCall(Int64, m_out.operation(operationNewArrayWithSpreadSlow), m_callFrame, m_out.constIntPtr(buffer), m_out.constInt32(m_node->numChildren()));
        m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
6134
6135 void compileCreateThis()
6136 {
6137 LValue callee = lowCell(m_node->child1());
6138
6139 LBasicBlock isFunctionBlock = m_out.newBlock();
6140 LBasicBlock hasRareData = m_out.newBlock();
6141 LBasicBlock slowPath = m_out.newBlock();
6142 LBasicBlock continuation = m_out.newBlock();
6143
6144 m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowPath));
6145
6146 LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
6147 LValue rareData = m_out.loadPtr(callee, m_heaps.JSFunction_rareData);
6148 m_out.branch(m_out.isZero64(rareData), rarely(slowPath), usually(hasRareData));
6149
6150 m_out.appendTo(hasRareData, slowPath);
6151 LValue allocator = m_out.loadPtr(rareData, m_heaps.FunctionRareData_allocator);
6152 LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_structure);
6153 LValue butterfly = m_out.constIntPtr(0);
6154 ValueFromBlock fastResult = m_out.anchor(allocateObject(allocator, structure, butterfly, slowPath));
6155 m_out.jump(continuation);
6156
6157 m_out.appendTo(slowPath, continuation);
6158 ValueFromBlock slowResult = m_out.anchor(vmCall(
6159 Int64, m_out.operation(operationCreateThis), m_callFrame, callee, m_out.constInt32(m_node->inlineCapacity())));
6160 m_out.jump(continuation);
6161
6162 m_out.appendTo(continuation, lastNext);
6163 LValue result = m_out.phi(Int64, fastResult, slowResult);
6164
6165 mutatorFence();
6166 setJSValue(result);
6167 }
6168
6169 void compileSpread()
6170 {
6171 if (m_node->child1()->op() == PhantomNewArrayBuffer) {
6172 LBasicBlock slowAllocation = m_out.newBlock();
6173 LBasicBlock continuation = m_out.newBlock();
6174
6175 auto* immutableButterfly = m_node->child1()->castOperand<JSImmutableButterfly*>();
6176
6177 LValue fastFixedArrayValue = allocateVariableSizedCell<JSFixedArray>(
6178 m_out.constIntPtr(JSFixedArray::allocationSize(immutableButterfly->length()).unsafeGet()),
6179 m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
6180 m_out.store32(m_out.constInt32(immutableButterfly->length()), fastFixedArrayValue, m_heaps.JSFixedArray_size);
6181 ValueFromBlock fastFixedArray = m_out.anchor(fastFixedArrayValue);
6182 m_out.jump(continuation);
6183
6184 LBasicBlock lastNext = m_out.appendTo(slowAllocation, continuation);
6185 ValueFromBlock slowFixedArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, m_out.constInt32(immutableButterfly->length())));
6186 m_out.jump(continuation);
6187
6188 m_out.appendTo(continuation, lastNext);
6189 LValue fixedArray = m_out.phi(pointerType(), fastFixedArray, slowFixedArray);
6190 for (unsigned i = 0; i < immutableButterfly->length(); i++) {
6191 // Because forwarded values are drained as JSValue, we should not generate value
6192 // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
6193 int64_t value = JSValue::encode(immutableButterfly->get(i));
6194 m_out.store64(
6195 m_out.constInt64(value),
6196 m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, m_out.constIntPtr(i), jsNumber(i)));
6197 }
6198 mutatorFence();
6199 setJSValue(fixedArray);
6200 return;
6201 }
6202
6203 if (m_node->child1()->op() == PhantomCreateRest) {
6204 // This IR is rare to generate since it requires escaping the Spread
6205 // but not the CreateRest. In bytecode, we have only few operations that
6206 // accept Spread's result as input. This usually leads to the Spread node not
6207 // escaping. However, this can happen if for example we generate a PutStack on
6208 // the Spread but nothing escapes the CreateRest.
6209 LBasicBlock loopHeader = m_out.newBlock();
6210 LBasicBlock loopBody = m_out.newBlock();
6211 LBasicBlock slowAllocation = m_out.newBlock();
6212 LBasicBlock continuation = m_out.newBlock();
6213 LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopHeader);
6214
6215 InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
6216 unsigned numberOfArgumentsToSkip = m_node->child1()->numberOfArgumentsToSkip();
6217 LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
6218 LValue length = getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
6219 static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
6220 LValue size = m_out.add(
6221 m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
6222 m_out.constIntPtr(JSFixedArray::offsetOfData()));
6223
6224 LValue fastArrayValue = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
6225 m_out.store32(length, fastArrayValue, m_heaps.JSFixedArray_size);
6226 ValueFromBlock fastArray = m_out.anchor(fastArrayValue);
6227 m_out.jump(loopHeader);
6228
6229 m_out.appendTo(slowAllocation, loopHeader);
6230 ValueFromBlock slowArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, length));
6231 m_out.jump(loopHeader);
6232
6233 m_out.appendTo(loopHeader, loopBody);
6234 LValue fixedArray = m_out.phi(pointerType(), fastArray, slowArray);
6235 ValueFromBlock startIndex = m_out.anchor(m_out.constIntPtr(0));
6236 m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopBody));
6237
6238 m_out.appendTo(loopBody, continuation);
6239 LValue index = m_out.phi(pointerType(), startIndex);
6240 LValue value = m_out.load64(
6241 m_out.baseIndex(m_heaps.variables, sourceStart, index));
6242 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, index));
6243 LValue nextIndex = m_out.add(m_out.constIntPtr(1), index);
6244 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6245 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)), unsure(loopBody), unsure(continuation));
6246
6247 m_out.appendTo(continuation, lastNext);
6248 mutatorFence();
6249 setJSValue(fixedArray);
6250 return;
6251 }
6252
6253 LValue argument = lowCell(m_node->child1());
6254
6255 LValue result;
6256
6257 if (m_node->child1().useKind() == ArrayUse)
6258 speculateArray(m_node->child1());
6259
6260 if (m_graph.canDoFastSpread(m_node, m_state.forNode(m_node->child1()))) {
6261 LBasicBlock preLoop = m_out.newBlock();
6262 LBasicBlock loopSelection = m_out.newBlock();
6263 LBasicBlock contiguousLoopStart = m_out.newBlock();
6264 LBasicBlock doubleLoopStart = m_out.newBlock();
6265 LBasicBlock slowPath = m_out.newBlock();
6266 LBasicBlock continuation = m_out.newBlock();
6267
6268 LValue indexingShape = m_out.load8ZeroExt32(argument, m_heaps.JSCell_indexingTypeAndMisc);
6269 indexingShape = m_out.bitAnd(indexingShape, m_out.constInt32(IndexingShapeMask));
6270 LValue isOKIndexingType = m_out.belowOrEqual(
6271 m_out.sub(indexingShape, m_out.constInt32(Int32Shape)),
6272 m_out.constInt32(ContiguousShape - Int32Shape));
6273
6274 m_out.branch(isOKIndexingType, unsure(preLoop), unsure(slowPath));
6275 LBasicBlock lastNext = m_out.appendTo(preLoop, loopSelection);
6276
6277 LValue butterfly = m_out.loadPtr(argument, m_heaps.JSObject_butterfly);
6278 LValue length = m_out.load32NonNegative(butterfly, m_heaps.Butterfly_publicLength);
6279 static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
6280 LValue size = m_out.add(
6281 m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
6282 m_out.constIntPtr(JSFixedArray::offsetOfData()));
6283
6284 LValue fastAllocation = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowPath);
6285 ValueFromBlock fastResult = m_out.anchor(fastAllocation);
6286 m_out.store32(length, fastAllocation, m_heaps.JSFixedArray_size);
6287
6288 ValueFromBlock startIndexForContiguous = m_out.anchor(m_out.constIntPtr(0));
6289 ValueFromBlock startIndexForDouble = m_out.anchor(m_out.constIntPtr(0));
6290
6291 m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopSelection));
6292
6293 m_out.appendTo(loopSelection, contiguousLoopStart);
6294 m_out.branch(m_out.equal(indexingShape, m_out.constInt32(DoubleShape)),
6295 unsure(doubleLoopStart), unsure(contiguousLoopStart));
6296
6297 {
6298 m_out.appendTo(contiguousLoopStart, doubleLoopStart);
6299 LValue index = m_out.phi(pointerType(), startIndexForContiguous);
6300
6301 TypedPointer loadSite = m_out.baseIndex(m_heaps.root, butterfly, index, ScaleEight); // We read TOP here since we can be reading either int32 or contiguous properties.
6302 LValue value = m_out.load64(loadSite);
6303 value = m_out.select(m_out.isZero64(value), m_out.constInt64(JSValue::encode(jsUndefined())), value);
6304 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
6305
6306 LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
6307 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6308
6309 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
6310 unsure(contiguousLoopStart), unsure(continuation));
6311 }
6312
6313 {
6314 m_out.appendTo(doubleLoopStart, slowPath);
6315 LValue index = m_out.phi(pointerType(), startIndexForDouble);
6316
6317 LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, butterfly, index));
6318 LValue isNaN = m_out.doubleNotEqualOrUnordered(value, value);
6319 LValue holeResult = m_out.constInt64(JSValue::encode(jsUndefined()));
6320 LValue normalResult = boxDouble(value);
6321 value = m_out.select(isNaN, holeResult, normalResult);
6322 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
6323
6324 LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
6325 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6326
6327 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
6328 unsure(doubleLoopStart), unsure(continuation));
6329 }
6330
6331 m_out.appendTo(slowPath, continuation);
6332 ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationSpreadFastArray), m_callFrame, argument));
6333 m_out.jump(continuation);
6334
6335 m_out.appendTo(continuation, lastNext);
6336 result = m_out.phi(pointerType(), fastResult, slowResult);
6337 mutatorFence();
6338 } else
6339 result = vmCall(pointerType(), m_out.operation(operationSpreadGeneric), m_callFrame, argument);
6340
6341 setJSValue(result);
6342 }
6343
    void compileNewArrayBuffer()
    {
        // NewArrayBuffer: materialize an array literal whose contents are baked
        // into a constant JSImmutableButterfly (the node's cell operand).
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingMode()));
        auto* immutableButterfly = m_node->castOperand<JSImmutableButterfly*>();

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingMode())) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: inline-allocate the JSArray and point its butterfly
            // directly at the shared immutable butterfly — no copying.
            LValue fastArray = allocateObject<JSArray>(structure, m_out.constIntPtr(immutableButterfly->toButterfly()), slowPath);
            ValueFromBlock fastResult = m_out.anchor(fastArray);
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(structure), m_out.weakPointer(m_node->cellOperand()));
            ValueFromBlock slowResult = m_out.anchor(slowArray);
            m_out.jump(continuation);

            m_out.appendTo(continuation);

            // Fence so a concurrent GC never sees the object before its stores
            // above are visible.
            mutatorFence();
            setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
            return;
        }

        // The global object is "having a bad time" or the indexing mode needs
        // ArrayStorage: always call into the runtime.
        setJSValue(vmCall(
            Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
            weakStructure(structure), m_out.weakPointer(m_node->cellOperand())));
    }
6375
    void compileNewArrayWithSize()
    {
        // NewArrayWithSize: allocate a JSArray with the requested public length.
        LValue publicLength = lowInt32(m_node->child1());

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            // Fast path: inline allocation. allocateJSArray emits its own slow
            // path internally, so no explicit blocks are needed here.
            IndexingType indexingType = m_node->indexingType();
            setJSValue(
                allocateJSArray(
                    publicLength, publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
            mutatorFence();
            return;
        }

        // Slow path: large requested lengths get an ArrayWithArrayStorage
        // structure up front, mirroring the runtime's eager-storage heuristic.
        LValue structureValue = m_out.select(
            m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
            weakStructure(structure));
        setJSValue(vmCall(Int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength, m_out.intPtrZero));
    }
6399
    void compileNewTypedArray()
    {
        // NewTypedArray: allocate a typed array view of the node's
        // TypedArrayType. Int32Use lengths get an inline fast path; anything
        // else defers to the runtime.
        TypedArrayType typedArrayType = m_node->typedArrayType();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        switch (m_node->child1().useKind()) {
        case Int32Use: {
            RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(typedArrayType));

            LValue size = lowInt32(m_node->child1());

            LBasicBlock smallEnoughCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock noStorage = m_out.anchor(m_out.intPtrZero);

            // Lengths above fastSizeLimit always go to the slow path.
            m_out.branch(
                m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)),
                rarely(slowCase), usually(smallEnoughCase));

            LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, slowCase);

            LValue byteSize =
                m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(typedArrayType)));
            // Round up to a multiple of 8 so the zero-fill below can work in
            // whole 64-bit words.
            if (elementSize(typedArrayType) < 8) {
                byteSize = m_out.bitAnd(
                    m_out.add(byteSize, m_out.constIntPtr(7)),
                    m_out.constIntPtr(~static_cast<intptr_t>(7)));
            }

            // The backing store is allocated from the primitive Gigacage
            // auxiliary space.
            LValue allocator = allocatorForSize(vm().primitiveGigacageAuxiliarySpace, byteSize, slowCase);
            LValue storage = allocateHeapCell(allocator, slowCase);

            // Zero the storage, one 64-bit word at a time (byteSize / 8 words).
            splatWords(
                storage,
                m_out.int32Zero,
                m_out.castToInt32(m_out.lShr(byteSize, m_out.constIntPtr(3))),
                m_out.int64Zero,
                m_heaps.typedArrayProperties);

            ValueFromBlock haveStorage = m_out.anchor(storage);

            LValue fastResultValue =
                allocateObject<JSArrayBufferView>(structure, m_out.intPtrZero, slowCase);

            m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector);
            m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length);
            m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode);

            mutatorFence();
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            // storageValue is null if we bailed before allocating the backing
            // store; the operation allocates it itself in that case.
            LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);

            VM& vm = this->vm();
            LValue slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewTypedArrayWithSizeForType(typedArrayType), locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                        locations[2].directGPR());
                },
                size, storageValue);
            ValueFromBlock slowResult = m_out.anchor(slowResultValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
            return;
        }

        case UntypedUse: {
            // Arbitrary argument: defer entirely to the runtime constructor.
            LValue argument = lowJSValue(m_node->child1());

            LValue result = vmCall(
                pointerType(), m_out.operation(operationNewTypedArrayWithOneArgumentForType(typedArrayType)),
                m_callFrame, weakPointer(globalObject->typedArrayStructureConcurrently(typedArrayType)), argument);

            setJSValue(result);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
6490
6491 void compileAllocatePropertyStorage()
6492 {
6493 LValue object = lowCell(m_node->child1());
6494 setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
6495 }
6496
6497 void compileReallocatePropertyStorage()
6498 {
6499 Transition* transition = m_node->transition();
6500 LValue object = lowCell(m_node->child1());
6501 LValue oldStorage = lowStorage(m_node->child2());
6502
6503 setStorage(
6504 reallocatePropertyStorage(
6505 object, oldStorage, transition->previous.get(), transition->next.get()));
6506 }
6507
6508 void compileNukeStructureAndSetButterfly()
6509 {
6510 nukeStructureAndSetButterfly(lowStorage(m_node->child2()), lowCell(m_node->child1()));
6511 }
6512
    void compileToNumber()
    {
        // ToNumber: numbers pass through unchanged; everything else calls the
        // runtime conversion.
        LValue value = lowJSValue(m_node->child1());

        // If the abstract interpreter proves the input is never a number, the
        // fast path would be dead code — just call the operation.
        if (!(abstractValue(m_node->child1()).m_type & SpecBytecodeNumber))
            setJSValue(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
        else {
            LBasicBlock notNumber = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock fastResult = m_out.anchor(value);
            m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));

            // notNumber case.
            LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
            // We have several attempts to remove ToNumber. But ToNumber still exists.
            // It means that converting non-numbers to numbers by this ToNumber is not rare.
            // Instead of the lazy slow path generator, we call the operation here.
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
            m_out.jump(continuation);

            // continuation case.
            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
        }
    }
6539
    void compileToStringOrCallStringConstructorOrStringValueOf()
    {
        // Lowers ToString, CallStringConstructor, and StringValueOf. Strategy
        // depends on what has been proven about child1's type.
        ASSERT(m_node->op() != StringValueOf || m_node->child1().useKind() == UntypedUse);
        switch (m_node->child1().useKind()) {
        case StringObjectUse: {
            // Proven StringObject: just unwrap its internal string value.
            LValue cell = lowCell(m_node->child1());
            speculateStringObjectForCell(m_node->child1(), cell);
            setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            return;
        }

        case StringOrStringObjectUse: {
            // A string passes through as-is; a StringObject is unwrapped;
            // anything else is a speculation failure.
            LValue cell = lowCell(m_node->child1());
            LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);

            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock simpleResult = m_out.anchor(cell);
            m_out.branch(
                m_out.equal(type, m_out.constInt32(StringType)),
                unsure(continuation), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(notString, continuation);
            speculate(
                BadType, jsValueValue(cell), m_node->child1().node(),
                m_out.notEqual(type, m_out.constInt32(StringObjectType)));
            ValueFromBlock unboxedResult = m_out.anchor(
                m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, unboxedResult));

            // Tell the abstract interpreter what this speculation proved.
            m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject);
            return;
        }

        case CellUse:
        case NotCellUse:
        case UntypedUse: {
            LValue value;
            if (m_node->child1().useKind() == CellUse)
                value = lowCell(m_node->child1());
            else if (m_node->child1().useKind() == NotCellUse)
                value = lowNotCell(m_node->child1());
            else
                value = lowJSValue(m_node->child1());

            LBasicBlock isCell = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // For CellUse/NotCellUse the cell-ness is statically known, so the
            // predicate folds to a constant.
            LValue isCellPredicate;
            if (m_node->child1().useKind() == CellUse)
                isCellPredicate = m_out.booleanTrue;
            else if (m_node->child1().useKind() == NotCellUse)
                isCellPredicate = m_out.booleanFalse;
            else
                isCellPredicate = this->isCell(value, provenType(m_node->child1()));
            m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isCell, notString);
            ValueFromBlock simpleResult = m_out.anchor(value);
            // Only bother with the is-string fast check if profiling predicts
            // strings at all.
            LValue isStringPredicate;
            if (m_node->child1()->prediction() & SpecString) {
                isStringPredicate = isString(value, provenType(m_node->child1()));
            } else
                isStringPredicate = m_out.booleanFalse;
            m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));

            m_out.appendTo(notString, continuation);
            // Pick the runtime conversion matching the op and whether the
            // input is known to be a cell.
            LValue operation;
            if (m_node->child1().useKind() == CellUse) {
                ASSERT(m_node->op() != StringValueOf);
                operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell);
            } else {
                operation = m_out.operation(m_node->op() == ToString
                    ? operationToString : m_node->op() == StringValueOf
                    ? operationStringValueOf : operationCallStringConstructor);
            }
            ValueFromBlock convertedResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
            return;
        }

        // Numeric inputs: convert via the radix-10 string operations.
        case Int32Use:
            setJSValue(vmCall(Int64, m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(10)));
            return;

        case Int52RepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
            return;

        case DoubleRepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(10)));
            return;

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
6646
    void compileToPrimitive()
    {
        // ToPrimitive: non-cells and non-object cells are already primitive
        // and pass through unchanged; objects call the runtime operation.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isObjectCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 3> results;

        // Not a cell: already primitive.
        results.append(m_out.anchor(value));
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        // Cell but not an object (e.g. string, symbol): also primitive.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
        results.append(m_out.anchor(value));
        m_out.branch(
            isObject(value, provenType(m_node->child1())),
            unsure(isObjectCase), unsure(continuation));

        m_out.appendTo(isObjectCase, continuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationToPrimitive), m_callFrame, value)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
6675
    void compileMakeRope()
    {
        // MakeRope: concatenate two or three strings by allocating a
        // JSRopeString whose fibers point at the children — no character
        // copying. The is8Bit flag and total length are computed from the
        // fibers and packed into the rope's pointer-sized fiber fields.
        struct FlagsAndLength {
            LValue flags;
            LValue length;
        };

        Edge edges[3] = {
            m_node->child1(),
            m_node->child2(),
            m_node->child3(),
        };
        LValue kids[3];
        unsigned numKids;
        kids[0] = lowCell(edges[0]);
        kids[1] = lowCell(edges[1]);
        if (edges[2]) {
            kids[2] = lowCell(edges[2]);
            numKids = 3;
        } else {
            kids[2] = 0;
            numKids = 2;
        }

        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Allocator allocator = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);

        LValue result = allocateCell(
            m_out.constIntPtr(allocator.localAllocator()), vm().stringStructure.get(), slowPath);

        // This puts nullptr for the first fiber. It makes visitChildren safe even if this JSRopeString is discarded due to the speculation failure in the following path.
        m_out.storePtr(m_out.constIntPtr(JSString::isRopeInPointer), result, m_heaps.JSRopeString_fiber0);

        // Fetch one child's is8Bit flag and length. A known string constant
        // folds to constants; otherwise ropes read the fields from the
        // JSRopeString itself, resolved strings from their StringImpl.
        auto getFlagsAndLength = [&] (Edge& edge, LValue child) {
            if (JSString* string = edge->dynamicCastConstant<JSString*>(vm())) {
                return FlagsAndLength {
                    m_out.constInt32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0),
                    m_out.constInt32(string->length())
                };
            }

            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock ropeCase = m_out.newBlock();
            LBasicBlock notRopeCase = m_out.newBlock();

            m_out.branch(isRopeString(child, edge), unsure(ropeCase), unsure(notRopeCase));

            LBasicBlock lastNext = m_out.appendTo(ropeCase, notRopeCase);
            ValueFromBlock flagsForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_flags));
            ValueFromBlock lengthForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            m_out.appendTo(notRopeCase, continuation);
            LValue stringImpl = m_out.loadPtr(child, m_heaps.JSString_value);
            ValueFromBlock flagsForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_hashAndFlags));
            ValueFromBlock lengthForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return FlagsAndLength {
                m_out.phi(Int32, flagsForRope, flagsForNonRope),
                m_out.phi(Int32, lengthForRope, lengthForNonRope)
            };
        };

        // Combine across fibers: the rope is 8-bit only if every fiber is
        // (bitAnd of flags); the length is the checked sum — overflow is a
        // speculation failure.
        FlagsAndLength flagsAndLength = getFlagsAndLength(edges[0], kids[0]);
        for (unsigned i = 1; i < numKids; ++i) {
            auto mergeFlagsAndLength = [&] (Edge& edge, LValue child, FlagsAndLength previousFlagsAndLength) {
                FlagsAndLength flagsAndLength = getFlagsAndLength(edge, child);
                LValue flags = m_out.bitAnd(previousFlagsAndLength.flags, flagsAndLength.flags);
                CheckValue* lengthCheck = m_out.speculateAdd(previousFlagsAndLength.length, flagsAndLength.length);
                blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
                return FlagsAndLength {
                    flags,
                    lengthCheck
                };
            };
            flagsAndLength = mergeFlagsAndLength(edges[i], kids[i], flagsAndLength);
        }

        // Pack fiber pointers plus the rope/is8Bit bits and length into the
        // three pointer-sized fiber fields.
        m_out.storePtr(
            m_out.bitOr(
                m_out.bitOr(kids[0], m_out.constIntPtr(JSString::isRopeInPointer)),
                m_out.bitAnd(m_out.constIntPtr(JSRopeString::is8BitInPointer), m_out.zeroExtPtr(flagsAndLength.flags))),
            result, m_heaps.JSRopeString_fiber0);
        m_out.storePtr(
            m_out.bitOr(m_out.zeroExtPtr(flagsAndLength.length), m_out.shl(kids[1], m_out.constInt32(32))),
            result, m_heaps.JSRopeString_fiber1);
        if (numKids == 2)
            m_out.storePtr(m_out.lShr(kids[1], m_out.constInt32(32)), result, m_heaps.JSRopeString_fiber2);
        else
            m_out.storePtr(m_out.bitOr(m_out.lShr(kids[1], m_out.constInt32(32)), m_out.shl(kids[2], m_out.constInt32(16))), result, m_heaps.JSRopeString_fiber2);

        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(result);
        // Zero total length: return the VM's shared empty string instead of
        // the rope we just built.
        m_out.branch(m_out.isZero32(flagsAndLength.length), rarely(emptyCase), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(emptyCase, slowPath);
        ValueFromBlock emptyResult = m_out.anchor(weakPointer(jsEmptyString(&m_graph.m_vm)));
        m_out.jump(continuation);

        // Slow path (cell allocation failed): call the matching runtime rope
        // constructor for the number of fibers.
        m_out.appendTo(slowPath, continuation);
        LValue slowResultValue;
        VM& vm = this->vm();
        switch (numKids) {
        case 2:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR());
                }, kids[0], kids[1]);
            break;
        case 3:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR(), locations[3].directGPR());
                }, kids[0], kids[1], kids[2]);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad number of children");
            break;
        }
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, emptyResult, slowResult));
    }
6810
    void compileStringCharAt()
    {
        // StringCharAt: indexed character access on a resolved (non-rope)
        // string, producing a JSString. Takes the string, an int32 index, and
        // the character storage.
        LValue base = lowString(m_graph.child(m_node, 0));
        LValue index = lowInt32(m_graph.child(m_node, 1));
        LValue storage = lowStorage(m_graph.child(m_node, 2));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
        // Out-of-bounds indices take the slow path.
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)),
            rarely(slowPath), usually(fastPath));

        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);

        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();

        // Dispatch on the string's character width.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);

        // FIXME: Need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1)))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);

        LValue char16BitValue = m_out.load16ZeroExt32(
            m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1))));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters above maxSingleCharacterString are not in the small-
        // strings cache, so they need a call to make a string.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);

        Vector<ValueFromBlock, 4> results;
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowPath);

        LValue character = m_out.phi(Int32, char8Bit, char16Bit);

        // Small characters map directly into the VM's single-character string
        // cache.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());

        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        if (m_node->arrayMode().isInBounds()) {
            // In-bounds array mode: reaching here at all is a speculation
            // failure; the anchored value is dead.
            speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue);
            results.append(m_out.anchor(m_out.intPtrZero));
        } else {
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

            bool prototypeChainIsSane = false;
            if (globalObject->stringPrototypeChainIsSane()) {
                // FIXME: This could be captured using a Speculation mode that means
                // "out-of-bounds loads return a trivial value", something like
                // SaneChainOutOfBounds.
                // https://bugs.webkit.org/show_bug.cgi?id=144668

                m_graph.registerAndWatchStructureTransition(globalObject->stringPrototype()->structure(vm()));
                m_graph.registerAndWatchStructureTransition(globalObject->objectPrototype()->structure(vm()));

                prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
            }
            if (prototypeChainIsSane) {
                // With a sane (watched) prototype chain, a non-negative
                // out-of-bounds read yields undefined without calling out;
                // negative indices still call the operation.
                LBasicBlock negativeIndex = m_out.newBlock();

                results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
                m_out.branch(
                    m_out.lessThan(index, m_out.int32Zero),
                    rarely(negativeIndex), usually(continuation));

                m_out.appendTo(negativeIndex, continuation);
            }

            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByValStringInt), m_callFrame, base, index)));
        }

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
6919
    void compileStringCharCodeAt()
    {
        // StringCharCodeAt: produce the int32 character code at the given
        // index of a resolved string; out-of-bounds is a speculation failure.
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue base = lowString(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue storage = lowStorage(m_node->child3());

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);

        // Bounds check: index >= length exits to the baseline.
        speculate(
            Uncountable, noValue(), 0,
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)));

        // Dispatch on the string's character width.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);

        // FIXME: need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(is16Bit, continuation);

        ValueFromBlock char16Bit = m_out.anchor(
            m_out.load16ZeroExt32(m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setInt32(m_out.phi(Int32, char8Bit, char16Bit));
    }
6965
    void compileStringFromCharCode()
    {
        // StringFromCharCode: make a one-character string from a character
        // code. Small codes hit the VM's single-character string cache;
        // everything else calls the runtime.
        Edge childEdge = m_node->child1();

        if (childEdge.useKind() == UntypedUse) {
            // Arbitrary argument: the runtime handles conversion.
            LValue result = vmCall(
                Int64, m_out.operation(operationStringFromCharCodeUntyped), m_callFrame,
                lowJSValue(childEdge));
            setJSValue(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use, childEdge.useKind());

        LValue value = lowInt32(childEdge);

        LBasicBlock smallIntCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.above(value, m_out.constInt32(maxSingleCharacterString)),
            rarely(slowCase), usually(smallIntCase));

        LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);

        // Fast path: index the cached single-character strings directly.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        LValue fastResultValue = m_out.loadPtr(
            m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            pointerType(), m_out.operation(operationStringFromCharCode), m_callFrame, value);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
7009
7010 void compileGetByOffset()
7011 {
7012 StorageAccessData& data = m_node->storageAccessData();
7013
7014 setJSValue(loadProperty(
7015 lowStorage(m_node->child1()), data.identifierNumber, data.offset));
7016 }
7017
7018 void compileGetGetter()
7019 {
7020 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
7021 }
7022
7023 void compileGetSetter()
7024 {
7025 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
7026 }
7027
    void compileMultiGetByOffset()
    {
        // Polymorphic property load: switch on the base cell's structure and,
        // per case, either materialize a known constant or load from the
        // matching offset (inline or out-of-line, possibly off a prototype).
        LValue base = lowCell(m_node->child1());

        MultiGetByOffsetData& data = m_node->multiGetByOffsetData();

        // One landing block per case; several structures may share a block
        // when they resolve the property the same way.
        Vector<LBasicBlock, 2> blocks(data.cases.size());
        for (unsigned i = data.cases.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            for (unsigned j = getCase.set().size(); j--;) {
                RegisteredStructure structure = getCase.set()[j];
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If abstract interpretation already proves the base's structure lies
        // in our set, the exit block is unreachable and no speculation check
        // is needed there.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        Vector<ValueFromBlock, 2> results;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            GetByOffsetMethod method = getCase.method();

            m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? blocks[i + 1] : exit);

            LValue result;

            switch (method.kind()) {
            case GetByOffsetMethod::Invalid:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case GetByOffsetMethod::Constant:
                // The property's value is known statically for this structure.
                result = m_out.constInt64(JSValue::encode(method.constant()->value()));
                break;

            case GetByOffsetMethod::Load:
            case GetByOffsetMethod::LoadFromPrototype: {
                LValue propertyBase;
                if (method.kind() == GetByOffsetMethod::Load)
                    propertyBase = base;
                else
                    propertyBase = weakPointer(method.prototype()->value().asCell());
                // Out-of-line properties live in the butterfly, not on the cell.
                if (!isInlineOffset(method.offset()))
                    propertyBase = m_out.loadPtr(propertyBase, m_heaps.JSObject_butterfly);
                result = loadProperty(
                    propertyBase, data.identifierNumber, method.offset());
                break;
            } }

            results.append(m_out.anchor(result));
            m_out.jump(continuation);
        }

        m_out.appendTo(exit, continuation);
        // Reached only when the structure matched no case.
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
7099
7100 void compilePutByOffset()
7101 {
7102 StorageAccessData& data = m_node->storageAccessData();
7103
7104 storeProperty(
7105 lowJSValue(m_node->child3()),
7106 lowStorage(m_node->child1()), data.identifierNumber, data.offset);
7107 }
7108
    void compileMultiPutByOffset()
    {
        // Polymorphic property store: switch on the base cell's structure and,
        // per variant, either replace an existing property or perform a
        // structure transition (possibly growing out-of-line storage).
        LValue base = lowCell(m_node->child1());
        LValue value = lowJSValue(m_node->child2());

        MultiPutByOffsetData& data = m_node->multiPutByOffsetData();

        Vector<LBasicBlock, 2> blocks(data.variants.size());
        for (unsigned i = data.variants.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // One switch case per old structure; a variant may cover several.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.variants.size(); i--;) {
            PutByIdVariant variant = data.variants[i];
            for (unsigned j = variant.oldStructure().size(); j--;) {
                RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If the abstract interpreter proves the base's structure is within
        // our set, the exit block can never be reached.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        for (unsigned i = data.variants.size(); i--;) {
            m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);

            PutByIdVariant variant = data.variants[i];

            LValue storage;
            if (variant.kind() == PutByIdVariant::Replace) {
                // Replacing an existing property: inline properties live on
                // the cell itself, out-of-line ones in the butterfly.
                if (isInlineOffset(variant.offset()))
                    storage = base;
                else
                    storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
            } else {
                DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
                // Record this transition in the plan's desired transitions.
                m_graph.m_plan.transitions().addLazily(
                    codeBlock(), m_node->origin.semantic.codeOriginOwner(),
                    variant.oldStructureForTransition(), variant.newStructure());

                storage = storageForTransition(
                    base, variant.offset(),
                    variant.oldStructureForTransition(), variant.newStructure());
            }

            storeProperty(value, storage, data.identifierNumber, variant.offset());

            if (variant.kind() == PutByIdVariant::Transition) {
                // Publish the new structure ID only after the property value
                // is in place. The transition is not allowed to change these
                // object traits:
                ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
                ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
                ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
                m_out.store32(
                    weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
            }

            m_out.jump(continuation);
        }

        m_out.appendTo(exit, continuation);
        // Reached only when the structure matched no case.
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
    }
7179
    void compileMatchStructure()
    {
        // Switch on the base cell's structure and produce a boolean according
        // to which variant (if any) that structure matches.
        LValue base = lowCell(m_node->child1());

        MatchStructureData& data = m_node->matchStructureData();

        LBasicBlock trueBlock = m_out.newBlock();
        LBasicBlock falseBlock = m_out.newBlock();
        LBasicBlock exitBlock = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueBlock);

        // Every variant with result == true routes to trueBlock; the rest
        // route to falseBlock.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (MatchStructureVariant& variant : data.variants) {
            baseSet.add(variant.structure);
            cases.append(SwitchCase(
                weakStructureID(variant.structure),
                variant.result ? trueBlock : falseBlock, Weight(1)));
        }
        // If the structure is proven to be in the set, exitBlock is unreachable.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exitBlock);

        m_out.appendTo(trueBlock, falseBlock);
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseBlock, exitBlock);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(exitBlock, continuation);
        // Reached only when the structure matched no variant.
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult));
    }
7220
7221 void compileGetGlobalVariable()
7222 {
7223 setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
7224 }
7225
7226 void compilePutGlobalVariable()
7227 {
7228 m_out.store64(
7229 lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
7230 }
7231
    void compileNotifyWrite()
    {
        // Fire a WatchpointSet on write: if the set is not already invalidated,
        // take a (rare) slow path that calls out to invalidate it.
        WatchpointSet* set = m_node->watchpointSet();

        LBasicBlock isNotInvalidated = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // The set's state is a byte; widen to 32 bits for the compare.
        LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState()));
        m_out.branch(
            m_out.equal(state, m_out.constInt32(IsInvalidated)),
            usually(continuation), rarely(isNotInvalidated));

        LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);

        // Lazily-generated call to operationNotifyWrite(set); no result
        // register is needed (InvalidGPRReg).
        VM& vm = this->vm();
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
            });
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
7256
7257 void compileGetCallee()
7258 {
7259 setJSValue(m_out.loadPtr(addressFor(CallFrameSlot::callee)));
7260 }
7261
7262 void compileSetCallee()
7263 {
7264 auto callee = lowCell(m_node->child1());
7265 m_out.storePtr(callee, payloadFor(CallFrameSlot::callee));
7266 }
7267
7268 void compileGetArgumentCountIncludingThis()
7269 {
7270 VirtualRegister argumentCountRegister;
7271 if (InlineCallFrame* inlineCallFrame = m_node->argumentsInlineCallFrame())
7272 argumentCountRegister = inlineCallFrame->argumentCountRegister;
7273 else
7274 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
7275 setInt32(m_out.load32(payloadFor(argumentCountRegister)));
7276 }
7277
7278 void compileSetArgumentCountIncludingThis()
7279 {
7280 m_out.store32(m_out.constInt32(m_node->argumentCountIncludingThis()), payloadFor(CallFrameSlot::argumentCount));
7281 }
7282
7283 void compileGetScope()
7284 {
7285 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
7286 }
7287
7288 void compileSkipScope()
7289 {
7290 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
7291 }
7292
7293 void compileGetGlobalObject()
7294 {
7295 LValue structure = loadStructure(lowCell(m_node->child1()));
7296 setJSValue(m_out.loadPtr(structure, m_heaps.Structure_globalObject));
7297 }
7298
7299 void compileGetGlobalThis()
7300 {
7301 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
7302 setJSValue(m_out.loadPtr(m_out.absolute(globalObject->addressOfGlobalThis())));
7303 }
7304
7305 void compileGetClosureVar()
7306 {
7307 setJSValue(
7308 m_out.load64(
7309 lowCell(m_node->child1()),
7310 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]));
7311 }
7312
7313 void compilePutClosureVar()
7314 {
7315 m_out.store64(
7316 lowJSValue(m_node->child2()),
7317 lowCell(m_node->child1()),
7318 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]);
7319 }
7320
7321 void compileGetFromArguments()
7322 {
7323 setJSValue(
7324 m_out.load64(
7325 lowCell(m_node->child1()),
7326 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
7327 }
7328
7329 void compilePutToArguments()
7330 {
7331 m_out.store64(
7332 lowJSValue(m_node->child2()),
7333 lowCell(m_node->child1()),
7334 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
7335 }
7336
    void compileGetArgument()
    {
        // Load argument #argumentIndex for the semantic code origin, yielding
        // undefined when the caller passed fewer arguments than that.
        LValue argumentCount = m_out.load32(payloadFor(AssemblyHelpers::argumentCount(m_node->origin.semantic)));

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // count <= index means the argument is missing.
        m_out.branch(m_out.lessThanOrEqual(argumentCount, m_out.constInt32(m_node->argumentIndex())), unsure(outOfBounds), unsure(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        // argumentIndex() is offset by one relative to argumentsStart (the
        // count includes `this`), hence the -1 here.
        VirtualRegister arg = AssemblyHelpers::argumentsStart(m_node->origin.semantic) + m_node->argumentIndex() - 1;
        ValueFromBlock inBoundsResult = m_out.anchor(m_out.load64(addressFor(arg)));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueUndefined));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
7359
    void compileCompareEq()
    {
        // Loose equality (==). For use-kind pairs where both sides are proven
        // to be the same type, == coincides with === so we delegate.
        if (m_node->isBinaryUseKind(Int32Use)
            || m_node->isBinaryUseKind(Int52RepUse)
            || m_node->isBinaryUseKind(DoubleRepUse)
            || m_node->isBinaryUseKind(ObjectUse)
            || m_node->isBinaryUseKind(BooleanUse)
            || m_node->isBinaryUseKind(SymbolUse)
            || m_node->isBinaryUseKind(StringIdentUse)
            || m_node->isBinaryUseKind(StringUse)) {
            compileCompareStrictEq();
            return;
        }

        // Object vs. object-or-null/undefined: handled by a dedicated helper
        // (the object operand is passed second).
        if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1());
            return;
        }

        if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2());
            return;
        }

        // One side is known to be null/undefined: == reduces to a
        // null-or-undefined test of the other side.
        if (m_node->child1().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        if (m_node->child2().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        // Fully generic case: fast path compares the raw encoded values,
        // slow path calls the runtime.
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareEq);
    }
7403
    void compileCompareStrictEq()
    {
        // Strict equality (===), specialized per proven use-kind pair.

        if (m_node->isBinaryUseKind(Int32Use)) {
            setBoolean(
                m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Lower the left side in whichever Int52 representation it is
            // already in, then force the right side into the same kind.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            setBoolean(
                m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomized string impls compare by pointer.
            setBoolean(
                m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            speculateString(m_node->child1(), left);

            // Fast path: the same cell is trivially equal to itself. Note the
            // right side is only speculated on the slow path, after the
            // pointer-equality check.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(
                m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            speculateString(m_node->child2(), right);

            // Slow path: full content comparison.
            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right, m_node->child1(), m_node->child2()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        // Object vs. untyped: a non-null object is === to a value only if the
        // value is the same cell, so a raw 64-bit compare suffices.
        if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowJSValue(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child2()),
                    lowJSValue(m_node->child1())));
            return;
        }

        if (m_node->isBinaryUseKind(ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowNonNullObject(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(BooleanUse)) {
            setBoolean(
                m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse)) {
            // Symbols compare by cell identity.
            LValue leftSymbol = lowSymbol(m_node->child1());
            LValue rightSymbol = lowSymbol(m_node->child2());
            setBoolean(m_out.equal(leftSymbol, rightSymbol));
            return;
        }

        if (m_node->isBinaryUseKind(BigIntUse)) {
            // FIXME: [ESNext][BigInt] Create specialized version of strict equals for BigIntUse
            // https://bugs.webkit.org/show_bug.cgi?id=182895
            LValue left = lowBigInt(m_node->child1());
            LValue right = lowBigInt(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: identical cells; otherwise call the runtime.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(m_out.equal(left, right), rarely(continuation), usually(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
                pointerType(), m_out.operation(operationCompareStrictEq), m_callFrame, left, right)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        // Symbol vs. untyped: equal only if the untyped value is the same
        // symbol cell, so a raw compare suffices.
        if (m_node->isBinaryUseKind(SymbolUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, SymbolUse)) {
            Edge symbolEdge = m_node->child1();
            Edge untypedEdge = m_node->child2();
            if (symbolEdge.useKind() != SymbolUse)
                std::swap(symbolEdge, untypedEdge);

            LValue leftSymbol = lowSymbol(symbolEdge);
            LValue untypedValue = lowJSValue(untypedEdge);

            setBoolean(m_out.equal(leftSymbol, untypedValue));
            return;
        }

        // Misc (bool/undefined/null/etc.) vs. untyped: after speculation, a
        // raw encoded-value compare is exact.
        if (m_node->isBinaryUseKind(MiscUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, MiscUse)) {
            speculate(m_node->child1());
            speculate(m_node->child2());
            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse)
            || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
            Edge leftEdge = m_node->childFor(StringIdentUse);
            Edge rightEdge = m_node->childFor(NotStringVarUse);

            LValue left = lowStringIdent(leftEdge);
            LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation);

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Not a cell -> definitely not a string -> false.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(rightValue, provenType(rightEdge)),
                unsure(isCellCase), unsure(continuation));

            // A cell that is not a string -> false.
            LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
            ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isString(rightValue, provenType(rightEdge)),
                unsure(isStringCase), unsure(continuation));

            // Both atomized impls: compare by pointer.
            m_out.appendTo(isStringCase, continuation);
            LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
            speculateStringIdent(rightEdge, rightValue, right);
            ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, notStringResult, isStringResult));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse, UntypedUse)) {
            compileStringToUntypedStrictEquality(m_node->child1(), m_node->child2());
            return;
        }
        if (m_node->isBinaryUseKind(UntypedUse, StringUse)) {
            compileStringToUntypedStrictEquality(m_node->child2(), m_node->child1());
            return;
        }

        // Fully generic case.
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareStrictEq);
    }
7591
    // Strict equality between a proven-string operand and a fully untyped one.
    // Fast paths: identical cells (true), non-cell or non-string right side
    // (false). Slow path: full string-content comparison.
    void compileStringToUntypedStrictEquality(Edge stringEdge, Edge untypedEdge)
    {
        ASSERT(stringEdge.useKind() == StringUse);
        ASSERT(untypedEdge.useKind() == UntypedUse);

        LValue leftString = lowCell(stringEdge);
        LValue rightValue = lowJSValue(untypedEdge);
        SpeculatedType rightValueType = provenType(untypedEdge);

        // Verify left is string.
        speculateString(stringEdge, leftString);

        LBasicBlock testUntypedEdgeIsCell = m_out.newBlock();
        LBasicBlock testUntypedEdgeIsString = m_out.newBlock();
        LBasicBlock testStringEquality = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Since left is a string, pointer-identical operands are trivially
        // strictly equal.
        ValueFromBlock fastTrue = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.equal(leftString, rightValue), unsure(continuation), unsure(testUntypedEdgeIsCell));

        // A non-cell right side can never equal a string.
        LBasicBlock lastNext = m_out.appendTo(testUntypedEdgeIsCell, testUntypedEdgeIsString);
        ValueFromBlock fastFalse = m_out.anchor(m_out.booleanFalse);
        m_out.branch(isNotCell(rightValue, rightValueType), unsure(continuation), unsure(testUntypedEdgeIsString));

        // Check if the untyped edge is a string; a non-string cell is false too.
        m_out.appendTo(testUntypedEdgeIsString, testStringEquality);
        m_out.branch(isNotString(rightValue, rightValueType), unsure(continuation), unsure(testStringEquality));

        // Full String compare.
        m_out.appendTo(testStringEquality, continuation);
        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue, stringEdge, untypedEdge));
        m_out.jump(continuation);

        // Continuation.
        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastTrue, fastFalse, slowResult));
    }
7630
7631 void compileCompareEqPtr()
7632 {
7633 setBoolean(
7634 m_out.equal(
7635 lowJSValue(m_node->child1()),
7636 weakPointer(m_node->cellOperand()->cell())));
7637 }
7638
7639 void compileCompareLess()
7640 {
7641 compare(
7642 [&] (LValue left, LValue right) {
7643 return m_out.lessThan(left, right);
7644 },
7645 [&] (LValue left, LValue right) {
7646 return m_out.doubleLessThan(left, right);
7647 },
7648 operationCompareStringImplLess,
7649 operationCompareStringLess,
7650 operationCompareLess);
7651 }
7652
7653 void compileCompareLessEq()
7654 {
7655 compare(
7656 [&] (LValue left, LValue right) {
7657 return m_out.lessThanOrEqual(left, right);
7658 },
7659 [&] (LValue left, LValue right) {
7660 return m_out.doubleLessThanOrEqual(left, right);
7661 },
7662 operationCompareStringImplLessEq,
7663 operationCompareStringLessEq,
7664 operationCompareLessEq);
7665 }
7666
7667 void compileCompareGreater()
7668 {
7669 compare(
7670 [&] (LValue left, LValue right) {
7671 return m_out.greaterThan(left, right);
7672 },
7673 [&] (LValue left, LValue right) {
7674 return m_out.doubleGreaterThan(left, right);
7675 },
7676 operationCompareStringImplGreater,
7677 operationCompareStringGreater,
7678 operationCompareGreater);
7679 }
7680
7681 void compileCompareGreaterEq()
7682 {
7683 compare(
7684 [&] (LValue left, LValue right) {
7685 return m_out.greaterThanOrEqual(left, right);
7686 },
7687 [&] (LValue left, LValue right) {
7688 return m_out.doubleGreaterThanOrEqual(left, right);
7689 },
7690 operationCompareStringImplGreaterEq,
7691 operationCompareStringGreaterEq,
7692 operationCompareGreaterEq);
7693 }
7694
7695 void compileCompareBelow()
7696 {
7697 setBoolean(m_out.below(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7698 }
7699
7700 void compileCompareBelowEq()
7701 {
7702 setBoolean(m_out.belowOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7703 }
7704
    void compileSameValue()
    {
        // SameValue: like === except that NaN equals NaN and +0 does not
        // equal -0.
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue arg1 = lowDouble(m_node->child1());
            LValue arg2 = lowDouble(m_node->child2());

            LBasicBlock numberCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Compare the raw 64-bit patterns of the two doubles. Unlike a
            // floating-point ==, this distinguishes +0 from -0 and treats
            // bit-identical NaNs as equal.
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(arg1, ValueRep::SomeRegister);
            patchpoint->append(arg2, ValueRep::SomeRegister);
            patchpoint->numGPScratchRegisters = 1;
            patchpoint->setGenerator(
                [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    GPRReg scratchGPR = params.gpScratch(0);
                    jit.moveDoubleTo64(params[1].fpr(), scratchGPR);
                    jit.moveDoubleTo64(params[2].fpr(), params[0].gpr());
                    jit.compare64(CCallHelpers::Equal, scratchGPR, params[0].gpr(), params[0].gpr());
                });
            patchpoint->effects = Effects::none();
            ValueFromBlock compareResult = m_out.anchor(patchpoint);
            m_out.branch(patchpoint, unsure(continuation), unsure(numberCase));

            // Bit patterns differ: the values are still SameValue if both
            // are NaN (doubleNotEqualOrUnordered(x, x) is the NaN test).
            LBasicBlock lastNext = m_out.appendTo(numberCase, continuation);
            LValue isArg1NaN = m_out.doubleNotEqualOrUnordered(arg1, arg1);
            LValue isArg2NaN = m_out.doubleNotEqualOrUnordered(arg2, arg2);
            ValueFromBlock nanResult = m_out.anchor(m_out.bitAnd(isArg1NaN, isArg2NaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, compareResult, nanResult));
            return;
        }

        // Generic case: defer to the runtime implementation.
        ASSERT(m_node->isBinaryUseKind(UntypedUse));
        setBoolean(vmCall(Int32, m_out.operation(operationSameValue), m_callFrame, lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
    }
7743
7744 void compileLogicalNot()
7745 {
7746 setBoolean(m_out.logicalNot(boolify(m_node->child1())));
7747 }
7748
    void compileCallOrConstruct()
    {
        // Emit a polymorphic JS call/construct via a patchpoint that contains
        // a patchable inline cache: a fast path that jumps straight to a
        // previously-linked callee, and a slow path through the link thunk.
        Node* node = m_node;
        // varArg child 0 is the callee; the rest are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        // JS->JS calling convention requires that the caller allows this much space on top of stack to
        // get trashed by the callee, even if not all of that space is used to pass arguments. We tell
        // B3 this explicitly for two reasons:
        //
        // - We will only pass frameSize worth of stuff.
        // - The trashed stack guarantee is logically separate from the act of passing arguments, so we
        //   shouldn't rely on Air to infer the trashed stack property based on the arguments it ends
        //   up seeing.
        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        // Collect the arguments, since this can generate code and we want to generate it before we emit
        // the call.
        Vector<ConstrainedValue> arguments;

        // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the
        // callee to be.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Place a value into the outgoing call frame, addressed relative to SP.
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        // Keep the tag registers live across the call.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record which call site we are at, for unwinding.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Inline cache: if the callee matches the patched pointer,
                // take the near call; otherwise go to the link/virtual thunk.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                CCallHelpers::Call fastCall = jit.nearCall();
                CCallHelpers::Jump done = jit.jump();

                slowPath.link(&jit);

                // The slow-path thunk expects the CallLinkInfo in regT2.
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();
                done.link(&jit);

                callLinkInfo->setUpCall(
                    node->op() == Construct ? CallLinkInfo::Construct : CallLinkInfo::Call,
                    node->origin.semantic, GPRInfo::regT0);

                // Restore our SP after the callee may have trashed the arg area.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                // Final call locations are only known at link time.
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });

        setJSValue(patchpoint);
    }
7852
7853 void compileDirectCallOrConstruct()
7854 {
7855 Node* node = m_node;
7856 bool isTail = node->op() == DirectTailCall;
7857 bool isConstruct = node->op() == DirectConstruct;
7858
7859 ExecutableBase* executable = node->castOperand<ExecutableBase*>();
7860 FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);
7861
7862 unsigned numPassedArgs = node->numChildren() - 1;
7863 unsigned numAllocatedArgs = numPassedArgs;
7864
7865 if (functionExecutable) {
7866 numAllocatedArgs = std::max(
7867 numAllocatedArgs,
7868 std::min(
7869 static_cast<unsigned>(functionExecutable->parameterCount()) + 1,
7870 Options::maximumDirectCallStackSize()));
7871 }
7872
7873 LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
7874
7875 if (!isTail) {
7876 unsigned frameSize = (CallFrame::headerSizeInRegisters + numAllocatedArgs) * sizeof(EncodedJSValue);
7877 unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);
7878
7879 m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
7880 }
7881
7882 Vector<ConstrainedValue> arguments;
7883
7884 arguments.append(ConstrainedValue(jsCallee, ValueRep::SomeRegister));
7885 if (!isTail) {
7886 auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
7887 intptr_t offsetFromSP =
7888 (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
7889 arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
7890 };
7891
7892 addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
7893 addArgument(m_out.constInt32(numPassedArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
7894 for (unsigned i = 0; i < numPassedArgs; ++i)
7895 addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
7896 for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
7897 addArgument(m_out.constInt64(JSValue::encode(jsUndefined())), virtualRegisterForArgument(i), 0);
7898 } else {
7899 for (unsigned i = 0; i < numPassedArgs; ++i)
7900 arguments.append(ConstrainedValue(lowJSValue(m_graph.varArgChild(node, 1 + i)), ValueRep::WarmAny));
7901 }
7902
7903 PatchpointValue* patchpoint = m_out.patchpoint(isTail ? Void : Int64);
7904 patchpoint->appendVector(arguments);
7905
7906 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
7907
7908 if (isTail) {
7909 // The shuffler needs tags.
7910 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
7911 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
7912 }
7913
7914 patchpoint->clobber(RegisterSet::macroScratchRegisters());
7915 if (!isTail) {
7916 patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
7917 patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
7918 }
7919
7920 CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
7921 State* state = &m_ftlState;
7922 patchpoint->setGenerator(
7923 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
7924 AllowMacroScratchRegisterUsage allowScratch(jit);
7925 CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
7926
7927 GPRReg calleeGPR = params[!isTail].gpr();
7928
7929 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
7930
7931 Box<CCallHelpers::JumpList> exceptions =
7932 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
7933
7934 if (isTail) {
7935 CallFrameShuffleData shuffleData;
7936 shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
7937
7938 RegisterSet toSave = params.unavailableRegisters();
7939 shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatCell);
7940 toSave.set(calleeGPR);
7941 for (unsigned i = 0; i < numPassedArgs; ++i) {
7942 ValueRecovery recovery = params[1 + i].recoveryForJSValue();
7943 shuffleData.args.append(recovery);
7944 recovery.forEachReg(
7945 [&] (Reg reg) {
7946 toSave.set(reg);
7947 });
7948 }
7949 for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
7950 shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
7951 shuffleData.numPassedArgs = numPassedArgs;
7952 shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
7953
7954 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
7955
7956 CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
7957 CCallHelpers::Label mainPath = jit.label();
7958
7959 jit.store32(
7960 CCallHelpers::TrustedImm32(callSiteIndex.bits()),
7961 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
7962
7963 callLinkInfo->setFrameShuffleData(shuffleData);
7964 CallFrameShuffler(jit, shuffleData).prepareForTailCall();
7965
7966 CCallHelpers::Call call = jit.nearTailCall();
7967
7968 jit.abortWithReason(JITDidReturnFromTailCall);
7969
7970 CCallHelpers::Label slowPath = jit.label();
7971 patchableJump.m_jump.linkTo(slowPath, &jit);
7972 callOperation(
7973 *state, toSave, jit,
7974 node->origin.semantic, exceptions.get(), operationLinkDirectCall,
7975 InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo), calleeGPR).call();
7976 jit.jump().linkTo(mainPath, &jit);
7977
7978 callLinkInfo->setUpCall(
7979 CallLinkInfo::DirectTailCall, node->origin.semantic, InvalidGPRReg);
7980 callLinkInfo->setExecutableDuringCompilation(executable);
7981 if (numAllocatedArgs > numPassedArgs)
7982 callLinkInfo->setMaxNumArguments(numAllocatedArgs);
7983
7984 jit.addLinkTask(
7985 [=] (LinkBuffer& linkBuffer) {
7986 CodeLocationLabel<JSInternalPtrTag> patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(patchableJump);
7987 CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
7988 CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);
7989
7990 callLinkInfo->setCallLocations(
7991 patchableJumpLocation,
7992 slowPathLocation,
7993 callLocation);
7994 });
7995 return;
7996 }
7997
7998 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
7999
8000 CCallHelpers::Label mainPath = jit.label();
8001
8002 jit.store32(
8003 CCallHelpers::TrustedImm32(callSiteIndex.bits()),
8004 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
8005
8006 CCallHelpers::Call call = jit.nearCall();
8007 jit.addPtr(
8008 CCallHelpers::TrustedImm32(-params.proc().frameSize()),
8009 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
8010
8011 callLinkInfo->setUpCall(
8012 isConstruct ? CallLinkInfo::DirectConstruct : CallLinkInfo::DirectCall,
8013 node->origin.semantic, InvalidGPRReg);
8014 callLinkInfo->setExecutableDuringCompilation(executable);
8015 if (numAllocatedArgs > numPassedArgs)
8016 callLinkInfo->setMaxNumArguments(numAllocatedArgs);
8017
8018 params.addLatePath(
8019 [=] (CCallHelpers& jit) {
8020 AllowMacroScratchRegisterUsage allowScratch(jit);
8021
8022 CCallHelpers::Label slowPath = jit.label();
8023 if (isX86())
8024 jit.pop(CCallHelpers::selectScratchGPR(calleeGPR));
8025
8026 callOperation(
8027 *state, params.unavailableRegisters(), jit,
8028 node->origin.semantic, exceptions.get(), operationLinkDirectCall,
8029 InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo),
8030 calleeGPR).call();
8031 jit.jump().linkTo(mainPath, &jit);
8032
8033 jit.addLinkTask(
8034 [=] (LinkBuffer& linkBuffer) {
8035 CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
8036 CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);
8037
8038 linkBuffer.link(call, slowPathLocation);
8039
8040 callLinkInfo->setCallLocations(
8041 CodeLocationLabel<JSInternalPtrTag>(),
8042 slowPathLocation,
8043 callLocation);
8044 });
8045 });
8046 });
8047
8048 if (isTail)
8049 patchpoint->effects.terminal = true;
8050 else
8051 setJSValue(patchpoint);
8052 }
8053
    // Lowers a TailCall node. Emits a Void, terminal patchpoint whose generator
    // shuffles the already-lowered callee and arguments into the current frame
    // (destroying it) and tail-calls through a patchable inline cache, with a
    // slow path that routes through the generic call link thunk.
    void compileTailCall()
    {
        Node* node = m_node;
        // Children are [callee, arg0, arg1, ...], so the argument count is
        // numChildren() - 1.
        unsigned numArgs = node->numChildren() - 1;

        // It seems counterintuitive that this is needed given that tail calls don't create a new frame
        // on the stack. However, the tail call slow path builds the frame at SP instead of FP before
        // calling into the slow path C code. This slow path may decide to throw an exception because
        // the callee we're trying to call is not callable. Throwing an exception will cause us to walk
        // the stack, which may read, for the sake of the correctness of this code, arbitrary slots on the
        // stack to recover state. This call arg area ensures the call frame shuffler does not overwrite
        // any of the slots the stack walking code requires when on the slow path.
        m_proc.requestCallArgAreaSizeInBytes(
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue)));

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // We want B3 to give us all of the arguments using whatever mechanism it thinks is
        // convenient. The generator then shuffles those arguments into our own call frame,
        // destroying our frame in the process.

        // Note that we don't have to do anything special for exceptions. A tail call is only a
        // tail call if it is not inside a try block.

        Vector<ConstrainedValue> arguments;

        // The callee is pinned to regT0 so the generator's patchable callee check
        // and the slow path below know where it lives.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        for (unsigned i = 0; i < numArgs; ++i) {
            // Note: we could let the shuffler do boxing for us, but it's not super clear that this
            // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler
            // that 32-bit values could land at 4-byte alignment but not 8-byte alignment.

            ConstrainedValue constrainedValue(
                lowJSValue(m_graph.varArgChild(node, 1 + i)),
                ValueRep::WarmAny);
            arguments.append(constrainedValue);
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendVector(arguments);

        // The call frame shuffler needs the tag registers to hold their usual values.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));

        // Prevent any of the arguments from using the scratch register.
        patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());

        patchpoint->effects.terminal = true;

        // We don't have to tell the patchpoint that we will clobber registers, since we won't return
        // anyway.

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                // Yes, this is really necessary. You could throw an exception in a host call on the
                // slow path. That'll route us to lookupExceptionHandler(), which unwinds starting
                // with the call site index of our frame. Bad things happen if it's not set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallFrameShuffleData shuffleData;
                shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
                shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);

                // Record where B3 actually placed each argument (params[0] is the
                // callee, so argument i is params[1 + i]) so the shuffler can
                // recover and move it.
                for (unsigned i = 0; i < numArgs; ++i)
                    shuffleData.args.append(params[1 + i].recoveryForJSValue());

                shuffleData.numPassedArgs = numArgs;

                shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Fast path: compare the callee in regT0 against the patchable
                // cached callee; on mismatch, fall through to the slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                callLinkInfo->setFrameShuffleData(shuffleData);
                CallFrameShuffler(jit, shuffleData).prepareForTailCall();

                CCallHelpers::Call fastCall = jit.nearTailCall();

                slowPath.link(&jit);

                // Slow path: shuffle the frame for the link stub, keeping the
                // callee JSValue in regT0 for the thunk.
                CallFrameShuffler slowPathShuffler(jit, shuffleData);
                slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
                slowPathShuffler.prepareForSlowPath();

                // The link thunk expects the CallLinkInfo in regT2.
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();

                // Control should never come back after a tail call; crash with a
                // diagnostic if it does.
                jit.abortWithReason(JITDidReturnFromTailCall);

                callLinkInfo->setUpCall(CallLinkInfo::TailCall, codeOrigin, GPRInfo::regT0);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        // Bind the slow call to the generic call link thunk and
                        // record the three locations the inline cache needs for
                        // later (re)patching: slow call, patchable compare, fast call.
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });
    }
8170
    // Lowers a varargs call/construct/tail-call whose arguments node is a
    // phantom spread form (PhantomNewArrayWithSpread, PhantomSpread, or
    // PhantomNewArrayBuffer). Statically-known arguments are passed to the
    // patchpoint individually; spread-of-rest arguments contribute only their
    // lengths, and the generator copies them out of the inline call frame's
    // argument area at runtime.
    void compileCallOrConstructVarargsSpread()
    {
        Node* node = m_node;
        Node* arguments = node->child3().node();

        LValue jsCallee = lowJSValue(m_node->child1());
        LValue thisArg = lowJSValue(m_node->child2());

        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread || arguments->op() == PhantomNewArrayBuffer);

        unsigned staticArgumentCount = 0;
        Vector<LValue, 2> spreadLengths;
        Vector<LValue, 8> patchpointArguments;
        // One length per distinct inline call frame, so the same rest-length is
        // not recomputed for repeated spreads of the same frame's arguments.
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
        // Walks the phantom argument tree right-to-left, counting statically
        // known arguments (and lowering them into patchpointArguments) and
        // collecting runtime lengths for PhantomCreateRest spreads.
        auto pushAndCountArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = target->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else {
                        ++staticArgumentCount;
                        LValue argument = this->lowJSValue(m_graph.varArgChild(target, i));
                        patchpointArguments.append(argument);
                    }
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                // The buffer's contents are compile-time constants; the generator
                // materializes them directly, so only the count matters here.
                staticArgumentCount += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
            }).iterator->value;
            patchpointArguments.append(length);
            spreadLengths.append(length);
        });

        pushAndCountArgumentsFromRightToLeft(arguments);
        // Total argument count = static arguments + `this` + all runtime spread lengths.
        LValue argumentCountIncludingThis = m_out.constIntPtr(staticArgumentCount + 1);
        for (LValue length : spreadLengths)
            argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);

        // Patchpoint operand layout (mirrored by the params[...] indices in the
        // generator): [1] callee (pinned to regT0), [2] this, [3] argument count
        // including this, [4...] static arguments / spread lengths right-to-left.
        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
        patchpoint->append(thisArg, ValueRep::WarmAny);
        patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
        patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        patchpoint->numGPScratchRegisters = 0;

        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
        unsigned minimumJSCallAreaSize =
            sizeof(CallerFrameAndPC) +
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));

        m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // The unwinder reads the call site index from the frame; it must
                // be set before anything on this path can throw.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Scratch registers must avoid everything the patchpoint inputs
                // may live in: all non-volatile registers plus the callee.
                RegisterSet usedRegisters = RegisterSet::allRegisters();
                usedRegisters.exclude(RegisterSet::volatileRegistersForJSCall());
                GPRReg calleeGPR = params[1].gpr();
                usedRegisters.set(calleeGPR);

                ScratchRegisterAllocator allocator(usedRegisters);
                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
                GPRReg scratchGPR3 = allocator.allocateScratchGPR();
                GPRReg scratchGPR4 = allocator.allocateScratchGPR();
                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());

                // Materializes a patchpoint operand (constant, stack slot, or
                // register) into the given result register.
                auto getValueFromRep = [&] (B3::ValueRep rep, GPRReg result) {
                    ASSERT(!usedRegisters.get(result));

                    if (rep.isConstant()) {
                        jit.move(CCallHelpers::Imm64(rep.value()), result);
                        return;
                    }

                    // Note: in this function, we only request 64 bit values.
                    if (rep.isStack()) {
                        jit.load64(
                            CCallHelpers::Address(GPRInfo::callFrameRegister, rep.offsetFromFP()),
                            result);
                        return;
                    }

                    RELEASE_ASSERT(rep.isGPR());
                    ASSERT(usedRegisters.get(rep.gpr()));
                    jit.move(rep.gpr(), result);
                };

                // Calls a C operation and appends its exception check to the
                // patchpoint's exception jump list.
                auto callWithExceptionCheck = [&] (void* callee) {
                    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
                    jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                    exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                };

                CCallHelpers::JumpList slowCase;
                unsigned originalStackHeight = params.proc().frameSize();

                {
                    // Compute the new frame's base (scratchGPR1) below the current
                    // frame and store the argument count (scratchGPR2) into it.
                    unsigned numUsedSlots = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), originalStackHeight / sizeof(EncodedJSValue));
                    B3::ValueRep argumentCountIncludingThisRep = params[3];
                    getValueFromRep(argumentCountIncludingThisRep, scratchGPR2);
                    // Bail to the throw path if the spread produced too many arguments.
                    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR2, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));

                    jit.move(scratchGPR2, scratchGPR1);
                    jit.addPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(numUsedSlots + CallFrame::headerSizeInRegisters)), scratchGPR1);
                    // scratchGPR1 now has the required frame size in Register units
                    // Round scratchGPR1 to next multiple of stackAlignmentRegisters()
                    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), scratchGPR1);
                    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), scratchGPR1);
                    jit.negPtr(scratchGPR1);
                    jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight), scratchGPR1);

                    // Before touching stack values, we should update the stack pointer to protect them from signal stack.
                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);

                    jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

                    int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));

                    // Operands [4...] of the patchpoint are the flattened arguments.
                    unsigned paramsOffset = 4;
                    unsigned index = 0;
                    // Fills the new frame's argument slots right-to-left, using
                    // scratchGPR2 as a count-down slot index into the frame at
                    // scratchGPR1.
                    auto emitArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
                        if (target->op() == PhantomSpread) {
                            self(target->child1().node());
                            return;
                        }

                        if (target->op() == PhantomNewArrayWithSpread) {
                            BitVector* bitVector = target->bitVector();
                            for (unsigned i = target->numChildren(); i--; ) {
                                if (bitVector->get(i))
                                    self(state->graph.varArgChild(target, i).node());
                                else {
                                    jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                                    getValueFromRep(params[paramsOffset + (index++)], scratchGPR3);
                                    jit.store64(scratchGPR3,
                                        CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                                }
                            }
                            return;
                        }

                        if (target->op() == PhantomNewArrayBuffer) {
                            auto* array = target->castOperand<JSImmutableButterfly*>();
                            Checked<int32_t> offsetCount { 1 };
                            for (unsigned i = array->length(); i--; ++offsetCount) {
                                // Because varargs values are drained as JSValue, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                jit.move(CCallHelpers::TrustedImm64(value), scratchGPR3);
                                Checked<int32_t> currentStoreOffset { storeOffset };
                                currentStoreOffset -= (offsetCount * static_cast<int32_t>(sizeof(Register)));
                                jit.store64(scratchGPR3,
                                    CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, currentStoreOffset.unsafeGet()));
                            }
                            // Consume all of the buffer's slots from the count-down index at once.
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(array->length())), scratchGPR2);
                            return;
                        }

                        RELEASE_ASSERT(target->op() == PhantomCreateRest);
                        InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

                        unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();

                        // The spread length was passed as a patchpoint operand; copy
                        // that many values from the inline frame's argument area.
                        B3::ValueRep numArgumentsToCopy = params[paramsOffset + (index++)];
                        getValueFromRep(numArgumentsToCopy, scratchGPR3);
                        int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));

                        // Copy loop: scratchGPR3 counts down over the source
                        // arguments, scratchGPR2 over the destination slots.
                        auto done = jit.branchTestPtr(MacroAssembler::Zero, scratchGPR3);
                        auto loopStart = jit.label();
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR3);
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                        jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR3, CCallHelpers::TimesEight, loadOffset), scratchGPR4);
                        jit.store64(scratchGPR4,
                            CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                        jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
                        done.link(&jit);
                    });
                    emitArgumentsFromRightToLeft(arguments);
                }

                {
                    // Stack-overflow path: throw; this operation must not return.
                    CCallHelpers::Jump dontThrow = jit.jump();
                    slowCase.link(&jit);
                    jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
                    callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);

                    dontThrow.link(&jit);
                }

                // Store callee and `this` (params[2]) into the new frame.
                ASSERT(calleeGPR == GPRInfo::regT0);
                jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
                getValueFromRep(params[2], scratchGPR3);
                jit.store64(scratchGPR3, CCallHelpers::calleeArgumentSlot(0));

                CallLinkInfo::CallType callType;
                if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
                    callType = CallLinkInfo::ConstructVarargs;
                else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
                    callType = CallLinkInfo::TailCallVarargs;
                else
                    callType = CallLinkInfo::CallVarargs;

                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;

                // Inline cache: patchable compare of the callee in regT0 against
                // the cached target; mismatch goes to the slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                CCallHelpers::Call fastCall;
                CCallHelpers::Jump done;

                if (isTailCall) {
                    jit.emitRestoreCalleeSaves();
                    jit.prepareForTailCallSlow();
                    fastCall = jit.nearTailCall();
                } else {
                    fastCall = jit.nearCall();
                    done = jit.jump();
                }

                slowPath.link(&jit);

                if (isTailCall)
                    jit.emitRestoreCalleeSaves();
                // The link thunk expects the CallLinkInfo in regT2.
                ASSERT(!usedRegisters.get(GPRInfo::regT2));
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();

                if (isTailCall)
                    jit.abortWithReason(JITDidReturnFromTailCall);
                else
                    done.link(&jit);

                callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);

                // Restore SP to this frame's normal stack height after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-originalStackHeight),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        // Bind the slow call to the generic call link thunk and
                        // record the IC locations for later (re)patching.
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });

        // Tail-call forms never return to this frame; all other forms produce a
        // JSValue result in returnValueGPR.
        switch (node->op()) {
        case TailCallForwardVarargs:
            m_out.unreachable();
            break;

        default:
            setJSValue(patchpoint);
            break;
        }
    }
8478
8479 void compileCallOrConstructVarargs()
8480 {
8481 Node* node = m_node;
8482 LValue jsCallee = lowJSValue(m_node->child1());
8483 LValue thisArg = lowJSValue(m_node->child2());
8484
8485 LValue jsArguments = nullptr;
8486 bool forwarding = false;
8487
8488 switch (node->op()) {
8489 case CallVarargs:
8490 case TailCallVarargs:
8491 case TailCallVarargsInlinedCaller:
8492 case ConstructVarargs:
8493 jsArguments = lowJSValue(node->child3());
8494 break;
8495 case CallForwardVarargs:
8496 case TailCallForwardVarargs:
8497 case TailCallForwardVarargsInlinedCaller:
8498 case ConstructForwardVarargs:
8499 forwarding = true;
8500 break;
8501 default:
8502 DFG_CRASH(m_graph, node, "bad node type");
8503 break;
8504 }
8505
8506 if (forwarding && m_node->child3()) {
8507 Node* arguments = m_node->child3().node();
8508 if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
8509 compileCallOrConstructVarargsSpread();
8510 return;
8511 }
8512 }
8513
8514
8515 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
8516
8517 // Append the forms of the arguments that we will use before any clobbering happens.
8518 patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
8519 if (jsArguments)
8520 patchpoint->appendSomeRegister(jsArguments);
8521 patchpoint->appendSomeRegister(thisArg);
8522
8523 if (!forwarding) {
8524 // Now append them again for after clobbering. Note that the compiler may ask us to use a
8525 // different register for the late for the post-clobbering version of the value. This gives
8526 // the compiler a chance to spill these values without having to burn any callee-saves.
8527 patchpoint->append(jsCallee, ValueRep::LateColdAny);
8528 patchpoint->append(jsArguments, ValueRep::LateColdAny);
8529 patchpoint->append(thisArg, ValueRep::LateColdAny);
8530 }
8531
8532 RefPtr<PatchpointExceptionHandle> exceptionHandle =
8533 preparePatchpointForExceptions(patchpoint);
8534
8535 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
8536 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
8537
8538 patchpoint->clobber(RegisterSet::macroScratchRegisters());
8539 patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
8540 patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
8541
8542 // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
8543 unsigned minimumJSCallAreaSize =
8544 sizeof(CallerFrameAndPC) +
8545 WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
8546
8547 m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
8548
8549 CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
8550 State* state = &m_ftlState;
8551 VM* vm = &this->vm();
8552 patchpoint->setGenerator(
8553 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
8554 AllowMacroScratchRegisterUsage allowScratch(jit);
8555 CallSiteIndex callSiteIndex =
8556 state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
8557
8558 Box<CCallHelpers::JumpList> exceptions =
8559 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
8560
8561 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
8562
8563 jit.store32(
8564 CCallHelpers::TrustedImm32(callSiteIndex.bits()),
8565 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
8566
8567 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
8568 CallVarargsData* data = node->callVarargsData();
8569
8570 unsigned argIndex = 1;
8571 GPRReg calleeGPR = params[argIndex++].gpr();
8572 ASSERT(calleeGPR == GPRInfo::regT0);
8573 GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
8574 GPRReg thisGPR = params[argIndex++].gpr();
8575
8576 B3::ValueRep calleeLateRep;
8577 B3::ValueRep argumentsLateRep;
8578 B3::ValueRep thisLateRep;
8579 if (!forwarding) {
8580 // If we're not forwarding then we'll need callee, arguments, and this after we
8581 // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
8582 // for this is to supply all of those operands as late uses in addition to
8583 // specifying them as early uses. It's possible that the late use uses a spill
8584 // while the early use uses a register, and it's possible for the late and early
8585 // uses to use different registers. We do know that the late uses interfere with
8586 // all volatile registers and so won't use those, but the early uses may use
8587 // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
8588 // definitely will.
8589 //
8590 // Note that we have to be super careful with these. It's possible that these
8591 // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
8592 // thisGPR. If that happens and we do for example:
8593 //
8594 // calleeLateRep.emitRestore(jit, calleeGPR);
8595 // argumentsLateRep.emitRestore(jit, calleeGPR);
8596 //
8597 // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
8598 // argumentsLateRep.gpr() == calleeGPR.
8599 //
8600 // We do a variety of things to prevent this from happening. For example, we use
8601 // argumentsLateRep before needing the other two and after we've already stopped
8602 // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
8603 // the *LateReps cannot use volatile registers (so they cannot be regT0, so
8604 // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
8605 //
8606 // An alternative would have been to just use early uses and early-clobber all
8607 // volatile registers. But that would force callee, arguments, and this into
8608 // callee-save registers even if we have to spill them. We don't want spilling to
8609 // use up three callee-saves.
8610 //
8611 // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
8612 // some desirable performance properties, so don't mistake the cleverness for
8613 // elegance.
8614 calleeLateRep = params[argIndex++];
8615 argumentsLateRep = params[argIndex++];
8616 thisLateRep = params[argIndex++];
8617 }
8618
8619 // Get some scratch registers.
8620 RegisterSet usedRegisters;
8621 usedRegisters.merge(RegisterSet::stackRegisters());
8622 usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
8623 usedRegisters.merge(RegisterSet::calleeSaveRegisters());
8624 usedRegisters.set(calleeGPR);
8625 if (argumentsGPR != InvalidGPRReg)
8626 usedRegisters.set(argumentsGPR);
8627 usedRegisters.set(thisGPR);
8628 if (calleeLateRep.isReg())
8629 usedRegisters.set(calleeLateRep.reg());
8630 if (argumentsLateRep.isReg())
8631 usedRegisters.set(argumentsLateRep.reg());
8632 if (thisLateRep.isReg())
8633 usedRegisters.set(thisLateRep.reg());
8634 ScratchRegisterAllocator allocator(usedRegisters);
8635 GPRReg scratchGPR1 = allocator.allocateScratchGPR();
8636 GPRReg scratchGPR2 = allocator.allocateScratchGPR();
8637 GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
8638 RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
8639
8640 auto callWithExceptionCheck = [&] (void* callee) {
8641 jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
8642 jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
8643 exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
8644 };
8645
8646 unsigned originalStackHeight = params.proc().frameSize();
8647
8648 if (forwarding) {
8649 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8650
8651 CCallHelpers::JumpList slowCase;
8652 InlineCallFrame* inlineCallFrame;
8653 if (node->child3())
8654 inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame();
8655 else
8656 inlineCallFrame = node->origin.semantic.inlineCallFrame();
8657
8658 // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
8659 emitSetupVarargsFrameFastCase(*vm, jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
8660
8661 CCallHelpers::Jump done = jit.jump();
8662 slowCase.link(&jit);
8663 jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
8664 callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
8665 jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
8666
8667 done.link(&jit);
8668 } else {
8669 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
8670 jit.setupArguments<decltype(operationSizeFrameForVarargs)>(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
8671 callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
8672
8673 jit.move(GPRInfo::returnValueGPR, scratchGPR1);
8674 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8675 argumentsLateRep.emitRestore(jit, argumentsGPR);
8676 emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
8677 jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
8678 jit.setupArguments<decltype(operationSetupVarargsFrame)>(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
8679 callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
8680
8681 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);
8682
8683 calleeLateRep.emitRestore(jit, GPRInfo::regT0);
8684
8685 // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed
8686 // that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
8687 thisLateRep.emitRestore(jit, thisGPR);
8688 }
8689
8690 jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
8691 jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
8692
8693 CallLinkInfo::CallType callType;
8694 if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
8695 callType = CallLinkInfo::ConstructVarargs;
8696 else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
8697 callType = CallLinkInfo::TailCallVarargs;
8698 else
8699 callType = CallLinkInfo::CallVarargs;
8700
8701 bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
8702
8703 CCallHelpers::DataLabelPtr targetToCheck;
8704 CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
8705 CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
8706 CCallHelpers::TrustedImmPtr(nullptr));
8707
8708 CCallHelpers::Call fastCall;
8709 CCallHelpers::Jump done;
8710
8711 if (isTailCall) {
8712 jit.emitRestoreCalleeSaves();
8713 jit.prepareForTailCallSlow();
8714 fastCall = jit.nearTailCall();
8715 } else {
8716 fastCall = jit.nearCall();
8717 done = jit.jump();
8718 }
8719
8720 slowPath.link(&jit);
8721
8722 if (isTailCall)
8723 jit.emitRestoreCalleeSaves();
8724 jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
8725 CCallHelpers::Call slowCall = jit.nearCall();
8726
8727 if (isTailCall)
8728 jit.abortWithReason(JITDidReturnFromTailCall);
8729 else
8730 done.link(&jit);
8731
8732 callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
8733
8734 jit.addPtr(
8735 CCallHelpers::TrustedImm32(-originalStackHeight),
8736 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
8737
8738 jit.addLinkTask(
8739 [=] (LinkBuffer& linkBuffer) {
8740 MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
8741 linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
8742
8743 callLinkInfo->setCallLocations(
8744 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
8745 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
8746 linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
8747 });
8748 });
8749
8750 switch (node->op()) {
8751 case TailCallVarargs:
8752 case TailCallForwardVarargs:
8753 m_out.unreachable();
8754 break;
8755
8756 default:
8757 setJSValue(patchpoint);
8758 break;
8759 }
8760 }
8761
    void compileCallEval()
    {
        // Lowers CallEval. The callee frame is populated eagerly through
        // stackArgument constraints, then the runtime's operationCallEval is
        // invoked; if it returns the empty value (zero), we fall back to a
        // plain virtual call of the callee.
        Node* node = m_node;
        // Child 0 is the callee; children 1..numChildren-1 are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // Reserve enough call-argument area for the whole callee frame.
        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        Vector<ConstrainedValue> arguments;
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Pin a value into the callee frame slot for |reg|, expressed as an
        // offset from the stack pointer (the new frame starts
        // CallerFrameAndPC::sizeInRegisters slots below SP).
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        // The call needs the tag registers live, clobbers the macro scratch
        // registers early and every JS-call-volatile register late, and
        // produces its result in the ABI return register.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM& vm = this->vm();
        patchpoint->setGenerator(
            [=, &vm] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record which call site we are at so unwinding can find us.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                callLinkInfo->setUpCall(CallLinkInfo::Call, node->origin.semantic, GPRInfo::regT0);

                // regT1 = pointer to the callee frame we populated above, and
                // link it back to the current frame.
                jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), CCallHelpers::stackPointerRegister, GPRInfo::regT1);
                jit.storePtr(GPRInfo::callFrameRegister, CCallHelpers::Address(GPRInfo::regT1, CallFrame::callerFrameOffset()));

                // Now we need to make room for:
                // - The caller frame and PC for a call to operationCallEval.
                // - Potentially two arguments on the stack.
                unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
                requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
                jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.setupArguments<decltype(operationCallEval)>(GPRInfo::regT1);
                jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR0);
                jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));

                // A non-zero result means operationCallEval handled the call.
                CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);

                // Otherwise, undo the extra stack adjustment, reload the callee
                // from the frame we built, and do an unoptimized virtual call.
                jit.addPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.load64(CCallHelpers::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
                jit.emitDumbVirtualCall(vm, callLinkInfo);

                done.link(&jit);
                // Restore SP to this procedure's canonical frame extent.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            });

        setJSValue(patchpoint);
    }
8846
    void compileLoadVarargs()
    {
        // Lowers LoadVarargs: asks the runtime for the arguments length,
        // speculates that the resulting frame fits within the allotted limit,
        // records the count, then has the runtime copy the arguments into the
        // machine frame slots reserved for this varargs call.
        LoadVarargsData* data = m_node->loadVarargsData();
        LValue jsArguments = lowJSValue(m_node->child1());

        LValue length = vmCall(
            Int32, m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments,
            m_out.constInt32(data->offset));

        // FIXME: There is a chance that we will call an effectful length property twice. This is safe
        // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
        // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
        // past the sizing.
        // https://bugs.webkit.org/show_bug.cgi?id=141448

        LValue lengthIncludingThis = m_out.add(length, m_out.int32One);

        // If length + 1 wrapped around, lengthIncludingThis compares
        // (unsigned) below length — treat that as overflow too.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(length, lengthIncludingThis));

        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        // Turn the machineStart slot's address back into a VirtualRegister
        // offset (frame slots are 8 bytes, hence the shift by 3).
        // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
        // of a VirtualRegister, we wouldn't have to do this.
        // https://bugs.webkit.org/show_bug.cgi?id=141660
        LValue machineStart = m_out.lShr(
            m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame),
            m_out.constIntPtr(3));

        vmCall(
            Void, m_out.operation(operationLoadVarargs), m_callFrame,
            m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
            length, m_out.constInt32(data->mandatoryMinimum));
    }
8886
    void compileForwardVarargs()
    {
        // Lowers ForwardVarargs: copies the arguments of an inlined (or
        // machine) call frame directly into this frame's varargs slots,
        // without materializing an arguments object.
        if (m_node->child1()) {
            // When the arguments come from a phantom allocation involving
            // spreads, a separate lowering reconstitutes them.
            Node* arguments = m_node->child1().node();
            if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
                compileForwardVarargsWithSpread();
                return;
            }
        }

        LoadVarargsData* data = m_node->loadVarargsData();
        InlineCallFrame* inlineCallFrame;
        if (m_node->child1())
            inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
        else
            inlineCallFrame = m_node->origin.semantic.inlineCallFrame();

        LValue length = nullptr;
        LValue lengthIncludingThis = nullptr;
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            // Constant-fold: length = max(knownLength - offset, 0).
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= data->offset)
                knownLength = knownLength - data->offset;
            else
                knownLength = 0;
            length = m_out.constInt32(knownLength);
            lengthIncludingThis = m_out.constInt32(knownLength + 1);
        } else {
            // We need to perform the same logical operation as the code above, but through dynamic operations.
            if (!data->offset)
                length = argumentsLength.value;
            else {
                // Saturating subtraction: 0 if value <= offset, else the difference.
                LBasicBlock isLarger = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
                m_out.branch(
                    m_out.above(argumentsLength.value, m_out.constInt32(data->offset)), unsure(isLarger), unsure(continuation));
                LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
                ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(data->offset)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                length = m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
            }
            lengthIncludingThis = m_out.add(length, m_out.constInt32(1));
        }

        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        unsigned numberOfArgumentsToSkip = data->offset;
        LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
        LValue targetStart = addressFor(data->machineStart).value();

        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock mainLoopEntry = m_out.newBlock();
        LBasicBlock mainLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Both loops walk indices downward (phi of previousIndex, then
        // currentIndex = previousIndex - 1). The first loop fills slots in
        // [length, mandatoryMinimum) with undefined; the second copies the
        // actual arguments over [0, length).
        LValue lengthAsPtr = m_out.zeroExtPtr(length);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        ValueFromBlock loopBound = m_out.anchor(loopBoundValue);
        m_out.branch(
            m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
        LValue previousIndex = m_out.phi(pointerType(), loopBound);
        LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        ValueFromBlock nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(
            m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        m_out.appendTo(mainLoopEntry, mainLoop);
        // Re-anchor the loop bound at length for the copy loop; skip the loop
        // entirely when there is nothing to copy.
        loopBound = m_out.anchor(lengthAsPtr);
        m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation));

        m_out.appendTo(mainLoop, continuation);
        previousIndex = m_out.phi(pointerType(), loopBound);
        currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        LValue value = m_out.load64(
            m_out.baseIndex(m_heaps.variables, sourceStart, currentIndex));
        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));

        m_out.appendTo(continuation, lastNext);
    }
8984
    // Returns the (Int32) number of arguments a rest/spread of the given
    // inline call frame contributes: the frame's argument count minus
    // numberOfArgumentsToSkip, clamped at zero. Constant-folds when the
    // frame's argument count is statically known.
    LValue getSpreadLengthFromInlineCallFrame(InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip)
    {
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= numberOfArgumentsToSkip)
                knownLength = knownLength - numberOfArgumentsToSkip;
            else
                knownLength = 0;
            return m_out.constInt32(knownLength);
        }


        // We need to perform the same logical operation as the code above, but through dynamic operations.
        if (!numberOfArgumentsToSkip)
            return argumentsLength.value;

        // Saturating subtraction: 0 when value <= skip count, else the difference.
        LBasicBlock isLarger = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
        m_out.branch(
            m_out.above(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)), unsure(isLarger), unsure(continuation));
        LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
        ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
    }
9015
    void compileForwardVarargsWithSpread()
    {
        // Forwards varargs whose argument list is a phantom allocation
        // (PhantomSpread / PhantomNewArrayWithSpread / PhantomNewArrayBuffer):
        // the array was never materialized, so the arguments are written
        // straight into this frame's varargs slots. Works in two recursive
        // passes over the phantom tree: count, then copy.
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

        Node* arguments = m_node->child1().node();
        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread);

        unsigned numberOfStaticArguments = 0;
        Vector<LValue, 2> spreadLengths;

        // Pass 1: count statically-known arguments and collect the dynamic
        // length of each spread rest segment, memoized per inline call frame.
        auto collectArgumentCount = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    // Bit set => child i is itself a spread; otherwise it is
                    // one ordinary argument.
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else
                        ++numberOfStaticArguments;
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                numberOfStaticArguments += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            spreadLengths.append(cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
            }).iterator->value);
        });

        collectArgumentCount(arguments);
        // Total count = 1 (for |this|) + static arguments + all spread lengths.
        LValue lengthIncludingThis = m_out.constInt32(1 + numberOfStaticArguments);
        for (LValue length : spreadLengths)
            lengthIncludingThis = m_out.add(lengthIncludingThis, length);

        LoadVarargsData* data = m_node->loadVarargsData();
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        LValue targetStart = addressFor(data->machineStart).value();

        // Pass 2: write the arguments in order. Each invocation returns the
        // updated store index (an IntPtr slot index into the varargs area).
        auto forwardSpread = recursableLambda([this, &cachedSpreadLengths, &targetStart](auto self, Node* target, LValue storeIndex) -> LValue {
            if (target->op() == PhantomSpread)
                return self(target->child1().node(), storeIndex);

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        storeIndex = self(m_graph.varArgChild(target, i).node(), storeIndex);
                    else {
                        LValue value = this->lowJSValue(m_graph.varArgChild(target, i));
                        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
                        storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
                    }
                }
                return storeIndex;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                // The buffer's contents are compile-time constants; emit one
                // store per element at a fixed byte offset from storeIndex.
                auto* array = target->castOperand<JSImmutableButterfly*>();
                for (unsigned i = 0; i < array->length(); i++) {
                    // Because forwarded values are drained as JSValue, we should not generate value
                    // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                    int64_t value = JSValue::encode(array->get(i));
                    m_out.store64(m_out.constInt64(value), m_out.baseIndex(m_heaps.variables, targetStart, storeIndex, JSValue(), (Checked<int32_t>(sizeof(Register)) * i).unsafeGet()));
                }
                return m_out.add(m_out.constIntPtr(array->length()), storeIndex);
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

            LValue sourceStart = this->getArgumentsStart(inlineCallFrame, target->numberOfArgumentsToSkip());
            LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));

            // Copy loop: advance loadIndex over the source frame's arguments
            // and storeIndex over the target slots in lockstep.
            LBasicBlock loop = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            ValueFromBlock startLoadIndex = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
            ValueFromBlock startStoreIndexForEnd = m_out.anchor(storeIndex);

            m_out.branch(m_out.isZero64(spreadLength), unsure(continuation), unsure(loop));

            LBasicBlock lastNext = m_out.appendTo(loop, continuation);
            LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
            LValue loadIndex = m_out.phi(Int64, startLoadIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
            m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
            LValue nextLoadIndex = m_out.add(m_out.constIntPtr(1), loadIndex);
            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
            LValue nextStoreIndex = m_out.add(m_out.constIntPtr(1), loopStoreIndex);
            m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextStoreIndex));
            ValueFromBlock loopStoreIndexForEnd = m_out.anchor(nextStoreIndex);
            m_out.branch(m_out.below(nextLoadIndex, spreadLength), unsure(loop), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
        });

        LValue storeIndex = forwardSpread(arguments, m_out.constIntPtr(0));

        // Pad any remaining slots up to mandatoryMinimum with undefined.
        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        m_out.branch(m_out.below(storeIndex, loopBoundValue),
            unsure(undefinedLoop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, continuation);
        LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
        LValue nextIndex = m_out.add(loopStoreIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextIndex));
        m_out.branch(
            m_out.below(nextIndex, loopBoundValue), unsure(undefinedLoop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);
    }
9152
9153 void compileJump()
9154 {
9155 m_out.jump(lowBlock(m_node->targetBlock()));
9156 }
9157
9158 void compileBranch()
9159 {
9160 m_out.branch(
9161 boolify(m_node->child1()),
9162 WeightedTarget(
9163 lowBlock(m_node->branchData()->taken.block),
9164 m_node->branchData()->taken.count),
9165 WeightedTarget(
9166 lowBlock(m_node->branchData()->notTaken.block),
9167 m_node->branchData()->notTaken.count));
9168 }
9169
    void compileSwitch()
    {
        // Lowers Switch by kind. Each case normalizes the scrutinee (unboxing,
        // string resolution, etc.) and then defers to buildSwitch() or
        // switchString(); values that cannot possibly match any case branch to
        // the fallThrough block.
        SwitchData* data = m_node->switchData();
        switch (data->kind) {
        case SwitchImm: {
            // Funnel all paths that produce an int32 into one block, then
            // switch on the phi of their results.
            Vector<ValueFromBlock, 2> intValues;
            LBasicBlock switchOnInts = m_out.newBlock();

            LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);

            switch (m_node->child1().useKind()) {
            case Int32Use: {
                intValues.append(m_out.anchor(lowInt32(m_node->child1())));
                m_out.jump(switchOnInts);
                break;
            }

            case UntypedUse: {
                LBasicBlock isInt = m_out.newBlock();
                LBasicBlock isNotInt = m_out.newBlock();
                LBasicBlock isDouble = m_out.newBlock();

                LValue boxedValue = lowJSValue(m_node->child1());
                m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt));

                LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);

                intValues.append(m_out.anchor(unboxInt32(boxedValue)));
                m_out.jump(switchOnInts);

                // Cells and non-number misc values can never match an
                // immediate case.
                m_out.appendTo(isNotInt, isDouble);
                m_out.branch(
                    isCellOrMisc(boxedValue, provenType(m_node->child1())),
                    usually(lowBlock(data->fallThrough.block)), rarely(isDouble));

                // A double can match only if it round-trips exactly through
                // int32; otherwise it falls through.
                m_out.appendTo(isDouble, innerLastNext);
                LValue doubleValue = unboxDouble(boxedValue);
                LValue intInDouble = m_out.doubleToInt(doubleValue);
                intValues.append(m_out.anchor(intInDouble));
                m_out.branch(
                    m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
                    unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block)));
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            m_out.appendTo(switchOnInts, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, intValues));
            return;
        }

        case SwitchChar: {
            LValue stringValue;

            // FIXME: We should use something other than unsure() for the branch weight
            // of the fallThrough block. The main challenge is just that we have multiple
            // branches to fallThrough but a single count, so we would need to divvy it up
            // among the different lowered branches.
            // https://bugs.webkit.org/show_bug.cgi?id=129082

            switch (m_node->child1().useKind()) {
            case StringUse: {
                stringValue = lowString(m_node->child1());
                break;
            }

            case UntypedUse: {
                // Only string cells can match; everything else falls through.
                LValue unboxedValue = lowJSValue(m_node->child1());

                LBasicBlock isCellCase = m_out.newBlock();
                LBasicBlock isStringCase = m_out.newBlock();

                m_out.branch(
                    isNotCell(unboxedValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));

                LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
                LValue cellValue = unboxedValue;
                m_out.branch(
                    isNotString(cellValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));

                m_out.appendTo(isStringCase, lastNext);
                stringValue = cellValue;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            LBasicBlock lengthIs1 = m_out.newBlock();
            LBasicBlock needResolution = m_out.newBlock();
            LBasicBlock resolved = m_out.newBlock();
            LBasicBlock is8Bit = m_out.newBlock();
            LBasicBlock is16Bit = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // A rope has no contiguous characters; resolve it through the
            // runtime before inspecting its contents.
            ValueFromBlock fastValue = m_out.anchor(m_out.loadPtr(stringValue, m_heaps.JSString_value));
            m_out.branch(
                isRopeString(stringValue, m_node->child1()),
                rarely(needResolution), usually(resolved));

            LBasicBlock lastNext = m_out.appendTo(needResolution, resolved);
            ValueFromBlock slowValue = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue));
            m_out.jump(resolved);

            // Only single-character strings can match a char case.
            m_out.appendTo(resolved, lengthIs1);
            LValue value = m_out.phi(pointerType(), fastValue, slowValue);
            m_out.branch(
                m_out.notEqual(
                    m_out.load32NonNegative(value, m_heaps.StringImpl_length),
                    m_out.int32One),
                unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));

            // Load the lone character, with separate paths for 8-bit and
            // 16-bit string storage.
            m_out.appendTo(lengthIs1, is8Bit);
            LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
            m_out.branch(
                m_out.testNonZero32(
                    m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
                    m_out.constInt32(StringImpl::flagIs8Bit())),
                unsure(is8Bit), unsure(is16Bit));

            Vector<ValueFromBlock, 2> characters;
            m_out.appendTo(is8Bit, is16Bit);
            characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0])));
            m_out.jump(continuation);

            m_out.appendTo(is16Bit, continuation);
            characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, characters));
            return;
        }

        case SwitchString: {
            switch (m_node->child1().useKind()) {
            case StringIdentUse: {
                // Atomized strings compare by StringImpl pointer identity, so
                // this lowers to a plain pointer switch.
                LValue stringImpl = lowStringIdent(m_node->child1());

                Vector<SwitchCase> cases;
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
                    LBasicBlock block = lowBlock(data->cases[i].target.block);
                    Weight weight = Weight(data->cases[i].target.count);
                    cases.append(SwitchCase(value, block, weight));
                }

                m_out.switchInstruction(
                    stringImpl, cases, lowBlock(data->fallThrough.block),
                    Weight(data->fallThrough.count));
                return;
            }

            case StringUse: {
                switchString(data, lowString(m_node->child1()), m_node->child1());
                return;
            }

            case UntypedUse: {
                // Filter to string cells first; non-strings fall through.
                LValue value = lowJSValue(m_node->child1());

                LBasicBlock isCellBlock = m_out.newBlock();
                LBasicBlock isStringBlock = m_out.newBlock();

                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));

                LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);

                m_out.branch(
                    isString(value, provenType(m_node->child1())),
                    unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));

                m_out.appendTo(isStringBlock, lastNext);

                switchString(data, value, m_node->child1());
                return;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }
            return;
        }

        case SwitchCell: {
            // Switch on the cell pointer itself; non-cells fall through.
            LValue cell;
            switch (m_node->child1().useKind()) {
            case CellUse: {
                cell = lowCell(m_node->child1());
                break;
            }

            case UntypedUse: {
                LValue value = lowJSValue(m_node->child1());
                LBasicBlock cellCase = m_out.newBlock();
                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
                m_out.appendTo(cellCase);
                cell = value;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }

            buildSwitch(m_node->switchData(), pointerType(), cell);
            return;
        } }

        DFG_CRASH(m_graph, m_node, "Bad switch kind");
    }
9396
9397 void compileEntrySwitch()
9398 {
9399 Vector<LBasicBlock> successors;
9400 for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
9401 successors.append(lowBlock(successor));
9402 m_out.entrySwitch(successors);
9403 }
9404
9405 void compileReturn()
9406 {
9407 m_out.ret(lowJSValue(m_node->child1()));
9408 }
9409
    void compileForceOSRExit()
    {
        // ForceOSRExit means execution must not continue in FTL code here:
        // emit an unconditional OSR exit with the InadequateCoverage exit kind.
        terminate(InadequateCoverage);
    }
9414
    void compileCPUIntrinsic()
    {
#if CPU(X86_64)
        // Lowers the CPU-probing intrinsics (mfence/cpuid/pause/rdtsc) to
        // patchpoints that emit the raw instruction. On non-X86_64 builds this
        // function compiles to an empty body — presumably the node is never
        // generated on those targets (TODO confirm).
        Intrinsic intrinsic = m_node->intrinsic();
        switch (intrinsic) {
        case CPUMfenceIntrinsic:
        case CPUCpuidIntrinsic:
        case CPUPauseIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            // Treat the instruction as a call so B3 will not reorder across it.
            patchpoint->effects = Effects::forCall();
            // cpuid writes eax/ebx/ecx/edx; tell B3 they are clobbered.
            if (intrinsic == CPUCpuidIntrinsic)
                patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::ebx, X86Registers::ecx, X86Registers::edx });

            patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                switch (intrinsic) {
                case CPUMfenceIntrinsic:
                    jit.mfence();
                    break;
                case CPUCpuidIntrinsic:
                    jit.cpuid();
                    break;
                case CPUPauseIntrinsic:
                    jit.pause();
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            });
            // These intrinsics produce no meaningful value; the node's result
            // is undefined.
            setJSValue(m_out.constInt64(JSValue::encode(jsUndefined())));
            break;
        }
        case CPURdtscIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->effects = Effects::forCall();
            patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::edx });
            // The low 32-bits of rdtsc go into rax.
            patchpoint->resultConstraint = ValueRep::reg(X86Registers::eax);
            patchpoint->setGenerator( [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                jit.rdtsc();
            });
            setJSValue(boxInt32(patchpoint));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();

        }
#endif
    }
9464
9465 void compileThrow()
9466 {
9467 LValue error = lowJSValue(m_node->child1());
9468 vmCall(Void, m_out.operation(operationThrowDFG), m_callFrame, error);
9469 // vmCall() does an exception check so we should never reach this.
9470 m_out.unreachable();
9471 }
9472
9473 void compileThrowStaticError()
9474 {
9475 LValue errorMessage = lowString(m_node->child1());
9476 LValue errorType = m_out.constInt32(m_node->errorType());
9477 vmCall(Void, m_out.operation(operationThrowStaticError), m_callFrame, errorMessage, errorType);
9478 // vmCall() does an exception check so we should never reach this.
9479 m_out.unreachable();
9480 }
9481
    // Lower an InvalidationPoint: emit a (possibly zero-length) label that a
    // fired watchpoint can later overwrite with a jump to an OSR exit. The
    // patchpoint generator records the label and lazily materializes the exit;
    // link time pairs them up as a JumpReplacement in the JITCode's common data.
    void compileInvalidationPoint()
    {
        if (verboseCompilationEnabled())
            dataLog("    Invalidation point with availability: ", availabilityMap(), "\n");

        DFG_ASSERT(m_graph, m_node, m_origin.exitOK);

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
        NodeOrigin origin = m_origin;
        // Exit state is attached as cold arguments so B3 keeps it available
        // without constraining register allocation on the fast path.
        patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));

        State* state = &m_ftlState;

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                // The MacroAssembler knows more about this than B3 does. The watchpointLabel() method
                // will ensure that this is followed by a nop shadow but only when this is actually
                // necessary.
                CCallHelpers::Label label = jit.watchpointLabel();

                RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(
                    *state, UncountableInvalidation, origin, params);

                RefPtr<JITCode> jitCode = state->jitCode.get();

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        JumpReplacement jumpReplacement(
                            linkBuffer.locationOf<JSInternalPtrTag>(label),
                            linkBuffer.locationOf<OSRExitPtrTag>(handle->label));
                        jitCode->common.jumpReplacements.append(jumpReplacement);
                    });
            });

        // Set some obvious things.
        patchpoint->effects.terminal = false;
        patchpoint->effects.writesLocalState = false;
        patchpoint->effects.readsLocalState = false;

        // This is how we tell B3 about the possibility of jump replacement.
        patchpoint->effects.exitsSideways = true;

        // It's not possible for some prior branch to determine the safety of this operation. It's always
        // fine to execute this on some path that wouldn't have originally executed it before
        // optimization.
        patchpoint->effects.controlDependent = false;

        // If this falls through then it won't write anything.
        patchpoint->effects.writes = HeapRange();

        // When this abruptly terminates, it could read any heap location.
        patchpoint->effects.reads = HeapRange::top();
    }
9536
9537 void compileIsEmpty()
9538 {
9539 setBoolean(m_out.isZero64(lowJSValue(m_node->child1())));
9540 }
9541
9542 void compileIsUndefined()
9543 {
9544 setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
9545 }
9546
9547 void compileIsUndefinedOrNull()
9548 {
9549 setBoolean(isOther(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9550 }
9551
9552 void compileIsBoolean()
9553 {
9554 setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9555 }
9556
9557 void compileIsNumber()
9558 {
9559 setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9560 }
9561
    // Lower NumberIsInteger (Number.isInteger): int32s are trivially true,
    // non-numbers are false, and a double is an integer iff it is finite and
    // equal to its own truncation toward zero.
    void compileNumberIsInteger()
    {
        LBasicBlock notInt32 = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock doubleNotNanOrInf = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue input = lowJSValue(m_node->child1());

        // Int32-tagged input: answer is true.
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isInt32(input, provenType(m_node->child1())), unsure(continuation), unsure(notInt32));

        // Not a number at all: answer is false.
        LBasicBlock lastNext = m_out.appendTo(notInt32, doubleCase);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isNotNumber(input, provenType(m_node->child1())), unsure(continuation), unsure(doubleCase));

        m_out.appendTo(doubleCase, doubleNotNanOrInf);
        LValue doubleAsInt;
        LValue asDouble = unboxDouble(input, &doubleAsInt);
        // Extract the IEEE-754 exponent field (bits 52..62). An all-ones
        // exponent (0x7ff) means NaN or +/-Infinity — not integers. On that
        // edge the false value anchored in notInt32 (which dominates here)
        // flows into the continuation phi.
        LValue expBits = m_out.bitAnd(m_out.lShr(doubleAsInt, m_out.constInt32(52)), m_out.constInt64(0x7ff));
        m_out.branch(
            m_out.equal(expBits, m_out.constInt64(0x7ff)),
            unsure(continuation), unsure(doubleNotNanOrInf));

        // Finite double: it is an integer iff truncating toward zero leaves
        // it unchanged. Done in a patchpoint so we can use the CPU's
        // round-toward-zero and double-compare directly.
        m_out.appendTo(doubleNotNanOrInf, continuation);
        PatchpointValue* patchpoint = m_out.patchpoint(Int32);
        patchpoint->appendSomeRegister(asDouble);
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->effects = Effects::none();
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            GPRReg result = params[0].gpr();
            FPRReg input = params[1].fpr();
            FPRReg temp = params.fpScratch(0);
            jit.roundTowardZeroDouble(input, temp);
            jit.compareDouble(MacroAssembler::DoubleEqual, input, temp, result);
        });
        ValueFromBlock patchpointResult = m_out.anchor(patchpoint);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult, patchpointResult));
    }
9606
    // Lower IsCellWithType: test whether the operand is a cell whose JSType
    // matches the node's queried type. For UntypedUse, non-cells short-circuit
    // to false; for CellUse, cell-ness is already guaranteed by speculation.
    void compileIsCellWithType()
    {
        if (m_node->child1().useKind() == UntypedUse) {
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-cell values can never have the queried type.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
            ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, cellResult));
        } else {
            ASSERT(m_node->child1().useKind() == CellUse);
            setBoolean(isCellWithType(lowCell(m_node->child1()), m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
        }
    }
9630
    // Lower IsObject: false for non-cells, otherwise defer to the isObject()
    // cell-type test; results merge in a two-way phi.
    void compileIsObject()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cell values (numbers, booleans, null/undefined) are not objects.
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, notCellResult, cellResult));
    }
9649
9650 LValue wangsInt64Hash(LValue input)
9651 {
9652 // key += ~(key << 32);
9653 LValue key = input;
9654 LValue temp = key;
9655 temp = m_out.shl(temp, m_out.constInt32(32));
9656 temp = m_out.bitNot(temp);
9657 key = m_out.add(key, temp);
9658 // key ^= (key >> 22);
9659 temp = key;
9660 temp = m_out.lShr(temp, m_out.constInt32(22));
9661 key = m_out.bitXor(key, temp);
9662 // key += ~(key << 13);
9663 temp = key;
9664 temp = m_out.shl(temp, m_out.constInt32(13));
9665 temp = m_out.bitNot(temp);
9666 key = m_out.add(key, temp);
9667 // key ^= (key >> 8);
9668 temp = key;
9669 temp = m_out.lShr(temp, m_out.constInt32(8));
9670 key = m_out.bitXor(key, temp);
9671 // key += (key << 3);
9672 temp = key;
9673 temp = m_out.shl(temp, m_out.constInt32(3));
9674 key = m_out.add(key, temp);
9675 // key ^= (key >> 15);
9676 temp = key;
9677 temp = m_out.lShr(temp, m_out.constInt32(15));
9678 key = m_out.bitXor(key, temp);
9679 // key += ~(key << 27);
9680 temp = key;
9681 temp = m_out.shl(temp, m_out.constInt32(27));
9682 temp = m_out.bitNot(temp);
9683 key = m_out.add(key, temp);
9684 // key ^= (key >> 31);
9685 temp = key;
9686 temp = m_out.lShr(temp, m_out.constInt32(31));
9687 key = m_out.bitXor(key, temp);
9688 key = m_out.castToInt32(key);
9689
9690 return key;
9691 }
9692
    // Emit the Map/Set hash of a JSString. Fast path: a resolved (non-rope)
    // string whose StringImpl already caches a non-zero hash in its
    // hashAndFlags word. Ropes and not-yet-hashed strings fall back to
    // operationMapHash.
    LValue mapHashString(LValue string, Edge& edge)
    {
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // A rope has no contiguous StringImpl to read the cached hash from.
        m_out.branch(isRopeString(string, edge), rarely(slowCase), usually(nonEmptyStringCase));

        LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        // The hash occupies the bits above the s_flagCount flag bits.
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        // Zero means the hash has not been computed yet; take the slow path.
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, string));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, slowResult, nonEmptyStringHashResult);
    }
9716
    // Lower MapHash: compute the hash used for Map/Set lookups. Non-string
    // values hash their raw 64-bit encoding with wangsInt64Hash(); strings use
    // mapHashString() / the cached StringImpl hash; UntypedUse dispatches
    // dynamically between those cases.
    void compileMapHash()
    {
        switch (m_node->child1().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These use kinds can never be strings, so the encoded bits can be
            // hashed directly after the type speculation.
            LValue key = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            speculate(m_node->child1());
            setInt32(wangsInt64Hash(key));
            return;
        }

        case CellUse: {
            // A cell might still be a string; check the JSType and pick the
            // appropriate hashing strategy.
            LBasicBlock isString = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue value = lowCell(m_node->child1());
            LValue isStringValue = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
            m_out.branch(
                isStringValue, unsure(isString), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isString, notString);
            ValueFromBlock stringResult = m_out.anchor(mapHashString(value, m_node->child1()));
            m_out.jump(continuation);

            m_out.appendTo(notString, continuation);
            ValueFromBlock notStringResult = m_out.anchor(wangsInt64Hash(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, stringResult, notStringResult));
            return;
        }

        case StringUse: {
            LValue string = lowString(m_node->child1());
            setInt32(mapHashString(string, m_node->child1()));
            return;
        }

        default:
            RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse);
            break;
        }

        // UntypedUse: fully dynamic dispatch. Non-cells and non-string cells
        // take the straight wangsInt64Hash path; strings read the cached
        // StringImpl hash, falling back to operationMapHash for ropes and
        // unhashed strings.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock straightHash = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(straightHash));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        LValue isString = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
        m_out.branch(
            isString, unsure(isStringCase), unsure(straightHash));

        m_out.appendTo(isStringCase, nonEmptyStringCase);
        m_out.branch(isRopeString(value, m_node->child1()), rarely(slowCase), usually(nonEmptyStringCase));

        m_out.appendTo(nonEmptyStringCase, straightHash);
        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
        // Hash bits sit above the s_flagCount flag bits; zero means "not yet
        // computed", which routes to the slow path.
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(straightHash, slowCase);
        ValueFromBlock fastResult = m_out.anchor(wangsInt64Hash(value));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, fastResult, slowResult, nonEmptyStringHashResult));
    }
9803
    // Lower NormalizeMapKey: canonicalize a map key so that doubles with an
    // exact int32 value are re-boxed as int32 (so e.g. 1.0 and 1 hash/compare
    // identically). Everything else — non-numbers, int32s, NaN, and doubles
    // that don't round-trip through int32 — passes through unchanged.
    void compileNormalizeMapKey()
    {
        ASSERT(m_node->child1().useKind() == UntypedUse);

        LBasicBlock isNumberCase = m_out.newBlock();
        LBasicBlock notInt32NumberCase = m_out.newBlock();
        LBasicBlock notNaNCase = m_out.newBlock();
        LBasicBlock convertibleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isNumberCase);

        LValue key = lowJSValue(m_node->child1());
        // Default result: the key unchanged.
        ValueFromBlock fastResult = m_out.anchor(key);
        m_out.branch(isNotNumber(key), unsure(continuation), unsure(isNumberCase));

        m_out.appendTo(isNumberCase, notInt32NumberCase);
        m_out.branch(isInt32(key), unsure(continuation), unsure(notInt32NumberCase));

        m_out.appendTo(notInt32NumberCase, notNaNCase);
        LValue doubleValue = unboxDouble(key);
        // NaN != NaN, so this detects NaN; NaN keys are left as-is.
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), unsure(continuation), unsure(notNaNCase));

        m_out.appendTo(notNaNCase, convertibleCase);
        // Re-box only if double -> int32 -> double is lossless.
        LValue integerValue = m_out.doubleToInt(doubleValue);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, integerValueConvertedToDouble), unsure(continuation), unsure(convertibleCase));

        m_out.appendTo(convertibleCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(boxInt32(integerValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
9839
    // Lower GetMapBucket: inline the open-addressed probe of a JSMap/JSSet
    // hash table. Starting at the key's hash, mask into the table, and walk
    // forward past deleted entries until we hit the key (bit-equal, or both
    // strings — which need a content comparison on the slow path), or an
    // empty slot (miss → sentinel bucket). Children: 1 = map/set, 2 = key,
    // 3 = precomputed hash.
    void compileGetMapBucket()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notPresentInTable = m_out.newBlock();
        LBasicBlock notEmptyValue = m_out.newBlock();
        LBasicBlock notDeletedValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue map;
        if (m_node->child1().useKind() == MapObjectUse)
            map = lowMapObject(m_node->child1());
        else if (m_node->child1().useKind() == SetObjectUse)
            map = lowSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();

        LValue key = lowJSValue(m_node->child2(), ManualOperandSpeculation);
        if (m_node->child2().useKind() != UntypedUse)
            speculate(m_node->child2());

        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(map, m_heaps.HashMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 is the index mask.
        LValue mask = m_out.sub(m_out.load32(map, m_heaps.HashMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        // Loop head: the unmasked index grows by one each probe; masking
        // wraps it into the table.
        m_out.appendTo(loopStart, notEmptyValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);
        // FIXME: I think these buffers are caged?
        // https://bugs.webkit.org/show_bug.cgi?id=174925
        LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
        ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
        // Empty slot terminates the probe: the key is not in the table.
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
            unsure(notPresentInTable), unsure(notEmptyValue));

        // Deleted slot: keep probing.
        m_out.appendTo(notEmptyValue, notDeletedValue);
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
            unsure(loopAround), unsure(notDeletedValue));

        m_out.appendTo(notDeletedValue, loopAround);
        LValue bucketKey = m_out.load64(hashMapBucket, m_heaps.HashMapBucket_key);

        // Perform Object.is()
        switch (m_node->child2().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These kinds compare by identity, so bit-equality is Object.is().
            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(loopAround));
            break;
        }
        case StringUse: {
            // The key is known to be a string: if the bucket key is also a
            // string (and not bit-equal), a content comparison is needed —
            // bail to the slow path.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, loopAround);
            m_out.branch(isString(bucketKey),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case CellUse: {
            // The key is some cell: only the string/string combination can be
            // equal without being bit-equal.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case UntypedUse: {
            // Fully dynamic: both sides must be cells and both must be
            // strings before a content comparison is warranted.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bothAreCells = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bothAreCells);
            m_out.branch(isCell(key),
                unsure(bothAreCells), unsure(loopAround));

            m_out.appendTo(bothAreCells, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // Advance to the next probe slot (linear probing).
        m_out.appendTo(loopAround, slowPath);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(slowPath, notPresentInTable);
        ValueFromBlock slowPathResult = m_out.anchor(vmCall(pointerType(),
            m_out.operation(m_node->child1().useKind() == MapObjectUse ? operationJSMapFindBucket : operationJSSetFindBucket), m_callFrame, map, key, hash));
        m_out.jump(continuation);

        // Miss: yield the VM's sentinel bucket for the container kind.
        m_out.appendTo(notPresentInTable, continuation);
        ValueFromBlock notPresentResult;
        if (m_node->child1().useKind() == MapObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else if (m_node->child1().useKind() == SetObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        else
            RELEASE_ASSERT_NOT_REACHED();
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), bucketResult, slowPathResult, notPresentResult));
    }
9988
9989 void compileGetMapBucketHead()
9990 {
9991 LValue map;
9992 if (m_node->child1().useKind() == MapObjectUse)
9993 map = lowMapObject(m_node->child1());
9994 else if (m_node->child1().useKind() == SetObjectUse)
9995 map = lowSetObject(m_node->child1());
9996 else
9997 RELEASE_ASSERT_NOT_REACHED();
9998
9999 ASSERT(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead() == HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>::offsetOfHead());
10000 setJSValue(m_out.loadPtr(map, m_heaps.HashMapImpl_head));
10001 }
10002
    // Lower GetMapBucketNext: starting from the bucket in child1, walk the
    // bucket's next-pointer chain, skipping buckets whose key slot is zero
    // (cleared entries), until a live bucket is found or the chain ends (in
    // which case the VM's sentinel bucket for the owner kind is returned).
    void compileGetMapBucketNext()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock noBucket = m_out.newBlock();
        LBasicBlock hasBucket = m_out.newBlock();
        LBasicBlock nextBucket = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        // Both bucket layouts must agree on these offsets, since the loads
        // below use layout-agnostic abstract heap fields.
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfNext() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext());
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey());
        LValue mapBucketPrev = lowCell(m_node->child1());
        ValueFromBlock mapBucketStart = m_out.anchor(m_out.loadPtr(mapBucketPrev, m_heaps.HashMapBucket_next));
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, noBucket);
        LValue mapBucket = m_out.phi(pointerType(), mapBucketStart);
        // Null next-pointer: end of the chain.
        m_out.branch(m_out.isNull(mapBucket), unsure(noBucket), unsure(hasBucket));

        m_out.appendTo(noBucket, hasBucket);
        ValueFromBlock noBucketResult;
        if (m_node->bucketOwnerType() == BucketOwnerType::Map)
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else {
            ASSERT(m_node->bucketOwnerType() == BucketOwnerType::Set);
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        }
        m_out.jump(continuation);

        // A zero key slot marks a cleared bucket — keep walking.
        m_out.appendTo(hasBucket, nextBucket);
        ValueFromBlock bucketResult = m_out.anchor(mapBucket);
        m_out.branch(m_out.isZero64(m_out.load64(mapBucket, m_heaps.HashMapBucket_key)), unsure(nextBucket), unsure(continuation));

        m_out.appendTo(nextBucket, continuation);
        m_out.addIncomingToPhi(mapBucket, m_out.anchor(m_out.loadPtr(mapBucket, m_heaps.HashMapBucket_next)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), noBucketResult, bucketResult));
    }
10044
10045 void compileLoadValueFromMapBucket()
10046 {
10047 LValue mapBucket = lowCell(m_node->child1());
10048 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_value));
10049 }
10050
10051 void compileExtractValueFromWeakMapGet()
10052 {
10053 LValue value = lowJSValue(m_node->child1());
10054 setJSValue(m_out.select(m_out.isZero64(value),
10055 m_out.constInt64(JSValue::encode(jsUndefined())),
10056 value));
10057 }
10058
10059 void compileLoadKeyFromMapBucket()
10060 {
10061 LValue mapBucket = lowCell(m_node->child1());
10062 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_key));
10063 }
10064
10065 void compileSetAdd()
10066 {
10067 LValue set = lowSetObject(m_node->child1());
10068 LValue key = lowJSValue(m_node->child2());
10069 LValue hash = lowInt32(m_node->child3());
10070
10071 setJSValue(vmCall(pointerType(), m_out.operation(operationSetAdd), m_callFrame, set, key, hash));
10072 }
10073
10074 void compileMapSet()
10075 {
10076 LValue map = lowMapObject(m_graph.varArgChild(m_node, 0));
10077 LValue key = lowJSValue(m_graph.varArgChild(m_node, 1));
10078 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10079 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10080
10081 setJSValue(vmCall(pointerType(), m_out.operation(operationMapSet), m_callFrame, map, key, value, hash));
10082 }
10083
    // Lower WeakMapGet: inline the open-addressed probe of a WeakMap/WeakSet
    // table. Keys are objects compared by identity, so a bit-equal bucket key
    // is a hit and a null bucket key terminates the probe (miss). Children:
    // 1 = the weak map/set, 2 = the object key, 3 = its precomputed hash.
    // On a miss the loaded slot is zero, which the consumer (see
    // compileExtractValueFromWeakMapGet) maps to undefined.
    void compileWeakMapGet()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock notEqualValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue weakMap;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            weakMap = lowWeakMapObject(m_node->child1());
        else if (m_node->child1().useKind() == WeakSetObjectUse)
            weakMap = lowWeakSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();
        LValue key = lowObject(m_node->child2());
        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(weakMap, m_heaps.WeakMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 is the index mask.
        LValue mask = m_out.sub(m_out.load32(weakMap, m_heaps.WeakMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEqualValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);

        LValue bucket;

        // Buckets are stored inline in the buffer; compute the bucket address
        // from the index using the bucket size (a power of two) as the scale.
        if (m_node->child1().useKind() == WeakMapObjectUse) {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)))));
        } else {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)))));
        }

        LValue bucketKey = m_out.load64(bucket, m_heaps.WeakMapBucket_key);
        // Identity match: done.
        m_out.branch(m_out.equal(key, bucketKey), unsure(continuation), unsure(loopAround));

        // Null bucket key means an empty slot: the probe ends in a miss.
        m_out.appendTo(notEqualValue, loopAround);
        m_out.branch(m_out.isNull(bucketKey), unsure(continuation), unsure(loopAround));

        // Linear probing: advance the unmasked index by one.
        m_out.appendTo(loopAround, continuation);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        LValue result;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            result = m_out.load64(bucket, m_heaps.WeakMapBucket_value);
        else
            result = bucketKey;
        setJSValue(result);
    }
10141
10142 void compileWeakSetAdd()
10143 {
10144 LValue set = lowWeakSetObject(m_node->child1());
10145 LValue key = lowObject(m_node->child2());
10146 LValue hash = lowInt32(m_node->child3());
10147
10148 vmCall(Void, m_out.operation(operationWeakSetAdd), m_callFrame, set, key, hash);
10149 }
10150
10151 void compileWeakMapSet()
10152 {
10153 LValue map = lowWeakMapObject(m_graph.varArgChild(m_node, 0));
10154 LValue key = lowObject(m_graph.varArgChild(m_node, 1));
10155 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10156 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10157
10158 vmCall(Void, m_out.operation(operationWeakMapSet), m_callFrame, map, key, value, hash);
10159 }
10160
    // Lower IsObjectOrNull: true for null and for non-function objects;
    // false for functions, non-object cells, and other primitives (note the
    // non-cell path compares against null only, so undefined yields false).
    // Objects that are exotic for typeof go to a runtime slow path.
    void compileIsObjectOrNull()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        // Functions answer false (they are "function", not "object").
        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Non-function, non-object cells (e.g. strings) answer false.
        m_out.appendTo(notFunctionCase, objectCase);
        ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isObject(value, provenType(child)),
            unsure(objectCase), unsure(continuation));

        // Ordinary objects answer true; exotic-for-typeof objects need the
        // runtime to decide.
        m_out.appendTo(objectCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, notCellCase);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue));
        m_out.jump(continuation);

        // Non-cell: true only for null (undefined and numbers answer false).
        m_out.appendTo(notCellCase, continuation);
        LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
        ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32,
            isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
        setBoolean(result);
    }
10217
    // Lower IsFunction: false for non-cells, true for function cells, false
    // for ordinary non-function cells; cells that are exotic for typeof go
    // to a runtime slow path to decide callability.
    void compileIsFunction()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Primitives are never functions.
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Ordinary non-function cells answer false; exotic-for-typeof cells
        // need the runtime to decide.
        m_out.appendTo(notFunctionCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsFunction, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32, notCellResult, functionResult, objectResult, slowResult);
        setBoolean(result);
    }
10262
10263 void compileIsTypedArrayView()
10264 {
10265 LValue value = lowJSValue(m_node->child1());
10266
10267 LBasicBlock isCellCase = m_out.newBlock();
10268 LBasicBlock continuation = m_out.newBlock();
10269
10270 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
10271 m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
10272
10273 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
10274 ValueFromBlock cellResult = m_out.anchor(isTypedArrayView(value, provenType(m_node->child1())));
10275 m_out.jump(continuation);
10276
10277 m_out.appendTo(continuation, lastNext);
10278 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
10279 }
10280
10281 void compileTypeOf()
10282 {
10283 Edge child = m_node->child1();
10284 LValue value = lowJSValue(child);
10285
10286 LBasicBlock continuation = m_out.newBlock();
10287 LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
10288
10289 Vector<ValueFromBlock> results;
10290
10291 buildTypeOf(
10292 child, value,
10293 [&] (TypeofType type) {
10294 results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
10295 m_out.jump(continuation);
10296 });
10297
10298 m_out.appendTo(continuation, lastNext);
10299 setJSValue(m_out.phi(Int64, results));
10300 }
10301
10302 void compileInByVal()
10303 {
10304 setJSValue(vmCall(Int64, m_out.operation(operationInByVal), m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2())));
10305 }
10306
    void compileInById()
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
        LValue base = lowCell(m_node->child1());

        // Emit a patchpoint so JITInByIdGenerator can lay down a patchable
        // inline-cache fast path for "uid in base".
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // The generated IC code relies on the usual tag register contents.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result register, params[1] holds the base.
                auto generator = Box<JITInByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()));

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // Slow path: call operationInByIdOptimize with the stub info,
                // then rejoin the fast path at `done`.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), operationInByIdOptimize, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        setJSValue(patchpoint);
    }
10367
10368 void compileHasOwnProperty()
10369 {
10370 LBasicBlock slowCase = m_out.newBlock();
10371 LBasicBlock continuation = m_out.newBlock();
10372 LBasicBlock lastNext = nullptr;
10373
10374 LValue object = lowObject(m_node->child1());
10375 LValue uniquedStringImpl;
10376 LValue keyAsValue = nullptr;
10377 switch (m_node->child2().useKind()) {
10378 case StringUse: {
10379 LBasicBlock isNonEmptyString = m_out.newBlock();
10380 LBasicBlock isAtomicString = m_out.newBlock();
10381
10382 keyAsValue = lowString(m_node->child2());
10383 m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));
10384
10385 lastNext = m_out.appendTo(isNonEmptyString, isAtomicString);
10386 uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
10387 LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
10388 m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomicString));
10389
10390 m_out.appendTo(isAtomicString, slowCase);
10391 break;
10392 }
10393 case SymbolUse: {
10394 keyAsValue = lowSymbol(m_node->child2());
10395 uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl);
10396 lastNext = m_out.insertNewBlocksBefore(slowCase);
10397 break;
10398 }
10399 case UntypedUse: {
10400 LBasicBlock isCellCase = m_out.newBlock();
10401 LBasicBlock isStringCase = m_out.newBlock();
10402 LBasicBlock notStringCase = m_out.newBlock();
10403 LBasicBlock isNonEmptyString = m_out.newBlock();
10404 LBasicBlock isSymbolCase = m_out.newBlock();
10405 LBasicBlock hasUniquedStringImpl = m_out.newBlock();
10406
10407 keyAsValue = lowJSValue(m_node->child2());
10408 m_out.branch(isCell(keyAsValue), usually(isCellCase), rarely(slowCase));
10409
10410 lastNext = m_out.appendTo(isCellCase, isStringCase);
10411 m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));
10412
10413 m_out.appendTo(isStringCase, isNonEmptyString);
10414 m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));
10415
10416 m_out.appendTo(isNonEmptyString, notStringCase);
10417 LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
10418 ValueFromBlock stringResult = m_out.anchor(implFromString);
10419 LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
10420 m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));
10421
10422 m_out.appendTo(notStringCase, isSymbolCase);
10423 m_out.branch(isSymbol(keyAsValue), unsure(isSymbolCase), unsure(slowCase));
10424
10425 m_out.appendTo(isSymbolCase, hasUniquedStringImpl);
10426 ValueFromBlock symbolResult = m_out.anchor(m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl));
10427 m_out.jump(hasUniquedStringImpl);
10428
10429 m_out.appendTo(hasUniquedStringImpl, slowCase);
10430 uniquedStringImpl = m_out.phi(pointerType(), stringResult, symbolResult);
10431 break;
10432 }
10433 default:
10434 RELEASE_ASSERT_NOT_REACHED();
10435 }
10436
10437 ASSERT(keyAsValue);
10438
10439 // Note that we don't test if the hash is zero here. AtomicStringImpl's can't have a zero
10440 // hash, however, a SymbolImpl may. But, because this is a cache, we don't care. We only
10441 // ever load the result from the cache if the cache entry matches what we are querying for.
10442 // So we either get super lucky and use zero for the hash and somehow collide with the entity
10443 // we're looking for, or we realize we're comparing against another entity, and go to the
10444 // slow path anyways.
10445 LValue hash = m_out.lShr(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
10446
10447 LValue structureID = m_out.load32(object, m_heaps.JSCell_structureID);
10448 LValue index = m_out.add(hash, structureID);
10449 index = m_out.zeroExtPtr(m_out.bitAnd(index, m_out.constInt32(HasOwnPropertyCache::mask)));
10450 ASSERT(vm().hasOwnPropertyCache());
10451 LValue cache = m_out.constIntPtr(vm().hasOwnPropertyCache());
10452
10453 IndexedAbstractHeap& heap = m_heaps.HasOwnPropertyCache;
10454 LValue sameStructureID = m_out.equal(structureID, m_out.load32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfStructureID())));
10455 LValue sameImpl = m_out.equal(uniquedStringImpl, m_out.loadPtr(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfImpl())));
10456 ValueFromBlock fastResult = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfResult())));
10457 LValue cacheHit = m_out.bitAnd(sameStructureID, sameImpl);
10458
10459 m_out.branch(m_out.notZero32(cacheHit), usually(continuation), rarely(slowCase));
10460
10461 m_out.appendTo(slowCase, continuation);
10462 ValueFromBlock slowResult;
10463 slowResult = m_out.anchor(vmCall(Int32, m_out.operation(operationHasOwnProperty), m_callFrame, object, keyAsValue));
10464 m_out.jump(continuation);
10465
10466 m_out.appendTo(continuation, lastNext);
10467 setBoolean(m_out.phi(Int32, fastResult, slowResult));
10468 }
10469
10470 void compileParseInt()
10471 {
10472 RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
10473 LValue result;
10474 if (m_node->child2()) {
10475 LValue radix = lowInt32(m_node->child2());
10476 if (m_node->child1().useKind() == UntypedUse)
10477 result = vmCall(Int64, m_out.operation(operationParseIntGeneric), m_callFrame, lowJSValue(m_node->child1()), radix);
10478 else
10479 result = vmCall(Int64, m_out.operation(operationParseIntString), m_callFrame, lowString(m_node->child1()), radix);
10480 } else {
10481 if (m_node->child1().useKind() == UntypedUse)
10482 result = vmCall(Int64, m_out.operation(operationParseIntNoRadixGeneric), m_callFrame, lowJSValue(m_node->child1()));
10483 else
10484 result = vmCall(Int64, m_out.operation(operationParseIntStringNoRadix), m_callFrame, lowString(m_node->child1()));
10485 }
10486 setJSValue(result);
10487 }
10488
10489 void compileOverridesHasInstance()
10490 {
10491 FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
10492 ASSERT(defaultHasInstanceFunction->cell()->inherits<JSFunction>(vm()));
10493
10494 LValue constructor = lowCell(m_node->child1());
10495 LValue hasInstance = lowJSValue(m_node->child2());
10496
10497 LBasicBlock defaultHasInstance = m_out.newBlock();
10498 LBasicBlock continuation = m_out.newBlock();
10499
10500 // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant as B3 should fix it for us.
10501
10502 ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
10503 m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));
10504
10505 LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
10506 ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
10507 m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags),
10508 m_out.constInt32(ImplementsDefaultHasInstance)));
10509 m_out.jump(continuation);
10510
10511 m_out.appendTo(continuation, lastNext);
10512 setBoolean(m_out.phi(Int32, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult));
10513 }
10514
10515 void compileCheckTypeInfoFlags()
10516 {
10517 speculate(
10518 BadTypeInfoFlags, noValue(), 0,
10519 m_out.testIsZero32(
10520 m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags),
10521 m_out.constInt32(m_node->typeInfoOperand())));
10522 }
10523
    void compileInstanceOf()
    {
        Node* node = m_node;
        State* state = &m_ftlState;

        LValue value;
        LValue prototype;
        bool valueIsCell;
        bool prototypeIsCell;
        if (m_node->child1().useKind() == CellUse
            && m_node->child2().useKind() == CellUse) {
            value = lowCell(m_node->child1());
            prototype = lowCell(m_node->child2());

            valueIsCell = true;
            prototypeIsCell = true;
        } else {
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
            DFG_ASSERT(m_graph, m_node, m_node->child2().useKind() == UntypedUse);

            value = lowJSValue(m_node->child1());
            prototype = lowJSValue(m_node->child2());

            // Even with untyped uses, abstract interpretation may have proven
            // cell-ness, letting us skip the dynamic cell checks below.
            valueIsCell = abstractValue(m_node->child1()).isType(SpecCell);
            prototypeIsCell = abstractValue(m_node->child2()).isType(SpecCell);
        }

        // "If the prototype is a cell, it is an object" — passed to the IC
        // generator so it can skip an object check.
        bool prototypeIsObject = abstractValue(m_node->child2()).isType(SpecObject | ~SpecCell);

        // Patchpoint so JITInstanceOfGenerator can emit a patchable inline
        // cache for instanceof.
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(value);
        patchpoint->appendSomeRegister(prototype);
        // The generated IC code relies on the usual tag register contents.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        patchpoint->numGPScratchRegisters = 2;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                GPRReg resultGPR = params[0].gpr();
                GPRReg valueGPR = params[1].gpr();
                GPRReg prototypeGPR = params[2].gpr();
                GPRReg scratchGPR = params.gpScratch(0);
                GPRReg scratch2GPR = params.gpScratch(1);

                // A non-cell LHS yields false without touching the IC.
                CCallHelpers::Jump doneJump;
                if (!valueIsCell) {
                    CCallHelpers::Jump isCell = jit.branchIfCell(valueGPR);
                    jit.boxBooleanPayload(false, resultGPR);
                    doneJump = jit.jump();
                    isCell.link(&jit);
                }

                // A non-cell prototype must be handled by the slow path.
                CCallHelpers::JumpList slowCases;
                if (!prototypeIsCell)
                    slowCases.append(jit.branchIfNotCell(prototypeGPR));

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                auto generator = Box<JITInstanceOfGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), resultGPR, valueGPR, prototypeGPR, scratchGPR,
                    scratch2GPR, prototypeIsObject);
                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // Slow path: call operationInstanceOfOptimize with the stub
                // info, then rejoin the fast path at `done`.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJ optimizationFunction = operationInstanceOfOptimize;

                        slowCases.link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, resultGPR,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), valueGPR,
                            prototypeGPR).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });

                if (doneJump.isSet())
                    doneJump.link(&jit);
            });

        // This returns a boxed boolean.
        setJSValue(patchpoint);
    }
10631
10632 void compileInstanceOfCustom()
10633 {
10634 LValue value = lowJSValue(m_node->child1());
10635 LValue constructor = lowCell(m_node->child2());
10636 LValue hasInstance = lowJSValue(m_node->child3());
10637
10638 setBoolean(m_out.logicalNot(m_out.equal(m_out.constInt32(0), vmCall(Int32, m_out.operation(operationInstanceOfCustom), m_callFrame, value, constructor, hasInstance))));
10639 }
10640
10641 void compileCountExecution()
10642 {
10643 TypedPointer counter = m_out.absolute(m_node->executionCounter()->address());
10644 m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter);
10645 }
10646
10647 void compileSuperSamplerBegin()
10648 {
10649 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10650 m_out.store32(m_out.add(m_out.load32(counter), m_out.constInt32(1)), counter);
10651 }
10652
10653 void compileSuperSamplerEnd()
10654 {
10655 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10656 m_out.store32(m_out.sub(m_out.load32(counter), m_out.constInt32(1)), counter);
10657 }
10658
10659 void compileStoreBarrier()
10660 {
10661 emitStoreBarrier(lowCell(m_node->child1()), m_node->op() == FencedStoreBarrier);
10662 }
10663
    void compileHasIndexedProperty()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        // Fast paths exist for Int32/Contiguous, Double, and ArrayStorage
        // shapes; anything else always calls the runtime.
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // If the index is not proven in-bounds, out-of-bounds accesses go
            // straight to the slow path.
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // A zero (empty-value) load indicates a hole: take the slow path.
            LValue checkHoleResultValue =
                m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1))));
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            return;
        }
        case Array::Double: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // Same bounds handling as the Int32/Contiguous case above.
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // Self-comparison is false only for NaN; a NaN read is treated as
            // a hole and sent to the slow path.
            LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // ArrayStorage bounds-checks against the vector length, not the
            // public length.
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // A zero (empty-value) load indicates a hole: take the slow path.
            LValue checkHoleResultValue =
                m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_graph.varArgChild(m_node, 1))));
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            break;
        }

        default: {
            // No fast path for this array shape: always call the runtime.
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
            setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            break;
        }
        }
    }
10781
10782 void compileHasGenericProperty()
10783 {
10784 LValue base = lowJSValue(m_node->child1());
10785 LValue property = lowCell(m_node->child2());
10786 setJSValue(vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property));
10787 }
10788
10789 void compileHasStructureProperty()
10790 {
10791 LValue base = lowJSValue(m_node->child1());
10792 LValue property = lowString(m_node->child2());
10793 LValue enumerator = lowCell(m_node->child3());
10794
10795 LBasicBlock correctStructure = m_out.newBlock();
10796 LBasicBlock wrongStructure = m_out.newBlock();
10797 LBasicBlock continuation = m_out.newBlock();
10798
10799 m_out.branch(m_out.notEqual(
10800 m_out.load32(base, m_heaps.JSCell_structureID),
10801 m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
10802 rarely(wrongStructure), usually(correctStructure));
10803
10804 LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure);
10805 ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
10806 m_out.jump(continuation);
10807
10808 m_out.appendTo(wrongStructure, continuation);
10809 ValueFromBlock wrongStructureResult = m_out.anchor(
10810 m_out.equal(
10811 m_out.constInt64(JSValue::encode(jsBoolean(true))),
10812 vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property)));
10813 m_out.jump(continuation);
10814
10815 m_out.appendTo(continuation, lastNext);
10816 setBoolean(m_out.phi(Int32, correctStructureResult, wrongStructureResult));
10817 }
10818
    void compileGetDirectPname()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue property = lowCell(m_graph.varArgChild(m_node, 1));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
        LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));

        LBasicBlock checkOffset = m_out.newBlock();
        LBasicBlock inlineLoad = m_out.newBlock();
        LBasicBlock outOfLineLoad = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // The direct loads are only valid while the base still has the
        // structure the enumerator was created with.
        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(slowCase), usually(checkOffset));

        // Indices below the cached inline capacity live in the object's
        // inline storage; the rest live out-of-line in the butterfly.
        LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
        m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
            unsure(outOfLineLoad), unsure(inlineLoad));

        m_out.appendTo(inlineLoad, outOfLineLoad);
        ValueFromBlock inlineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
                base, m_out.zeroExt(index, Int64), ScaleEight, JSObject::offsetOfInlineStorage())));
        m_out.jump(continuation);

        // Out-of-line: index negatively from the butterfly's first
        // out-of-line slot; realIndex = -(index - inlineCapacity).
        m_out.appendTo(outOfLineLoad, slowCase);
        LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
        LValue realIndex = m_out.signExt32To64(
            m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))));
        int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
        ValueFromBlock outOfLineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
        m_out.jump(continuation);

        // Structure mismatch: fall back to a generic get-by-val.
        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowCaseResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByVal), m_callFrame, base, property));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inlineResult, outOfLineResult, slowCaseResult));
    }
10864
10865 void compileGetEnumerableLength()
10866 {
10867 LValue enumerator = lowCell(m_node->child1());
10868 setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
10869 }
10870
10871 void compileGetPropertyEnumerator()
10872 {
10873 if (m_node->child1().useKind() == CellUse)
10874 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumeratorCell), m_callFrame, lowCell(m_node->child1())));
10875 else
10876 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumerator), m_callFrame, lowJSValue(m_node->child1())));
10877 }
10878
10879 void compileGetEnumeratorStructurePname()
10880 {
10881 LValue enumerator = lowCell(m_node->child1());
10882 LValue index = lowInt32(m_node->child2());
10883
10884 LBasicBlock inBounds = m_out.newBlock();
10885 LBasicBlock outOfBounds = m_out.newBlock();
10886 LBasicBlock continuation = m_out.newBlock();
10887
10888 m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
10889 usually(inBounds), rarely(outOfBounds));
10890
10891 LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
10892 LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
10893 ValueFromBlock inBoundsResult = m_out.anchor(
10894 m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
10895 m_out.jump(continuation);
10896
10897 m_out.appendTo(outOfBounds, continuation);
10898 ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
10899 m_out.jump(continuation);
10900
10901 m_out.appendTo(continuation, lastNext);
10902 setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
10903 }
10904
10905 void compileGetEnumeratorGenericPname()
10906 {
10907 LValue enumerator = lowCell(m_node->child1());
10908 LValue index = lowInt32(m_node->child2());
10909
10910 LBasicBlock inBounds = m_out.newBlock();
10911 LBasicBlock outOfBounds = m_out.newBlock();
10912 LBasicBlock continuation = m_out.newBlock();
10913
10914 m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
10915 usually(inBounds), rarely(outOfBounds));
10916
10917 LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
10918 LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
10919 ValueFromBlock inBoundsResult = m_out.anchor(
10920 m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
10921 m_out.jump(continuation);
10922
10923 m_out.appendTo(outOfBounds, continuation);
10924 ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
10925 m_out.jump(continuation);
10926
10927 m_out.appendTo(continuation, lastNext);
10928 setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
10929 }
10930
10931 void compileToIndexString()
10932 {
10933 LValue index = lowInt32(m_node->child1());
10934 setJSValue(vmCall(Int64, m_out.operation(operationToIndexString), m_callFrame, index));
10935 }
10936
10937 void compileCheckStructureImmediate()
10938 {
10939 LValue structure = lowCell(m_node->child1());
10940 checkStructure(
10941 structure, noValue(), BadCache, m_node->structureSet(),
10942 [this] (RegisteredStructure structure) {
10943 return weakStructure(structure);
10944 });
10945 }
10946
10947 void compileMaterializeNewObject()
10948 {
10949 ObjectMaterializationData& data = m_node->objectMaterializationData();
10950
10951 // Lower the values first, to avoid creating values inside a control flow diamond.
10952
10953 Vector<LValue, 8> values;
10954 for (unsigned i = 0; i < data.m_properties.size(); ++i) {
10955 Edge edge = m_graph.varArgChild(m_node, 1 + i);
10956 switch (data.m_properties[i].kind()) {
10957 case PublicLengthPLoc:
10958 case VectorLengthPLoc:
10959 values.append(lowInt32(edge));
10960 break;
10961 default:
10962 values.append(lowJSValue(edge));
10963 break;
10964 }
10965 }
10966
10967 RegisteredStructureSet set = m_node->structureSet();
10968
10969 Vector<LBasicBlock, 1> blocks(set.size());
10970 for (unsigned i = set.size(); i--;)
10971 blocks[i] = m_out.newBlock();
10972 LBasicBlock dummyDefault = m_out.newBlock();
10973 LBasicBlock outerContinuation = m_out.newBlock();
10974
10975 Vector<SwitchCase, 1> cases(set.size());
10976 for (unsigned i = set.size(); i--;)
10977 cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
10978 m_out.switchInstruction(
10979 lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
10980
10981 LBasicBlock outerLastNext = m_out.m_nextBlock;
10982
10983 Vector<ValueFromBlock, 1> results;
10984
10985 for (unsigned i = set.size(); i--;) {
10986 m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
10987
10988 RegisteredStructure structure = set.at(i);
10989
10990 LValue object;
10991 LValue butterfly;
10992
10993 if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
10994 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
10995 Allocator cellAllocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
10996
10997 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
10998 unsigned indexingHeaderSize = 0;
10999 LValue indexingPayloadSizeInBytes = m_out.intPtrZero;
11000 LValue vectorLength = m_out.int32Zero;
11001 LValue publicLength = m_out.int32Zero;
11002 if (hasIndexingHeader) {
11003 indexingHeaderSize = sizeof(IndexingHeader);
11004 for (unsigned i = data.m_properties.size(); i--;) {
11005 PromotedLocationDescriptor descriptor = data.m_properties[i];
11006 switch (descriptor.kind()) {
11007 case PublicLengthPLoc:
11008 publicLength = values[i];
11009 break;
11010 case VectorLengthPLoc:
11011 vectorLength = values[i];
11012 break;
11013 default:
11014 break;
11015 }
11016 }
11017 indexingPayloadSizeInBytes =
11018 m_out.mul(m_out.zeroExtPtr(vectorLength), m_out.intPtrEight);
11019 }
11020
11021 LValue butterflySize = m_out.add(
11022 m_out.constIntPtr(
11023 structure->outOfLineCapacity() * sizeof(JSValue) + indexingHeaderSize),
11024 indexingPayloadSizeInBytes);
11025
11026 LBasicBlock slowPath = m_out.newBlock();
11027 LBasicBlock continuation = m_out.newBlock();
11028
11029 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
11030
11031 ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
11032
11033 LValue startOfStorage = allocateHeapCell(
11034 allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, slowPath),
11035 slowPath);
11036
11037 LValue fastButterflyValue = m_out.add(
11038 startOfStorage,
11039 m_out.constIntPtr(
11040 structure->outOfLineCapacity() * sizeof(JSValue) + sizeof(IndexingHeader)));
11041
11042 ValueFromBlock haveButterfly = m_out.anchor(fastButterflyValue);
11043
11044 splatWords(
11045 fastButterflyValue,
11046 m_out.constInt32(-structure->outOfLineCapacity() - 1),
11047 m_out.constInt32(-1),
11048 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11049
11050 m_out.store32(vectorLength, fastButterflyValue, m_heaps.Butterfly_vectorLength);
11051
11052 LValue fastObjectValue = allocateObject(
11053 m_out.constIntPtr(cellAllocator.localAllocator()), structure, fastButterflyValue,
11054 slowPath);
11055
11056 ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
11057 ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
11058 m_out.jump(continuation);
11059
11060 m_out.appendTo(slowPath, continuation);
11061
11062 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
11063
11064 VM& vm = this->vm();
11065 LValue slowObjectValue;
11066 if (hasIndexingHeader) {
11067 slowObjectValue = lazySlowPath(
11068 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11069 return createLazyCallGenerator(vm,
11070 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
11071 locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
11072 locations[1].directGPR(), locations[2].directGPR());
11073 },
11074 vectorLength, butterflyValue);
11075 } else {
11076 slowObjectValue = lazySlowPath(
11077 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11078 return createLazyCallGenerator(vm,
11079 operationNewObjectWithButterfly, locations[0].directGPR(),
11080 CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
11081 },
11082 butterflyValue);
11083 }
11084 ValueFromBlock slowObject = m_out.anchor(slowObjectValue);
11085 ValueFromBlock slowButterfly = m_out.anchor(
11086 m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly));
11087
11088 m_out.jump(continuation);
11089
11090 m_out.appendTo(continuation, lastNext);
11091
11092 object = m_out.phi(pointerType(), fastObject, slowObject);
11093 butterfly = m_out.phi(pointerType(), fastButterfly, slowButterfly);
11094
11095 m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
11096
11097 initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
11098
11099 HashMap<int32_t, LValue, DefaultHash<int32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
11100 Vector<int32_t> indices;
11101 for (unsigned i = data.m_properties.size(); i--;) {
11102 PromotedLocationDescriptor descriptor = data.m_properties[i];
11103 if (descriptor.kind() != IndexedPropertyPLoc)
11104 continue;
11105 int32_t index = static_cast<int32_t>(descriptor.info());
11106
11107 auto result = indexMap.add(index, values[i]);
11108 DFG_ASSERT(m_graph, m_node, result); // Duplicates are illegal.
11109
11110 indices.append(index);
11111 }
11112
11113 if (!indices.isEmpty()) {
11114 std::sort(indices.begin(), indices.end());
11115
11116 Vector<LBasicBlock> blocksWithStores(indices.size());
11117 Vector<LBasicBlock> blocksWithChecks(indices.size());
11118
11119 for (unsigned i = indices.size(); i--;) {
11120 blocksWithStores[i] = m_out.newBlock();
11121 blocksWithChecks[i] = m_out.newBlock(); // blocksWithChecks[0] is the continuation.
11122 }
11123
11124 LBasicBlock indexLastNext = m_out.m_nextBlock;
11125
11126 for (unsigned i = indices.size(); i--;) {
11127 int32_t index = indices[i];
11128 LValue value = indexMap.get(index);
11129
11130 m_out.branch(
11131 m_out.below(m_out.constInt32(index), publicLength),
11132 unsure(blocksWithStores[i]), unsure(blocksWithChecks[i]));
11133
11134 m_out.appendTo(blocksWithStores[i], blocksWithChecks[i]);
11135
11136 // This has to type-check and convert its inputs, but it cannot do so in a
11137 // way that updates AI. That's a bit annoying, but if you think about how
11138 // sinking works, it's actually not a bad thing. We are virtually guaranteed
11139 // that these type checks will not fail, since the type checks that guarded
11140 // the original stores to the array are still somewhere above this point.
11141 Output::StoreType storeType;
11142 IndexedAbstractHeap* heap;
11143 switch (structure->indexingType()) {
11144 case ALL_INT32_INDEXING_TYPES:
11145 // FIXME: This could use the proven type if we had the Edge for the
11146 // value. https://bugs.webkit.org/show_bug.cgi?id=155311
11147 speculate(BadType, noValue(), nullptr, isNotInt32(value));
11148 storeType = Output::Store64;
11149 heap = &m_heaps.indexedInt32Properties;
11150 break;
11151
11152 case ALL_DOUBLE_INDEXING_TYPES: {
11153 // FIXME: If the source is ValueRep, we should avoid emitting any
11154 // checks. We could also avoid emitting checks if we had the Edge of
11155 // this value. https://bugs.webkit.org/show_bug.cgi?id=155311
11156
11157 LBasicBlock intCase = m_out.newBlock();
11158 LBasicBlock doubleCase = m_out.newBlock();
11159 LBasicBlock continuation = m_out.newBlock();
11160
11161 m_out.branch(isInt32(value), unsure(intCase), unsure(doubleCase));
11162
11163 LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
11164
11165 ValueFromBlock intResult =
11166 m_out.anchor(m_out.intToDouble(unboxInt32(value)));
11167 m_out.jump(continuation);
11168
11169 m_out.appendTo(doubleCase, continuation);
11170
11171 speculate(BadType, noValue(), nullptr, isNumber(value));
11172 ValueFromBlock doubleResult = m_out.anchor(unboxDouble(value));
11173 m_out.jump(continuation);
11174
11175 m_out.appendTo(continuation, lastNext);
11176 value = m_out.phi(Double, intResult, doubleResult);
11177 storeType = Output::StoreDouble;
11178 heap = &m_heaps.indexedDoubleProperties;
11179 break;
11180 }
11181
11182 case ALL_CONTIGUOUS_INDEXING_TYPES:
11183 storeType = Output::Store64;
11184 heap = &m_heaps.indexedContiguousProperties;
11185 break;
11186
11187 default:
11188 DFG_CRASH(m_graph, m_node, "Invalid indexing type");
11189 break;
11190 }
11191
11192 m_out.store(value, m_out.address(butterfly, heap->at(index)), storeType);
11193
11194 m_out.jump(blocksWithChecks[i]);
11195 m_out.appendTo(
11196 blocksWithChecks[i], i ? blocksWithStores[i - 1] : indexLastNext);
11197 }
11198 }
11199 } else {
11200 // In the easy case where we can do a one-shot allocation, we simply allocate the
11201 // object to directly have the desired structure.
11202 object = allocateObject(structure);
11203 butterfly = nullptr; // Don't have one, don't need one.
11204 }
11205
11206 BitVector setInlineOffsets;
11207 for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
11208 for (unsigned i = data.m_properties.size(); i--;) {
11209 PromotedLocationDescriptor descriptor = data.m_properties[i];
11210 if (descriptor.kind() != NamedPropertyPLoc)
11211 continue;
11212 if (m_graph.identifiers()[descriptor.info()] != entry.key)
11213 continue;
11214
11215 LValue base;
11216 if (isInlineOffset(entry.offset)) {
11217 setInlineOffsets.set(entry.offset);
11218 base = object;
11219 } else
11220 base = butterfly;
11221 storeProperty(values[i], base, descriptor.info(), entry.offset);
11222 break;
11223 }
11224 }
11225 for (unsigned i = structure->inlineCapacity(); i--;) {
11226 if (!setInlineOffsets.get(i))
11227 m_out.store64(m_out.int64Zero, m_out.address(m_heaps.properties.atAnyNumber(), object, offsetRelativeToBase(i)));
11228 }
11229
11230 results.append(m_out.anchor(object));
11231 m_out.jump(outerContinuation);
11232 }
11233
11234 m_out.appendTo(dummyDefault, outerContinuation);
11235 m_out.unreachable();
11236
11237 m_out.appendTo(outerContinuation, outerLastNext);
11238 setJSValue(m_out.phi(pointerType(), results));
11239 mutatorFence();
11240 }
11241
void compileMaterializeCreateActivation()
{
    // Materializes a JSLexicalEnvironment that allocation sinking elided.
    // varArgChild(0) holds the SymbolTable constant, varArgChild(1) the
    // enclosing scope, and varArgChild(2 + i) the i-th recovered
    // closure-variable value listed in the materialization data.
    ObjectMaterializationData& data = m_node->objectMaterializationData();

    Vector<LValue, 8> values;
    for (unsigned i = 0; i < data.m_properties.size(); ++i)
        values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));

    LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
    SymbolTable* table = m_node->castOperand<SymbolTable*>();
    ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
    RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());

    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

    // Fast path: inline-allocate the environment, then link it into the
    // scope chain and point it at its symbol table.
    LValue fastObject = allocateObject<JSLexicalEnvironment>(
        JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

    m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
    m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);


    ValueFromBlock fastResult = m_out.anchor(fastObject);
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    // We ensure allocation sinking explictly sets bottom values for all field members.
    // Therefore, it doesn't matter what JSValue we pass in as the initialization value
    // because all fields will be overwritten.
    // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that
    // doesn't initialize every slot because we are guaranteed to do that here.
    VM& vm = this->vm();
    LValue callResult = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationCreateActivationDirect, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                CCallHelpers::TrustedImmPtr(table),
                CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
        }, scope);
    ValueFromBlock slowResult = m_out.anchor(callResult);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    LValue activation = m_out.phi(pointerType(), fastResult, slowResult);
    RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
    // Store every recovered closure variable into its environment slot.
    for (unsigned i = 0; i < data.m_properties.size(); ++i) {
        PromotedLocationDescriptor descriptor = data.m_properties[i];
        ASSERT(descriptor.kind() == ClosureVarPLoc);
        m_out.store64(
            values[i], activation,
            m_heaps.JSLexicalEnvironment_variables[descriptor.info()]);
    }

    if (validationEnabled()) {
        // Validate to make sure every slot in the scope has one value.
        ConcurrentJSLocker locker(table->m_lock);
        for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
            bool found = false;
            for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                PromotedLocationDescriptor descriptor = data.m_properties[i];
                ASSERT(descriptor.kind() == ClosureVarPLoc);
                if (iter->value.scopeOffset().offset() == descriptor.info()) {
                    found = true;
                    break;
                }
            }
            ASSERT_UNUSED(found, found);
        }
    }

    // Fence the initializing stores before the object escapes (store
    // ordering for the concurrent collector).
    mutatorFence();
    setJSValue(activation);
}
11319
void compileCheckTraps()
{
    // Polling-based VM trap check: load the one-byte "need trap handling"
    // flag and, in the rare case it is set, call operationHandleTraps.
    ASSERT(Options::usePollingTraps());
    LBasicBlock needTrapHandling = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().needTrapHandlingAddress()));
    m_out.branch(m_out.isZero32(state),
        usually(continuation), rarely(needTrapHandling));

    LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);

    VM& vm = this->vm();
    // InvalidGPRReg: the operation produces no result we consume.
    lazySlowPath(
        [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg);
        });
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
}
11341
11342 void compileRegExpExec()
11343 {
11344 LValue globalObject = lowCell(m_node->child1());
11345
11346 if (m_node->child2().useKind() == RegExpObjectUse) {
11347 LValue base = lowRegExpObject(m_node->child2());
11348
11349 if (m_node->child3().useKind() == StringUse) {
11350 LValue argument = lowString(m_node->child3());
11351 LValue result = vmCall(
11352 Int64, m_out.operation(operationRegExpExecString), m_callFrame, globalObject,
11353 base, argument);
11354 setJSValue(result);
11355 return;
11356 }
11357
11358 LValue argument = lowJSValue(m_node->child3());
11359 LValue result = vmCall(
11360 Int64, m_out.operation(operationRegExpExec), m_callFrame, globalObject, base,
11361 argument);
11362 setJSValue(result);
11363 return;
11364 }
11365
11366 LValue base = lowJSValue(m_node->child2());
11367 LValue argument = lowJSValue(m_node->child3());
11368 LValue result = vmCall(
11369 Int64, m_out.operation(operationRegExpExecGeneric), m_callFrame, globalObject, base,
11370 argument);
11371 setJSValue(result);
11372 }
11373
11374 void compileRegExpExecNonGlobalOrSticky()
11375 {
11376 LValue globalObject = lowCell(m_node->child1());
11377 LValue argument = lowString(m_node->child2());
11378 LValue result = vmCall(
11379 Int64, m_out.operation(operationRegExpExecNonGlobalOrSticky), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11380 setJSValue(result);
11381 }
11382
11383 void compileRegExpMatchFastGlobal()
11384 {
11385 LValue globalObject = lowCell(m_node->child1());
11386 LValue argument = lowString(m_node->child2());
11387 LValue result = vmCall(
11388 Int64, m_out.operation(operationRegExpMatchFastGlobalString), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11389 setJSValue(result);
11390 }
11391
11392 void compileRegExpTest()
11393 {
11394 LValue globalObject = lowCell(m_node->child1());
11395
11396 if (m_node->child2().useKind() == RegExpObjectUse) {
11397 LValue base = lowRegExpObject(m_node->child2());
11398
11399 if (m_node->child3().useKind() == StringUse) {
11400 LValue argument = lowString(m_node->child3());
11401 LValue result = vmCall(
11402 Int32, m_out.operation(operationRegExpTestString), m_callFrame, globalObject,
11403 base, argument);
11404 setBoolean(result);
11405 return;
11406 }
11407
11408 LValue argument = lowJSValue(m_node->child3());
11409 LValue result = vmCall(
11410 Int32, m_out.operation(operationRegExpTest), m_callFrame, globalObject, base,
11411 argument);
11412 setBoolean(result);
11413 return;
11414 }
11415
11416 LValue base = lowJSValue(m_node->child2());
11417 LValue argument = lowJSValue(m_node->child3());
11418 LValue result = vmCall(
11419 Int32, m_out.operation(operationRegExpTestGeneric), m_callFrame, globalObject, base,
11420 argument);
11421 setBoolean(result);
11422 }
11423
11424 void compileRegExpMatchFast()
11425 {
11426 LValue globalObject = lowCell(m_node->child1());
11427 LValue base = lowRegExpObject(m_node->child2());
11428 LValue argument = lowString(m_node->child3());
11429 LValue result = vmCall(
11430 Int64, m_out.operation(operationRegExpMatchFastString), m_callFrame, globalObject,
11431 base, argument);
11432 setJSValue(result);
11433 }
11434
void compileNewRegexp()
{
    // Creates a RegExpObject for a compile-time-constant RegExp with the
    // given initial lastIndex. Fast path inline-allocates; slow path calls
    // operationNewRegexpWithLastIndex.
    FrozenValue* regexp = m_node->cellOperand();
    LValue lastIndex = lowJSValue(m_node->child1());
    ASSERT(regexp->cell()->inherits<RegExp>(vm()));

    LBasicBlock slowCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

    auto structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->regExpStructure());
    LValue fastResultValue = allocateObject<RegExpObject>(structure, m_out.intPtrZero, slowCase);
    // The RegExp pointer and the lastIndex-is-not-writable flag share one
    // field; storing the bare pointer leaves the flag bit clear.
    m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag);
    m_out.store64(lastIndex, fastResultValue, m_heaps.RegExpObject_lastIndex);
    mutatorFence();
    ValueFromBlock fastResult = m_out.anchor(fastResultValue);
    m_out.jump(continuation);

    m_out.appendTo(slowCase, continuation);
    VM& vm = this->vm();
    RegExp* regexpCell = regexp->cast<RegExp*>();
    LValue slowResultValue = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationNewRegexpWithLastIndex, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(regexpCell), locations[1].directGPR());
        }, lastIndex);
    ValueFromBlock slowResult = m_out.anchor(slowResultValue);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
}
11469
11470 void compileSetFunctionName()
11471 {
11472 vmCall(Void, m_out.operation(operationSetFunctionName), m_callFrame,
11473 lowCell(m_node->child1()), lowJSValue(m_node->child2()));
11474 }
11475
11476 void compileStringReplace()
11477 {
11478 if (m_node->child1().useKind() == StringUse
11479 && m_node->child2().useKind() == RegExpObjectUse
11480 && m_node->child3().useKind() == StringUse) {
11481
11482 if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
11483 if (!replace->length()) {
11484 LValue string = lowString(m_node->child1());
11485 LValue regExp = lowRegExpObject(m_node->child2());
11486
11487 LValue result = vmCall(
11488 pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpEmptyStr),
11489 m_callFrame, string, regExp);
11490
11491 setJSValue(result);
11492 return;
11493 }
11494 }
11495
11496 LValue string = lowString(m_node->child1());
11497 LValue regExp = lowRegExpObject(m_node->child2());
11498 LValue replace = lowString(m_node->child3());
11499
11500 LValue result = vmCall(
11501 pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpString),
11502 m_callFrame, string, regExp, replace);
11503
11504 setJSValue(result);
11505 return;
11506 }
11507
11508 LValue search;
11509 if (m_node->child2().useKind() == StringUse)
11510 search = lowString(m_node->child2());
11511 else
11512 search = lowJSValue(m_node->child2());
11513
11514 LValue result = vmCall(
11515 pointerType(), m_out.operation(operationStringProtoFuncReplaceGeneric), m_callFrame,
11516 lowJSValue(m_node->child1()), search,
11517 lowJSValue(m_node->child3()));
11518
11519 setJSValue(result);
11520 }
11521
11522 void compileGetRegExpObjectLastIndex()
11523 {
11524 setJSValue(m_out.load64(lowRegExpObject(m_node->child1()), m_heaps.RegExpObject_lastIndex));
11525 }
11526
11527 void compileSetRegExpObjectLastIndex()
11528 {
11529 if (!m_node->ignoreLastIndexIsWritable()) {
11530 LValue regExp = lowRegExpObject(m_node->child1());
11531 LValue value = lowJSValue(m_node->child2());
11532
11533 speculate(
11534 ExoticObjectMode, noValue(), nullptr,
11535 m_out.testNonZeroPtr(
11536 m_out.loadPtr(regExp, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag),
11537 m_out.constIntPtr(RegExpObject::lastIndexIsNotWritableFlag)));
11538
11539 m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
11540 return;
11541 }
11542
11543 m_out.store64(lowJSValue(m_node->child2()), lowCell(m_node->child1()), m_heaps.RegExpObject_lastIndex);
11544 }
11545
11546 void compileLogShadowChickenPrologue()
11547 {
11548 LValue packet = ensureShadowChickenPacket();
11549 LValue scope = lowCell(m_node->child1());
11550
11551 m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
11552 m_out.storePtr(m_out.loadPtr(addressFor(0)), packet, m_heaps.ShadowChicken_Packet_callerFrame);
11553 m_out.storePtr(m_out.loadPtr(payloadFor(CallFrameSlot::callee)), packet, m_heaps.ShadowChicken_Packet_callee);
11554 m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
11555 }
11556
void compileLogShadowChickenTail()
{
    // Append a tail packet to the ShadowChicken log, recording the frame
    // state at a tail call site.
    LValue packet = ensureShadowChickenPacket();
    LValue thisValue = lowJSValue(m_node->child1());
    LValue scope = lowCell(m_node->child2());
    CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(m_node->origin.semantic);

    m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
    // The tail marker in the callee slot distinguishes this packet from a
    // prologue packet.
    m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
    m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
    m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
    // We don't want the CodeBlock to have a weak pointer to itself because
    // that would cause it to always get collected.
    m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
    m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
}
11573
11574 void compileRecordRegExpCachedResult()
11575 {
11576 Edge globalObjectEdge = m_graph.varArgChild(m_node, 0);
11577 Edge regExpEdge = m_graph.varArgChild(m_node, 1);
11578 Edge stringEdge = m_graph.varArgChild(m_node, 2);
11579 Edge startEdge = m_graph.varArgChild(m_node, 3);
11580 Edge endEdge = m_graph.varArgChild(m_node, 4);
11581
11582 LValue globalObject = lowCell(globalObjectEdge);
11583 LValue regExp = lowCell(regExpEdge);
11584 LValue string = lowCell(stringEdge);
11585 LValue start = lowInt32(startEdge);
11586 LValue end = lowInt32(endEdge);
11587
11588 m_out.storePtr(regExp, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastRegExp);
11589 m_out.storePtr(string, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastInput);
11590 m_out.store32(start, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_start);
11591 m_out.store32(end, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_end);
11592 m_out.store32As8(
11593 m_out.constInt32(0),
11594 m_out.address(globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_reified));
11595 }
11596
11597 struct ArgumentsLength {
11598 ArgumentsLength()
11599 : isKnown(false)
11600 , known(UINT_MAX)
11601 , value(nullptr)
11602 {
11603 }
11604
11605 bool isKnown;
11606 unsigned known;
11607 LValue value;
11608 };
11609 ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
11610 {
11611 ArgumentsLength length;
11612
11613 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
11614 length.known = inlineCallFrame->argumentCountIncludingThis - 1;
11615 length.isKnown = true;
11616 length.value = m_out.constInt32(length.known);
11617 } else {
11618 length.known = UINT_MAX;
11619 length.isKnown = false;
11620
11621 VirtualRegister argumentCountRegister;
11622 if (!inlineCallFrame)
11623 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
11624 else
11625 argumentCountRegister = inlineCallFrame->argumentCountRegister;
11626 length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
11627 }
11628
11629 return length;
11630 }
11631
11632 ArgumentsLength getArgumentsLength()
11633 {
11634 return getArgumentsLength(m_node->origin.semantic.inlineCallFrame());
11635 }
11636
11637 LValue getCurrentCallee()
11638 {
11639 if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame()) {
11640 if (frame->isClosureCall)
11641 return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
11642 return weakPointer(frame->calleeRecovery.constant().asCell());
11643 }
11644 return m_out.loadPtr(addressFor(CallFrameSlot::callee));
11645 }
11646
11647 LValue getArgumentsStart(InlineCallFrame* inlineCallFrame, unsigned offset = 0)
11648 {
11649 VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame) + offset;
11650 return addressFor(start).value();
11651 }
11652
11653 LValue getArgumentsStart()
11654 {
11655 return getArgumentsStart(m_node->origin.semantic.inlineCallFrame());
11656 }
11657
template<typename Functor>
void checkStructure(
    LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
    const RegisteredStructureSet& set, const Functor& weakStructureDiscriminant)
{
    // Speculates that structureDiscriminant matches one of the structures
    // in `set`, OSR-exiting with `exitKind` otherwise.
    // `weakStructureDiscriminant` maps a RegisteredStructure to the LValue
    // form being compared against.
    if (set.isEmpty()) {
        // No allowed structure: this path can never succeed; always exit.
        terminate(exitKind);
        return;
    }

    if (set.size() == 1) {
        // Single structure: one compare-and-exit, no control flow needed.
        speculate(
            exitKind, formattedValue, 0,
            m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
        return;
    }

    LBasicBlock continuation = m_out.newBlock();

    // Emit a chain of equality tests; a match jumps to the continuation.
    LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
    for (unsigned i = 0; i < set.size() - 1; ++i) {
        LBasicBlock nextStructure = m_out.newBlock();
        m_out.branch(
            m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
            unsure(continuation), unsure(nextStructure));
        m_out.appendTo(nextStructure);
    }

    // The final candidate is checked with a speculation instead of a branch.
    speculate(
        exitKind, formattedValue, 0,
        m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));

    m_out.jump(continuation);
    m_out.appendTo(continuation, lastNext);
}
11693
LValue numberOrNotCellToInt32(Edge edge, LValue value)
{
    // Converts a boxed JSValue to Int32 for a NumberUse or NotCellUse edge.
    // Int32 values unbox directly; doubles go through doubleToInt32. For
    // NotCellUse, remaining non-number values map to 1 for `true` and 0 for
    // everything else.
    LBasicBlock intCase = m_out.newBlock();
    LBasicBlock notIntCase = m_out.newBlock();
    LBasicBlock doubleCase = 0;
    LBasicBlock notNumberCase = 0;
    if (edge.useKind() == NotCellUse) {
        doubleCase = m_out.newBlock();
        notNumberCase = m_out.newBlock();
    }
    LBasicBlock continuation = m_out.newBlock();

    Vector<ValueFromBlock> results;

    m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase));

    LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
    results.append(m_out.anchor(unboxInt32(value)));
    m_out.jump(continuation);

    if (edge.useKind() == NumberUse) {
        m_out.appendTo(notIntCase, continuation);
        // Not an int32: must be a double, or the type check OSR-exits.
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMisc(value));
        results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
        m_out.jump(continuation);
    } else {
        m_out.appendTo(notIntCase, doubleCase);
        m_out.branch(
            isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));

        m_out.appendTo(doubleCase, notNumberCase);
        results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
        m_out.jump(continuation);

        m_out.appendTo(notNumberCase, continuation);

        // Cells are not allowed on a NotCellUse edge; exit if we see one.
        FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCellCheck, isCell(value));

        // Only `true` converts to 1; false/null/undefined become 0.
        LValue specialResult = m_out.select(
            m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))),
            m_out.int32One, m_out.int32Zero);
        results.append(m_out.anchor(specialResult));
        m_out.jump(continuation);
    }

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, results);
}
11742
11743 LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset)
11744 {
11745 return m_out.load64(addressOfProperty(storage, identifierNumber, offset));
11746 }
11747
11748 void storeProperty(
11749 LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset)
11750 {
11751 m_out.store64(value, addressOfProperty(storage, identifierNumber, offset));
11752 }
11753
11754 TypedPointer addressOfProperty(
11755 LValue storage, unsigned identifierNumber, PropertyOffset offset)
11756 {
11757 return m_out.address(
11758 m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset));
11759 }
11760
11761 LValue storageForTransition(
11762 LValue object, PropertyOffset offset,
11763 Structure* previousStructure, Structure* nextStructure)
11764 {
11765 if (isInlineOffset(offset))
11766 return object;
11767
11768 if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity())
11769 return m_out.loadPtr(object, m_heaps.JSObject_butterfly);
11770
11771 LValue result;
11772 if (!previousStructure->outOfLineCapacity())
11773 result = allocatePropertyStorage(object, previousStructure);
11774 else {
11775 result = reallocatePropertyStorage(
11776 object, m_out.loadPtr(object, m_heaps.JSObject_butterfly),
11777 previousStructure, nextStructure);
11778 }
11779
11780 nukeStructureAndSetButterfly(result, object);
11781 return result;
11782 }
11783
void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
{
    // Fills butterfly elements [begin, end) with the hole value appropriate
    // for the indexing type: PNaN for double storage, the empty JSValue
    // otherwise. Undecided storage needs no initialization.

    // Note: this compares IR node identity; it catches the common case
    // where both bounds are literally the same value.
    if (begin == end)
        return;

    if (indexingType->hasInt32()) {
        // Indexing type known at compile time: pick the hole constant and
        // the precise abstract heap statically.
        IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
        if (hasUndecided(rawIndexingType))
            return;
        IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
        DFG_ASSERT(m_graph, m_node, heap);

        LValue hole;
        if (hasDouble(rawIndexingType))
            hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
        else
            hole = m_out.constInt64(JSValue::encode(JSValue()));

        splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
    } else {
        // Indexing type only known at run time: select the hole value
        // dynamically and use the root heap since the precise heap is
        // unknown.
        LValue hole = m_out.select(
            m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
            m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
            m_out.constInt64(JSValue::encode(JSValue())));
        splatWords(butterfly, begin, end, hole, m_heaps.root);
    }
}
11812
void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
{
    // Stores `value` into the 64-bit words at indices [begin, end) relative
    // to `base`. Small constant-sized ranges are fully unrolled; otherwise
    // a loop is emitted.
    const uint64_t unrollingLimit = 10;
    if (begin->hasInt() && end->hasInt()) {
        uint64_t beginConst = static_cast<uint64_t>(begin->asInt());
        uint64_t endConst = static_cast<uint64_t>(end->asInt());

        if (endConst - beginConst <= unrollingLimit) {
            for (uint64_t i = beginConst; i < endConst; ++i) {
                LValue pointer = m_out.add(base, m_out.constIntPtr(i * sizeof(uint64_t)));
                m_out.store64(value, TypedPointer(heap, pointer));
            }
            return;
        }
    }

    LBasicBlock initLoop = m_out.newBlock();
    LBasicBlock initDone = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(initLoop);

    // The loop counts `index` down from end toward begin while the store
    // pointer walks upward from base + begin * 8, so it executes
    // (end - begin) iterations.
    ValueFromBlock originalIndex = m_out.anchor(end);
    ValueFromBlock originalPointer = m_out.anchor(
        m_out.add(base, m_out.shl(m_out.signExt32ToPtr(begin), m_out.constInt32(3))));
    m_out.branch(m_out.notEqual(end, begin), unsure(initLoop), unsure(initDone));

    m_out.appendTo(initLoop, initDone);
    LValue index = m_out.phi(Int32, originalIndex);
    LValue pointer = m_out.phi(pointerType(), originalPointer);

    m_out.store64(value, TypedPointer(heap, pointer));

    LValue nextIndex = m_out.sub(index, m_out.int32One);
    m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
    m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight)));
    m_out.branch(
        m_out.notEqual(nextIndex, begin), unsure(initLoop), unsure(initDone));

    m_out.appendTo(initDone, lastNext);
}
11853
11854 LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
11855 {
11856 if (previousStructure->couldHaveIndexingHeader()) {
11857 return vmCall(
11858 pointerType(),
11859 m_out.operation(operationAllocateComplexPropertyStorageWithInitialCapacity),
11860 m_callFrame, object);
11861 }
11862
11863 LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);
11864
11865 splatWords(
11866 result,
11867 m_out.constInt32(-initialOutOfLineCapacity - 1), m_out.constInt32(-1),
11868 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11869
11870 return result;
11871 }
11872
    // Grows an object's out-of-line property storage from `previous`'s capacity to
    // `next`'s (the old capacity times outOfLineGrowthFactor). Copies the existing
    // out-of-line properties into the new storage, zeroes the newly added slots, and
    // returns the new butterfly pointer.
    LValue reallocatePropertyStorage(
        LValue object, LValue oldStorage, Structure* previous, Structure* next)
    {
        size_t oldSize = previous->outOfLineCapacity();
        size_t newSize = oldSize * outOfLineGrowthFactor;

        ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());

        if (previous->couldHaveIndexingHeader()) {
            // Possible indexing header: defer to the slow-path operation.
            LValue newAllocSize = m_out.constIntPtr(newSize);
            return vmCall(pointerType(), m_out.operation(operationAllocateComplexPropertyStorage), m_callFrame, object, newAllocSize);
        }

        LValue result = allocatePropertyStorageWithSizeImpl(newSize);

        // Out-of-line properties live at negative offsets from the butterfly pointer,
        // past the indexing header.
        ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
        ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));

        // Copy the old out-of-line properties, one pointer-sized word at a time.
        for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
            LValue loaded =
                m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
            m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
        }

        // Zero the slots that were added by the growth (word indices
        // [-newSize - 1, -oldSize - 1) relative to the butterfly).
        splatWords(
            result,
            m_out.constInt32(-newSize - 1), m_out.constInt32(-oldSize - 1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());

        return result;
    }
11904
    // Allocates sizeInValues JSValue-sized out-of-line property slots (plus room for the
    // indexing header) from the JSValue gigacage auxiliary space. Tries the inline
    // allocator first; falls back to a lazy slow path call. The returned value is
    // startOfStorage + sizeInBytes + sizeof(IndexingHeader), i.e. the butterfly pointer.
    LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
    {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        size_t sizeInBytes = sizeInValues * sizeof(JSValue);
        Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
        LValue startOfStorage = allocateHeapCell(
            m_out.constIntPtr(allocator.localAllocator()), slowPath);
        ValueFromBlock fastButterfly = m_out.anchor(
            m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        LValue slowButterflyValue;
        VM& vm = this->vm();
        if (sizeInValues == initialOutOfLineCapacity) {
            // The initial-capacity operation bakes the size in, so no size argument is
            // passed.
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorageWithInitialCapacity,
                        locations[0].directGPR());
                });
        } else {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorage, locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(sizeInValues));
                });
        }
        ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(pointerType(), fastButterfly, slowButterfly);
    }
11947
    // Emits a get_by_id inline cache for `base` using a patchpoint. The fast path is the
    // IC generated by JITGetByIdGenerator; the late path links the IC's slow-path jump to
    // a call of the appropriate optimizing operation. params[0] is the result JSValue,
    // params[1] is the base.
    LValue getById(LValue base, AccessType type)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        // FIXME: If this is a GetByIdFlush/GetByIdDirectFlush, we might get some performance boost if we claim that it
        // clobbers volatile registers late. It's not necessary for correctness, though, since the
        // IC code is super smart about saving registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITGetByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()), type);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJI optimizationFunction = appropriateOptimizingGetByIdFunction(type);

                        // Out-of-line slow path: call the optimizing operation, then
                        // jump back to the end of the fast path.
                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12019
    // Like getById, but for get_by_id_with_this: the patchpoint also carries the explicit
    // `this` value. params[0] is the result, params[1] the base, params[2] the this value.
    LValue getByIdWithThis(LValue base, LValue thisValue)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(thisValue);
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITGetByIdWithThisGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJI optimizationFunction = operationGetByIdWithThisOptimize;

                        // Out-of-line slow path: call the optimizing operation, then
                        // jump back to the end of the fast path.
                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[2].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12087
12088 LValue isFastTypedArray(LValue object)
12089 {
12090 return m_out.equal(
12091 m_out.load32(object, m_heaps.JSArrayBufferView_mode),
12092 m_out.constInt32(FastTypedArray));
12093 }
12094
12095 TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
12096 {
12097 return m_out.baseIndex(
12098 heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
12099 }
12100
12101 template<typename IntFunctor, typename DoubleFunctor>
12102 void compare(
12103 const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor,
12104 C_JITOperation_TT stringIdentFunction,
12105 C_JITOperation_B_EJssJss stringFunction,
12106 S_JITOperation_EJJ fallbackFunction)
12107 {
12108 if (m_node->isBinaryUseKind(Int32Use)) {
12109 LValue left = lowInt32(m_node->child1());
12110 LValue right = lowInt32(m_node->child2());
12111 setBoolean(intFunctor(left, right));
12112 return;
12113 }
12114
12115 if (m_node->isBinaryUseKind(Int52RepUse)) {
12116 Int52Kind kind;
12117 LValue left = lowWhicheverInt52(m_node->child1(), kind);
12118 LValue right = lowInt52(m_node->child2(), kind);
12119 setBoolean(intFunctor(left, right));
12120 return;
12121 }
12122
12123 if (m_node->isBinaryUseKind(DoubleRepUse)) {
12124 LValue left = lowDouble(m_node->child1());
12125 LValue right = lowDouble(m_node->child2());
12126 setBoolean(doubleFunctor(left, right));
12127 return;
12128 }
12129
12130 if (m_node->isBinaryUseKind(StringIdentUse)) {
12131 LValue left = lowStringIdent(m_node->child1());
12132 LValue right = lowStringIdent(m_node->child2());
12133 setBoolean(m_out.callWithoutSideEffects(Int32, stringIdentFunction, left, right));
12134 return;
12135 }
12136
12137 if (m_node->isBinaryUseKind(StringUse)) {
12138 LValue left = lowCell(m_node->child1());
12139 LValue right = lowCell(m_node->child2());
12140 speculateString(m_node->child1(), left);
12141 speculateString(m_node->child2(), right);
12142
12143 LValue result = vmCall(
12144 Int32, m_out.operation(stringFunction),
12145 m_callFrame, left, right);
12146 setBoolean(result);
12147 return;
12148 }
12149
12150 DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
12151 nonSpeculativeCompare(intFunctor, fallbackFunction);
12152 }
12153
    // Lowers StringSlice. Fast paths: empty result -> the shared empty string;
    // single-character result within the 8/16-bit single-character cache -> cached
    // string. Otherwise calls operationStringSubstr, and rope strings fall all the way
    // back to operationStringSlice.
    void compileStringSlice()
    {
        LBasicBlock lengthCheckCase = m_out.newBlock();
        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock oneCharCase = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock ropeSlowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        LValue start = lowInt32(m_node->child2());
        LValue end = nullptr;
        if (m_node->child3())
            end = lowInt32(m_node->child3());
        else
            end = m_out.constInt32(std::numeric_limits<int32_t>::max()); // Missing end argument: slice to the end of the string.
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropeSlowCase), usually(lengthCheckCase));

        LBasicBlock lastNext = m_out.appendTo(lengthCheckCase, emptyCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
        // Clamp start/end (handling negative indices) to [0, length].
        auto range = populateSliceRange(start, end, length);
        LValue from = range.first;
        LValue to = range.second;
        LValue span = m_out.sub(to, from);
        m_out.branch(m_out.lessThanOrEqual(span, m_out.int32Zero), unsure(emptyCase), unsure(notEmptyCase));

        Vector<ValueFromBlock, 5> results;

        m_out.appendTo(emptyCase, notEmptyCase);
        results.append(m_out.anchor(weakPointer(jsEmptyString(&vm()))));
        m_out.jump(continuation);

        m_out.appendTo(notEmptyCase, oneCharCase);
        m_out.branch(m_out.equal(span, m_out.int32One), unsure(oneCharCase), unsure(slowCase));

        m_out.appendTo(oneCharCase, is8Bit);
        LValue storage = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
        // Dispatch on the impl's 8-bit flag to load the single character.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);
        ValueFromBlock char8Bit = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, storage, m_out.zeroExtPtr(from))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);
        LValue char16BitValue = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(from)));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters above the single-character-string cache limit need a VM call.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowCase);
        // Cached small-string lookup for a single character.
        LValue character = m_out.phi(Int32, char8Bit, char16Bit);
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, ropeSlowCase);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSubstr), m_callFrame, string, from, span)));
        m_out.jump(continuation);

        m_out.appendTo(ropeSlowCase, continuation);
        // Rope strings: pass the original (unclamped) start/end to the operation.
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSlice), m_callFrame, string, start, end)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), results));
    }
12238
    // Lowers ToLowerCase. Fast path: an 8-bit, non-rope string containing only ASCII
    // characters that are already lower case returns the input string unchanged. As soon
    // as a non-ASCII or upper-case character is found (or the string is a rope/16-bit),
    // falls back to operationToLowerCase starting from the index reached so far.
    void compileToLowerCase()
    {
        LBasicBlock notRope = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock loopTop = m_out.newBlock();
        LBasicBlock loopBody = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
        ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
        m_out.branch(isRopeString(string, m_node->child1()),
            unsure(slowPath), unsure(notRope));

        LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowPath), unsure(is8Bit));

        m_out.appendTo(is8Bit, loopTop);
        LValue length = m_out.load32(impl, m_heaps.StringImpl_length);
        LValue buffer = m_out.loadPtr(impl, m_heaps.StringImpl_data);
        ValueFromBlock fastResult = m_out.anchor(string);
        m_out.jump(loopTop);

        m_out.appendTo(loopTop, loopBody);
        LValue index = m_out.phi(Int32, startIndex);
        ValueFromBlock indexFromBlock = m_out.anchor(index);
        m_out.branch(m_out.below(index, length),
            unsure(loopBody), unsure(continuation));

        m_out.appendTo(loopBody, slowPath);

        // FIXME: Strings needs to be caged.
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index)));
        // Bail to the slow path on any non-ASCII byte or ASCII upper-case letter.
        LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F));
        LValue isUpperCase = m_out.belowOrEqual(m_out.sub(byte, m_out.constInt32('A')), m_out.constInt32('Z' - 'A'));
        LValue isBadCharacter = m_out.bitOr(isInvalidAsciiRange, isUpperCase);
        m_out.addIncomingToPhi(index, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.branch(isBadCharacter, unsure(slowPath), unsure(loopTop));

        m_out.appendTo(slowPath, continuation);
        // The operation resumes scanning at slowPathIndex (0 when we never entered the
        // fast loop).
        LValue slowPathIndex = m_out.phi(Int32, startIndexForCall, indexFromBlock);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationToLowerCase), m_callFrame, string, slowPathIndex));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
12293
12294 void compileNumberToStringWithRadix()
12295 {
12296 bool validRadixIsGuaranteed = false;
12297 if (m_node->child2()->isInt32Constant()) {
12298 int32_t radix = m_node->child2()->asInt32();
12299 if (radix >= 2 && radix <= 36)
12300 validRadixIsGuaranteed = true;
12301 }
12302
12303 switch (m_node->child1().useKind()) {
12304 case Int32Use:
12305 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString), m_callFrame, lowInt32(m_node->child1()), lowInt32(m_node->child2())));
12306 break;
12307 case Int52RepUse:
12308 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString), m_callFrame, lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
12309 break;
12310 case DoubleRepUse:
12311 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString), m_callFrame, lowDouble(m_node->child1()), lowInt32(m_node->child2())));
12312 break;
12313 default:
12314 RELEASE_ASSERT_NOT_REACHED();
12315 }
12316 }
12317
12318 void compileNumberToStringWithValidRadixConstant()
12319 {
12320 switch (m_node->child1().useKind()) {
12321 case Int32Use:
12322 setJSValue(vmCall(pointerType(), m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12323 break;
12324 case Int52RepUse:
12325 setJSValue(vmCall(pointerType(), m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12326 break;
12327 case DoubleRepUse:
12328 setJSValue(vmCall(pointerType(), m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12329 break;
12330 default:
12331 RELEASE_ASSERT_NOT_REACHED();
12332 }
12333 }
12334
12335 void compileResolveScopeForHoistingFuncDeclInEval()
12336 {
12337 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12338 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScopeForHoistingFuncDeclInEval), m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12339 }
12340
12341 void compileResolveScope()
12342 {
12343 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12344 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScope),
12345 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12346 }
12347
12348 void compileGetDynamicVar()
12349 {
12350 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12351 setJSValue(vmCall(Int64, m_out.operation(operationGetDynamicVar),
12352 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12353 }
12354
12355 void compilePutDynamicVar()
12356 {
12357 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12358 setJSValue(vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict),
12359 m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12360 }
12361
12362 void compileUnreachable()
12363 {
12364 // It's so tempting to assert that AI has proved that this is unreachable. But that's
12365 // simply not a requirement of the Unreachable opcode at all. If you emit an opcode that
12366 // *you* know will not return, then it's fine to end the basic block with Unreachable
12367 // after that opcode. You don't have to also prove to AI that your opcode does not return.
12368 // Hence, there is nothing to do here but emit code that will crash, so that we catch
12369 // cases where you said Unreachable but you lied.
12370 //
12371 // It's also also worth noting that some clients emit this opcode because they're not 100% sure
12372 // if the code is unreachable, but they would really prefer if we crashed rather than kept going
12373 // if it did turn out to be reachable. Hence, this needs to deterministically crash.
12374
12375 crash();
12376 }
12377
12378 void compileCheckSubClass()
12379 {
12380 LValue cell = lowCell(m_node->child1());
12381
12382 const ClassInfo* classInfo = m_node->classInfo();
12383 if (!classInfo->checkSubClassSnippet) {
12384 LBasicBlock loop = m_out.newBlock();
12385 LBasicBlock parentClass = m_out.newBlock();
12386 LBasicBlock continuation = m_out.newBlock();
12387
12388 LValue structure = loadStructure(cell);
12389 LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
12390 ValueFromBlock otherAtStart = m_out.anchor(classInfo);
12391 m_out.jump(loop);
12392
12393 LBasicBlock lastNext = m_out.appendTo(loop, parentClass);
12394 LValue other = m_out.phi(pointerType(), otherAtStart);
12395 m_out.branch(m_out.equal(other, m_out.constIntPtr(classInfo)), unsure(continuation), unsure(parentClass));
12396
12397 m_out.appendTo(parentClass, continuation);
12398 LValue parent = m_out.loadPtr(other, m_heaps.ClassInfo_parentClass);
12399 speculate(BadType, jsValueValue(cell), m_node->child1().node(), m_out.isNull(parent));
12400 m_out.addIncomingToPhi(other, m_out.anchor(parent));
12401 m_out.jump(loop);
12402
12403 m_out.appendTo(continuation, lastNext);
12404 return;
12405 }
12406
12407 RefPtr<Snippet> domJIT = classInfo->checkSubClassSnippet();
12408 PatchpointValue* patchpoint = m_out.patchpoint(Void);
12409 patchpoint->appendSomeRegister(cell);
12410 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
12411 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
12412
12413 NodeOrigin origin = m_origin;
12414 unsigned osrExitArgumentOffset = patchpoint->numChildren();
12415 OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(jsValueValue(cell), m_node->child1().node());
12416 patchpoint->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, jsValueValue(cell)));
12417
12418 patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
12419 patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
12420 patchpoint->clobber(RegisterSet::macroScratchRegisters());
12421
12422 State* state = &m_ftlState;
12423 Node* node = m_node;
12424 JSValue child1Constant = m_state.forNode(m_node->child1()).value();
12425
12426 patchpoint->setGenerator(
12427 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12428 AllowMacroScratchRegisterUsage allowScratch(jit);
12429
12430 Vector<GPRReg> gpScratch;
12431 Vector<FPRReg> fpScratch;
12432 Vector<SnippetParams::Value> regs;
12433
12434 regs.append(SnippetParams::Value(params[0].gpr(), child1Constant));
12435
12436 for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
12437 gpScratch.append(params.gpScratch(i));
12438
12439 for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
12440 fpScratch.append(params.fpScratch(i));
12441
12442 RefPtr<OSRExitHandle> handle = exitDescriptor->emitOSRExitLater(*state, BadType, origin, params, osrExitArgumentOffset);
12443
12444 SnippetParams domJITParams(*state, params, node, nullptr, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
12445 CCallHelpers::JumpList failureCases = domJIT->generator()->run(jit, domJITParams);
12446
12447 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
12448 linkBuffer.link(failureCases, linkBuffer.locationOf<NoPtrTag>(handle->label));
12449 });
12450 });
12451 patchpoint->effects = Effects::forCheck();
12452 }
12453
    // Lowers CallDOM: lowers each argument according to the DOMJIT signature (the first
    // child is always the `this` cell), then calls the signature's unsafe function via a
    // VM call whose arity matches argumentCount + 1.
    void compileCallDOM()
    {
        const DOMJIT::Signature* signature = m_node->signature();

        // FIXME: We should have a way to call functions with the vector of registers.
        // https://bugs.webkit.org/show_bug.cgi?id=163099
        Vector<LValue, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;

        unsigned index = 0;
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, [&](Node*, Edge edge) {
            if (!index)
                operands.append(lowCell(edge)); // First child is the `this` object.
            else {
                // Lower the argument according to the type the signature declares.
                switch (signature->arguments[index - 1]) {
                case SpecString:
                    operands.append(lowString(edge));
                    break;
                case SpecInt32Only:
                    operands.append(lowInt32(edge));
                    break;
                case SpecBoolean:
                    operands.append(lowBoolean(edge));
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            ++index;
        });

        unsigned argumentCountIncludingThis = signature->argumentCount + 1;
        LValue result;
        assertIsTaggedWith(reinterpret_cast<void*>(signature->unsafeFunction), CFunctionPtrTag);
        // The casts below only select the operation arity; the callee is the same
        // function pointer in every case.
        switch (argumentCountIncludingThis) {
        case 1:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(signature->unsafeFunction)), m_callFrame, operands[0]);
            break;
        case 2:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1]);
            break;
        case 3:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1], operands[2]);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        setJSValue(result);
    }
12505
    // Lowers CallDOMGetter. Without a DOMJIT snippet, directly calls the custom accessor
    // getter via a VM call. With a snippet, emits a patchpoint that runs the snippet's
    // generator with the requested scratch registers.
    void compileCallDOMGetter()
    {
        DOMJIT::CallDOMGetterSnippet* domJIT = m_node->callDOMGetterData()->snippet;
        if (!domJIT) {
            // The following function is not an operation: we directly call a custom accessor getter.
            // Since the getter does not set topCallFrame itself (just like an IC), the
            // caller must set topCallFrame before the call.
            m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
            setJSValue(
                vmCall(Int64, m_out.operation(m_node->callDOMGetterData()->customAccessorGetter.retaggedExecutableAddress<CFunctionPtrTag>()),
                    m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(m_graph.identifiers()[m_node->callDOMGetterData()->identifierNumber])));
            return;
        }

        Edge& baseEdge = m_node->child1();
        LValue base = lowCell(baseEdge);
        JSValue baseConstant = m_state.forNode(baseEdge).value();

        LValue globalObject;
        JSValue globalObjectConstant;
        if (domJIT->requireGlobalObject) {
            Edge& globalObjectEdge = m_node->child2();
            globalObject = lowCell(globalObjectEdge);
            globalObjectConstant = m_state.forNode(globalObjectEdge).value();
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        if (domJIT->requireGlobalObject)
            patchpoint->appendSomeRegister(globalObject);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
        patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        State* state = &m_ftlState;
        Node* node = m_node;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Vector<GPRReg> gpScratch;
                Vector<FPRReg> fpScratch;
                Vector<SnippetParams::Value> regs;

                // params[0] = result, params[1] = base, params[2] = global object (optional).
                regs.append(JSValueRegs(params[0].gpr()));
                regs.append(SnippetParams::Value(params[1].gpr(), baseConstant));
                if (domJIT->requireGlobalObject)
                    regs.append(SnippetParams::Value(params[2].gpr(), globalObjectConstant));

                for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
                    gpScratch.append(params.gpScratch(i));

                for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
                    fpScratch.append(params.fpScratch(i));

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                SnippetParams domJITParams(*state, params, node, exceptions, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
                domJIT->generator()->run(jit, domJITParams);
            });
        patchpoint->effects = Effects::forCall();
        setJSValue(patchpoint);
    }
12572
    // Lowers FilterICStatus: narrows the abstract interpreter's tracked IC status for
    // this node. Emits no code.
    void compileFilterICStatus()
    {
        m_interpreter.filterICStatus(m_node);
    }
12577
12578 LValue byteSwap32(LValue value)
12579 {
12580 // FIXME: teach B3 byteswap
12581 // https://bugs.webkit.org/show_bug.cgi?id=188759
12582
12583 RELEASE_ASSERT(value->type() == Int32);
12584 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
12585 patchpoint->appendSomeRegister(value);
12586 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12587 jit.move(params[1].gpr(), params[0].gpr());
12588 jit.byteSwap32(params[0].gpr());
12589 });
12590 patchpoint->effects = Effects::none();
12591 return patchpoint;
12592 }
12593
12594 LValue byteSwap64(LValue value)
12595 {
12596 // FIXME: teach B3 byteswap
12597 // https://bugs.webkit.org/show_bug.cgi?id=188759
12598
12599 RELEASE_ASSERT(value->type() == Int64);
12600 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
12601 patchpoint->appendSomeRegister(value);
12602 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12603 jit.move(params[1].gpr(), params[0].gpr());
12604 jit.byteSwap64(params[0].gpr());
12605 });
12606 patchpoint->effects = Effects::none();
12607 return patchpoint;
12608 }
12609
    // Emits a runtime branch on the boolean |isLittleEndian| and runs one of the
    // two code-emitting functors on each side. If the functors produce a value
    // (the DataView "get" paths), the results are merged with a phi and returned;
    // if they produce nullptr (the "set" paths, which only emit stores), this
    // returns nullptr. Both functors must produce values of the same B3 type,
    // or both produce none — enforced by the RELEASE_ASSERTs below.
    template <typename F1, typename F2>
    LValue emitCodeBasedOnEndiannessBranch(LValue isLittleEndian, const F1& emitLittleEndianCode, const F2& emitBigEndianCode)
    {
        LType type;

        LBasicBlock bigEndianCase = m_out.newBlock();
        LBasicBlock littleEndianCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bit 0 clear means big-endian was requested.
        m_out.branch(m_out.testIsZero32(isLittleEndian, m_out.constInt32(1)),
            unsure(bigEndianCase), unsure(littleEndianCase));

        LBasicBlock lastNext = m_out.appendTo(bigEndianCase, littleEndianCase);
        LValue bigEndianValue = emitBigEndianCode();
        // Record the result type from the big-endian side; the little-endian
        // side is checked against it below.
        type = bigEndianValue ? bigEndianValue->type() : Void;
        ValueFromBlock bigEndianResult = bigEndianValue ? m_out.anchor(bigEndianValue) : ValueFromBlock();
        m_out.jump(continuation);

        m_out.appendTo(littleEndianCase, continuation);
        LValue littleEndianValue = emitLittleEndianCode();
        ValueFromBlock littleEndianResult = littleEndianValue ? m_out.anchor(littleEndianValue) : ValueFromBlock();
        RELEASE_ASSERT((!littleEndianValue && !bigEndianValue) || type == littleEndianValue->type());
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // Either both sides produced a value or neither did.
        RELEASE_ASSERT(!!bigEndianResult == !!littleEndianResult);
        if (bigEndianResult)
            return m_out.phi(type, bigEndianResult, littleEndianResult);
        return nullptr;
    }
12640
    // Lowers DataViewGetInt / DataViewGetFloat: bounds-checks the access, then
    // loads 1/2/4 bytes (int) or 4/8 bytes (float) from the view's backing
    // buffer, byte-swapping when the effective endianness is big-endian. The
    // endianness is either known statically (data.isLittleEndian is a definite
    // TriState) or decided at runtime from child3 via
    // emitCodeBasedOnEndiannessBranch.
    void compileDataViewGet()
    {
        LValue dataView = lowDataViewObject(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue isLittleEndian = nullptr;
        // child3, when present, is the runtime littleEndian argument.
        if (m_node->child3())
            isLittleEndian = lowBoolean(m_node->child3());

        DataViewData data = m_node->dataViewData();

        // Check index + byteSize - 1 < length in 64-bit arithmetic so the
        // addition cannot wrap; OSR exit on out-of-bounds.
        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        // The backing vector pointer is caged (Gigacage) before use.
        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector));

        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (m_node->op() == DataViewGetInt) {
            switch (data.byteSize) {
            case 1:
                // Single byte: endianness is irrelevant.
                if (data.isSigned)
                    setInt32(m_out.load8SignExt32(pointer));
                else
                    setInt32(m_out.load8ZeroExt32(pointer));
                break;
            case 2: {
                auto emitLittleEndianLoad = [&] {
                    if (data.isSigned)
                        return m_out.load16SignExt32(pointer);
                    return m_out.load16ZeroExt32(pointer);
                };

                auto emitBigEndianLoad = [&] {
                    // Load zero-extended, byte-swap the low 16 bits, then
                    // sign-extend afterwards if the access is signed.
                    LValue val = m_out.load16ZeroExt32(pointer);

                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(val);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                        if (data.isSigned)
                            jit.signExtend16To32(params[0].gpr(), params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    return patchpoint;
                };

                if (data.isLittleEndian == FalseTriState)
                    setInt32(emitBigEndianLoad());
                else if (data.isLittleEndian == TrueTriState)
                    setInt32(emitLittleEndianLoad());
                else
                    setInt32(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianLoad, emitBigEndianLoad));

                break;
            }
            case 4: {
                LValue loadedValue = m_out.load32(pointer);

                if (data.isLittleEndian == FalseTriState)
                    loadedValue = byteSwap32(loadedValue);
                else if (data.isLittleEndian == MixedTriState) {
                    auto emitLittleEndianCode = [&] {
                        return loadedValue;
                    };
                    auto emitBigEndianCode = [&] {
                        return byteSwap32(loadedValue);
                    };

                    loadedValue = emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                }

                if (data.isSigned)
                    setInt32(loadedValue);
                else {
                    // getUint32 can exceed int32 range, so produce the result
                    // as a strict Int52 via zero extension.
                    setStrictInt52(m_out.zeroExt(loadedValue, Int64));
                }

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        } else {
            switch (data.byteSize) {
            case 4: {
                auto emitLittleEndianCode = [&] {
                    return m_out.floatToDouble(m_out.loadFloat(pointer));
                };

                auto emitBigEndianCode = [&] {
                    // Load the raw 32 bits, swap them in a GPR, move the
                    // swapped bits into an FPR as a float, then widen to double.
                    LValue loadedValue = m_out.load32(pointer);
                    PatchpointValue* patchpoint = m_out.patchpoint(Double);
                    patchpoint->appendSomeRegister(loadedValue);
                    patchpoint->numGPScratchRegisters = 1;
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params.gpScratch(0));
                        jit.byteSwap32(params.gpScratch(0));
                        jit.move32ToFloat(params.gpScratch(0), params[0].fpr());
                        jit.convertFloatToDouble(params[0].fpr(), params[0].fpr());
                    });
                    patchpoint->effects = Effects::none();
                    return patchpoint;
                };

                if (data.isLittleEndian == TrueTriState)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == FalseTriState)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            case 8: {
                auto emitLittleEndianCode = [&] {
                    return m_out.loadDouble(pointer);
                };

                auto emitBigEndianCode = [&] {
                    // Swap the raw 64 bits as an integer, then reinterpret as double.
                    LValue loadedValue = m_out.load64(pointer);
                    loadedValue = byteSwap64(loadedValue);
                    return m_out.bitCast(loadedValue, Double);
                };

                if (data.isLittleEndian == TrueTriState)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == FalseTriState)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
12783
    // Lowers DataViewSet: bounds-checks the access, lowers the value operand
    // according to its use kind (Int32 / DoubleRep / Int52Rep), then stores
    // 1/2/4/8 bytes into the view's backing buffer, byte-swapping when the
    // effective endianness is big-endian. Mirrors compileDataViewGet, except
    // the endianness lambdas emit stores and return nullptr.
    void compileDataViewSet()
    {
        LValue dataView = lowDataViewObject(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue isLittleEndian = nullptr;
        // varArg child 3, when present, is the runtime littleEndian argument.
        if (m_graph.varArgChild(m_node, 3))
            isLittleEndian = lowBoolean(m_graph.varArgChild(m_node, 3));

        DataViewData data = m_node->dataViewData();

        // Check index + byteSize - 1 < length in 64-bit arithmetic so the
        // addition cannot wrap; OSR exit on out-of-bounds.
        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        Edge& valueEdge = m_graph.varArgChild(m_node, 2);
        LValue valueToStore;
        switch (valueEdge.useKind()) {
        case Int32Use:
            valueToStore = lowInt32(valueEdge);
            break;
        case DoubleRepUse:
            valueToStore = lowDouble(valueEdge);
            break;
        case Int52RepUse:
            valueToStore = lowStrictInt52(valueEdge);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // The backing vector pointer is caged (Gigacage) before use.
        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector));
        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (data.isFloatingPoint) {
            if (data.byteSize == 4) {
                // setFloat32: narrow the double to float first.
                valueToStore = m_out.doubleToFloat(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeFloat(valueToStore, pointer);
                    return nullptr;
                };

                auto emitBigEndianCode = [&] () -> LValue {
                    // Move the float's bits into a GPR, swap, and store as int32.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.moveFloatTo32(params[1].fpr(), params[0].gpr());
                        jit.byteSwap32(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();
                    m_out.store32(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

            } else {
                RELEASE_ASSERT(data.byteSize == 8);
                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeDouble(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    // Reinterpret the double as raw bits, swap, store as int64.
                    m_out.store64(byteSwap64(m_out.bitCast(valueToStore, Int64)), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
            }
        } else {
            switch (data.byteSize) {
            case 1:
                // Single byte: endianness is irrelevant.
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
                m_out.store32As8(valueToStore, pointer);
                break;
            case 2: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32As16(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    // Swap the low 16 bits in a GPR before the truncating store.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    m_out.store32As16(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                break;
            }
            case 4: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use || valueEdge.useKind() == Int52RepUse);

                // setUint32 values arrive as Int52; truncate to the low 32 bits.
                if (valueEdge.useKind() == Int52RepUse)
                    valueToStore = m_out.castToInt32(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    m_out.store32(byteSwap32(valueToStore), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
12928
    // Emits a switch over |base|'s structureID for MultiGetByOffset/MultiPutByOffset-style
    // dispatch. If |structuresChecked| is true, the structure is already known to be one
    // of the listed cases, so the highest-valued case can serve as the switch's
    // fall-through target instead of branching to |exit|.
    void emitSwitchForMultiByOffset(LValue base, bool structuresChecked, Vector<SwitchCase, 2>& cases, LBasicBlock exit)
    {
        // No cases: nothing to dispatch on.
        if (cases.isEmpty()) {
            m_out.jump(exit);
            return;
        }

        if (structuresChecked) {
            // Sort by structureID so takeLast() removes a deterministic case
            // (the largest) to use as the default target.
            std::sort(
                cases.begin(), cases.end(),
                [&] (const SwitchCase& a, const SwitchCase& b) -> bool {
                    return a.value()->asInt() < b.value()->asInt();
                });
            SwitchCase last = cases.takeLast();
            m_out.switchInstruction(
                m_out.load32(base, m_heaps.JSCell_structureID), cases, last.target(), Weight(0));
            return;
        }

        // Structures not pre-checked: unknown structureIDs fall through to exit.
        m_out.switchInstruction(
            m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
    }
12951
    // Lowers CompareEq where the right child is known to be an object and the
    // left child is object-or-other (null/undefined). Right is speculated to be
    // a truthy object up front; left is split at runtime: if it's a cell it must
    // be a truthy object and equality is pointer identity, otherwise it must be
    // "other" (null/undefined) and the result is false.
    void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
    {
        LValue rightCell = lowCell(rightChild);
        LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation);

        speculateTruthyObject(rightChild, rightCell, SpecObject);

        LBasicBlock leftCellCase = m_out.newBlock();
        LBasicBlock leftNotCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(leftValue, provenType(leftChild)),
            unsure(leftCellCase), unsure(leftNotCellCase));

        LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
        // On the cell path the filter excludes non-cells (those went the other way).
        speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCellCheck));
        // Objects compare equal iff they are the same cell.
        ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue));
        m_out.jump(continuation);

        m_out.appendTo(leftNotCellCase, continuation);
        // Non-cell left must be null/undefined ("other"), which never equals an object.
        FTL_TYPE_CHECK(
            jsValueValue(leftValue), leftChild, SpecOther | SpecCellCheck, isNotOther(leftValue));
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, cellResult, notCellResult));
    }
12981
    // Speculates that |cell| is an object that is truthy, i.e. does not
    // masquerade as undefined. When the masquerades-as-undefined watchpoint is
    // still valid, the per-object typeInfoFlags check can be skipped and only
    // the is-object check is emitted.
    void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter)
    {
        if (masqueradesAsUndefinedWatchpointIsStillValid()) {
            FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
            return;
        }

        // Watchpoint fired: also check the MasqueradesAsUndefined flag on the cell.
        FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
12996
    // Lowers a generic (non-speculative) comparison: if both operands are Int32
    // at runtime, apply |intFunctor| to the unboxed values on the fast path;
    // otherwise call |helperFunction| via vmCall on the slow path. The results
    // are merged into a boolean.
    template<typename IntFunctor>
    void nonSpeculativeCompare(const IntFunctor& intFunctor, S_JITOperation_EJJ helperFunction)
    {
        LValue left = lowJSValue(m_node->child1());
        LValue right = lowJSValue(m_node->child2());

        LBasicBlock leftIsInt = m_out.newBlock();
        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotInt32(left, provenType(m_node->child1())), rarely(slowPath), usually(leftIsInt));

        LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
        m_out.branch(isNotInt32(right, provenType(m_node->child2())), rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);
        ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right)));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        // The helper returns a size_t-like value; notNull turns it into a boolean.
        ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
            pointerType(), m_out.operation(helperFunction), m_callFrame, left, right)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastResult, slowResult));
    }
13025
    // Emits inline equality for two JSStrings. The fast path applies only when
    // both strings are resolved (non-rope) and both backing StringImpls are
    // 8-bit; it compares lengths, then bytes back-to-front in a loop. Ropes,
    // 16-bit strings, and anything else fall back to operationCompareStringEq.
    // Returns a boolean (Int32) LValue.
    LValue stringsEqual(LValue leftJSString, LValue rightJSString, Edge leftJSStringEdge = Edge(), Edge rightJSStringEdge = Edge())
    {
        LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock leftReadyCase = m_out.newBlock();
        LBasicBlock rightReadyCase = m_out.newBlock();
        LBasicBlock left8BitCase = m_out.newBlock();
        LBasicBlock right8BitCase = m_out.newBlock();
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock bytesEqual = m_out.newBlock();
        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Ropes have no contiguous characters; bail to the slow path.
        m_out.branch(isRopeString(leftJSString, leftJSStringEdge), rarely(slowCase), usually(leftReadyCase));

        LBasicBlock lastNext = m_out.appendTo(leftReadyCase, rightReadyCase);
        m_out.branch(isRopeString(rightJSString, rightJSStringEdge), rarely(slowCase), usually(rightReadyCase));

        m_out.appendTo(rightReadyCase, notTriviallyUnequalCase);
        LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
        LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
        LValue length = m_out.load32(left, m_heaps.StringImpl_length);
        // Different lengths can never be equal.
        m_out.branch(
            m_out.notEqual(length, m_out.load32(right, m_heaps.StringImpl_length)),
            unsure(falseCase), unsure(notTriviallyUnequalCase));

        m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
        // Two empty strings of equal length are trivially equal.
        m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));

        m_out.appendTo(notEmptyCase, left8BitCase);
        // flagIs8Bit clear means 16-bit characters: slow path.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowCase), unsure(left8BitCase));

        m_out.appendTo(left8BitCase, right8BitCase);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowCase), unsure(right8BitCase));

        m_out.appendTo(right8BitCase, loop);

        LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data);
        LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data);

        // Loop counter starts at length and walks down to zero (comparing
        // bytes from the end toward the start).
        ValueFromBlock indexAtStart = m_out.anchor(length);

        m_out.jump(loop);

        m_out.appendTo(loop, bytesEqual);

        LValue indexAtLoopTop = m_out.phi(Int32, indexAtStart);
        LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One);

        LValue leftByte = m_out.load8ZeroExt32(
            m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop)));
        LValue rightByte = m_out.load8ZeroExt32(
            m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop)));

        m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual));

        m_out.appendTo(bytesEqual, trueCase);

        // Feed the decremented index back into the loop-top phi.
        ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop);
        m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration);
        m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase));

        m_out.appendTo(trueCase, falseCase);

        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseCase, slowCase);

        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            Int64, m_out.operation(operationCompareStringEq), m_callFrame,
            leftJSString, rightJSString);
        ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, trueResult, falseResult, slowResult);
    }
13119
    // Tells emitBinarySnippet whether the arith generator needs an extra FP
    // scratch register beyond the two it always gets.
    enum ScratchFPRUsage {
        DontNeedScratchFPR,
        NeedScratchFPR
    };
    // Emits a binary arithmetic op as a patchpoint that runs a baseline-style
    // snippet generator (e.g. JITAddGenerator). The snippet's fast path runs
    // inline; its slow-path jumps are linked to a late path that calls
    // |slowPathFunction|. If the generator emits no fast path at all, the
    // helper is called unconditionally. Exceptions from the helper are routed
    // through the patchpoint's exception handle.
    template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR>
    void emitBinarySnippet(J_JITOperation_EJJ slowPathFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type profiles let the snippet generator skip impossible cases.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // The snippet relies on the tag registers holding their usual values.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        if (scratchFPRUsage == NeedScratchFPR)
            patchpoint->numFPScratchRegisters++;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result, params[1]/[2] the operands.
                auto generator = Box<BinaryArithOpGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                    params.fpScratch(0), params.fpScratch(1), params.gpScratch(0),
                    scratchFPRUsage == NeedScratchFPR ? params.fpScratch(2) : InvalidFPRReg);

                generator->generateFastPath(jit);

                if (generator->didEmitFastPath()) {
                    generator->endJumpList().link(&jit);
                    CCallHelpers::Label done = jit.label();

                    // The slow path is emitted out-of-line and jumps back to 'done'.
                    params.addLatePath(
                        [=] (CCallHelpers& jit) {
                            AllowMacroScratchRegisterUsage allowScratch(jit);

                            generator->slowPathJumpList().link(&jit);
                            callOperation(
                                *state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), slowPathFunction, params[0].gpr(),
                                params[1].gpr(), params[2].gpr());
                            jit.jump().linkTo(done, &jit);
                        });
                } else {
                    // No fast path was possible: always call the helper.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), slowPathFunction, params[0].gpr(), params[1].gpr(),
                        params[2].gpr());
                }
            });

        setJSValue(patchpoint);
    }
13189
    // Like emitBinarySnippet, but for bitwise ops whose generators always emit
    // a fast path (no didEmitFastPath() check) and need only one GP scratch and
    // no FP scratches. Slow-path jumps go to a late path calling |slowPathFunction|.
    template<typename BinaryBitOpGenerator>
    void emitBinaryBitOpSnippet(J_JITOperation_EJJ slowPathFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type profiles let the snippet generator skip impossible cases.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // The snippet relies on the tag registers holding their usual values.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result, params[1]/[2] the operands.
                auto generator = Box<BinaryBitOpGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0));

                generator->generateFastPath(jit);
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                // The slow path is emitted out-of-line and jumps back to 'done'.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);
                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            });

        setJSValue(patchpoint);
    }
13242
    // Emits a right-shift (signed or unsigned per |shiftType|) through the
    // shared JITRightShiftGenerator snippet, with an out-of-line slow path that
    // calls the matching operationValueBit[U]RShift helper.
    void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType)
    {
        Node* node = m_node;

        // FIXME: Make this do exceptions.
        // https://bugs.webkit.org/show_bug.cgi?id=151686

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type profiles let the snippet generator skip impossible cases.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // The snippet relies on the tag registers holding their usual values.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result, params[1]/[2] the operands.
                auto generator = Box<JITRightShiftGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                    params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType);

                generator->generateFastPath(jit);
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                // The slow path is emitted out-of-line and jumps back to 'done'.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);

                        // Pick the helper matching the requested shift semantics.
                        J_JITOperation_EJJ slowPathFunction =
                            shiftType == JITRightShiftGenerator::SignedShift
                            ? operationValueBitRShift : operationValueBitURShift;

                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            });

        setJSValue(patchpoint);
    }
13304
    // Emits the inline GC allocation fast path. |allocator| is either a
    // compile-time-constant LocalAllocator* or a runtime value; allocation
    // failure (or a null allocator) branches to |slowPath|. Returns the newly
    // allocated cell pointer. The allocation itself is emitted as a terminal
    // patchpoint with two successors (continuation / slowPath) so the JIT's
    // shared emitAllocateWithNonNullAllocator sequence can be used.
    LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
    {
        // Fold the allocator into a constant JITAllocator when its pointer is
        // known at compile time.
        JITAllocator actualAllocator;
        if (allocator->hasIntPtr())
            actualAllocator = JITAllocator::constant(Allocator(bitwise_cast<LocalAllocator*>(allocator->asIntPtr())));
        else
            actualAllocator = JITAllocator::variable();

        if (actualAllocator.isConstant()) {
            // A known-null allocator means this size class can't be inline-allocated:
            // always take the slow path.
            if (!actualAllocator.allocator()) {
                LBasicBlock haveAllocator = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
                m_out.jump(slowPath);
                m_out.appendTo(haveAllocator, lastNext);
                return m_out.intPtrZero;
            }
        } else {
            // This means that either we know that the allocator is null or we don't know what the
            // allocator is. In either case, we need the null check.
            LBasicBlock haveAllocator = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
            m_out.branch(
                m_out.notEqual(allocator, m_out.intPtrZero),
                usually(haveAllocator), rarely(slowPath));
            m_out.appendTo(haveAllocator, lastNext);
        }

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
        if (isARM64()) {
            // emitAllocateWithNonNullAllocator uses the scratch registers on ARM.
            patchpoint->clobber(RegisterSet::macroScratchRegisters());
        }
        patchpoint->effects.terminal = true;
        // Constant allocator: reserve an extra scratch GPR to materialize it
        // into (gpScratch(1) below). Variable allocator: it arrives as params[1].
        if (actualAllocator.isConstant())
            patchpoint->numGPScratchRegisters++;
        else
            patchpoint->appendSomeRegisterWithClobber(allocator);
        // One scratch GPR is always needed by the allocation sequence itself.
        patchpoint->numGPScratchRegisters++;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        // Successor 0 = success (continuation), successor 1 = slow path.
        m_out.appendSuccessor(usually(continuation));
        m_out.appendSuccessor(rarely(slowPath));

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsageIf allowScratchIf(jit, isARM64());
                CCallHelpers::JumpList jumpToSlowPath;

                GPRReg allocatorGPR;
                if (actualAllocator.isConstant())
                    allocatorGPR = params.gpScratch(1);
                else
                    allocatorGPR = params[1].gpr();

                // We use a patchpoint to emit the allocation path because whenever we mess with
                // allocation paths, we already reason about them at the machine code level. We know
                // exactly what instruction sequence we want. We're confident that no compiler
                // optimization could make this code better. So, it's best to have the code in
                // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
                // all of the compiler tiers.
                jit.emitAllocateWithNonNullAllocator(
                    params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
                    jumpToSlowPath);

                CCallHelpers::Jump jumpToSuccess;
                if (!params.fallsThroughToSuccessor(0))
                    jumpToSuccess = jit.jump();

                Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();

                // Successor labels aren't known until late, so the branches are
                // linked in a late path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        jumpToSlowPath.linkTo(*labels[1], &jit);
                        if (jumpToSuccess.isSet())
                            jumpToSuccess.linkTo(*labels[0], &jit);
                    });
            });

        m_out.appendTo(continuation, lastNext);
        return patchpoint;
    }
13390
    // Stamps a known-at-compile-time Structure onto a freshly allocated cell:
    // writes the structureID, then the initialization blob (indexing type,
    // cell state, etc.) into the cell header's remaining bytes.
    void storeStructure(LValue object, Structure* structure)
    {
        m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
        m_out.store32(
            m_out.constInt32(structure->objectInitializationBlob()),
            object, m_heaps.JSCell_usefulBytes);
    }
13398
    // Stamps a runtime Structure value onto a freshly allocated cell. When the
    // structure LValue folds to a compile-time constant, delegates to the
    // Structure* overload; otherwise copies the structureID and the indexing-
    // mode/history blob out of the Structure object at runtime.
    void storeStructure(LValue object, LValue structure)
    {
        if (structure->hasIntPtr()) {
            storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
            return;
        }

        LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
        m_out.store32(id, object, m_heaps.JSCell_structureID);

        LValue blob = m_out.load32(structure, m_heaps.Structure_indexingModeIncludingHistory);
        m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
    }
13412
13413 template <typename StructureType>
13414 LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
13415 {
13416 LValue result = allocateHeapCell(allocator, slowPath);
13417 storeStructure(result, structure);
13418 return result;
13419 }
13420
    // Convenience overload: wraps the RegisteredStructure in a weak structure
    // constant and forwards to the LValue-structure overload.
    LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
    {
        return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
    }
13425
    // Allocates a JSObject-shaped cell: allocates and stamps the cell, zero-fills
    // the inline property storage (whose extent comes from the structure's inline
    // capacity), and installs the given butterfly pointer.
    LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
    {
        LValue result = allocateCell(allocator, structure, slowPath);
        if (structure->hasIntPtr()) {
            // Structure is a compile-time constant, so the inline capacity is
            // known statically; splat over a constant word range. Offsets are in
            // 64-bit words, hence the divides by 8.
            splatWords(
                result,
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
                m_out.int64Zero,
                m_heaps.properties.atAnyNumber());
        } else {
            // Dynamic structure: load the inline capacity at runtime to compute
            // the end of the word range to clear.
            LValue end = m_out.add(
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
                m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
            splatWords(
                result,
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
                end,
                m_out.int64Zero,
                m_heaps.properties.atAnyNumber());
        }

        m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
        return result;
    }
13451
13452 template<typename ClassType, typename StructureType>
13453 LValue allocateObject(
13454 size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
13455 {
13456 Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm(), size, AllocatorForMode::AllocatorIfExists);
13457 return allocateObject(
13458 m_out.constIntPtr(allocator.localAllocator()), structure, butterfly, slowPath);
13459 }
13460
    // Allocates a ClassType instance at its base allocation size (no extra
    // inline storage beyond what allocationSize(0) accounts for).
    template<typename ClassType, typename StructureType>
    LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
    {
        return allocateObject<ClassType>(
            ClassType::allocationSize(0), structure, butterfly, slowPath);
    }
13467
    // Returns (as an LValue) the LocalAllocator to use for an allocation of
    // |size| bytes out of |subspace|. Branches to |slowPath| when no suitable
    // allocator exists (size above the large cutoff, or the size class has not
    // been materialized).
    LValue allocatorForSize(LValue subspace, LValue size, LBasicBlock slowPath)
    {
        static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

        // Try to do some constant-folding here.
        if (subspace->hasIntPtr() && size->hasIntPtr()) {
            CompleteSubspace* actualSubspace = bitwise_cast<CompleteSubspace*>(subspace->asIntPtr());
            size_t actualSize = size->asIntPtr();

            Allocator actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
            if (!actualAllocator) {
                // No allocator: emit an unconditional jump to the slow path. The
                // continuation is unreachable; the zero result just keeps the
                // caller's data flow well-formed.
                LBasicBlock continuation = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
                m_out.jump(slowPath);
                m_out.appendTo(continuation, lastNext);
                return m_out.intPtrZero;
            }

            return m_out.constIntPtr(actualAllocator.localAllocator());
        }

        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        // Round the size up to the next size step, then shift out the step
        // granularity to obtain a size-class index.
        LValue sizeClassIndex = m_out.lShr(
            m_out.add(size, m_out.constIntPtr(MarkedSpace::sizeStep - 1)),
            m_out.constInt32(stepShift));

        // Sizes beyond the large cutoff have no per-size-class allocator.
        m_out.branch(
            m_out.above(sizeClassIndex, m_out.constIntPtr(MarkedSpace::largeCutoff >> stepShift)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(continuation, lastNext);

        return m_out.loadPtr(
            m_out.baseIndex(
                m_heaps.CompleteSubspace_allocatorForSizeStep,
                subspace, sizeClassIndex));
    }
13510
    // Convenience overload for a statically-known subspace: materializes its
    // address as a constant and forwards to the LValue overload.
    LValue allocatorForSize(CompleteSubspace& subspace, LValue size, LBasicBlock slowPath)
    {
        return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
    }
13515
13516 template<typename ClassType>
13517 LValue allocateVariableSizedObject(
13518 LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13519 {
13520 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13521 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13522 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13523 return allocateObject(allocator, structure, butterfly, slowPath);
13524 }
13525
13526 template<typename ClassType>
13527 LValue allocateVariableSizedCell(
13528 LValue size, Structure* structure, LBasicBlock slowPath)
13529 {
13530 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13531 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13532 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13533 return allocateCell(allocator, structure, slowPath);
13534 }
13535
    // Allocates a JSFinalObject with the given structure and a null butterfly,
    // falling back to operationNewObject via a lazy slow path on allocation
    // failure.
    LValue allocateObject(RegisteredStructure structure)
    {
        size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
        Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);

        // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
        // instead of putting it on the slow path.
        // https://bugs.webkit.org/show_bug.cgi?id=161062

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline allocation; intPtrZero is the null butterfly.
        ValueFromBlock fastResult = m_out.anchor(allocateObject(
            m_out.constIntPtr(allocator.localAllocator()), structure, m_out.intPtrZero, slowPath));

        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            });
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(pointerType(), fastResult, slowResult);
    }
13570
13571 struct ArrayValues {
13572 ArrayValues()
13573 : array(0)
13574 , butterfly(0)
13575 {
13576 }
13577
13578 ArrayValues(LValue array, LValue butterfly)
13579 : array(array)
13580 , butterfly(butterfly)
13581 {
13582 }
13583
13584 LValue array;
13585 LValue butterfly;
13586 };
13587
    // Allocates a JSArray with the given public length, vector length, structure,
    // and indexing type. Returns both the array and its butterfly. Very large
    // requested sizes (when shouldLargeArraySizeCreateArrayStorage) and inline
    // allocation failures divert to a shared runtime slow path.
    ArrayValues allocateJSArray(LValue publicLength, LValue vectorLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        if (indexingType->hasInt32()) {
            IndexingType type = static_cast<IndexingType>(indexingType->asInt32());
            ASSERT_UNUSED(type,
                hasUndecided(type)
                || hasInt32(type)
                || hasDouble(type)
                || hasContiguous(type));
        }

        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock largeCase = m_out.newBlock();
        LBasicBlock failCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastCase);

        // When structure and lengths are compile-time constants, round the
        // vector length up to the optimal contiguous vector length. The static
        // values are remembered so the slow-path selection below can tell
        // whether the vector length carries information beyond the public length.
        Optional<unsigned> staticVectorLength;
        Optional<unsigned> staticVectorLengthFromPublicLength;
        if (structure->hasIntPtr()) {
            if (publicLength->hasInt32()) {
                unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
                if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    publicLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst);
                    staticVectorLengthFromPublicLength = publicLengthConst;
                }

            }
            if (vectorLength->hasInt32()) {
                unsigned vectorLengthConst = static_cast<unsigned>(vectorLength->asInt32());
                if (vectorLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    vectorLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), vectorLengthConst);
                    vectorLength = m_out.constInt32(vectorLengthConst);
                    staticVectorLength = vectorLengthConst;
                }
            }
        } else {
            // We don't compute the optimal vector length for new Array(blah) where blah is not
            // statically known, since the compute effort of doing it here is probably not worth it.
        }

        ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);

        LValue predicate;
        if (shouldLargeArraySizeCreateArrayStorage)
            predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
        else
            predicate = m_out.booleanFalse;

        m_out.branch(predicate, rarely(largeCase), usually(fastCase));

        m_out.appendTo(fastCase, largeCase);

        // Fast path: allocate the butterfly (vectorLength 8-byte slots plus the
        // indexing header) out of the gigacage, then the array cell itself.
        LValue payloadSize =
            m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));

        LValue butterflySize = m_out.add(
            payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));

        LValue allocator = allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, failCase);
        LValue startOfStorage = allocateHeapCell(allocator, failCase);

        // The butterfly pointer points just past the indexing header.
        LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));

        m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
        m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength);

        initializeArrayElements(
            indexingType,
            shouldInitializeElements ? m_out.int32Zero : publicLength, vectorLength,
            butterfly);

        ValueFromBlock haveButterfly = m_out.anchor(butterfly);

        LValue object = allocateObject<JSArray>(structure, butterfly, failCase);

        ValueFromBlock fastResult = m_out.anchor(object);
        ValueFromBlock fastButterfly = m_out.anchor(butterfly);
        m_out.jump(continuation);

        // Large case: ask the runtime for an ArrayStorage-backed array instead.
        m_out.appendTo(largeCase, failCase);
        ValueFromBlock largeStructure = m_out.anchor(
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
        m_out.jump(slowCase);

        m_out.appendTo(failCase, slowCase);
        ValueFromBlock failStructure = m_out.anchor(structure);
        m_out.jump(slowCase);

        // Slow case: call into the runtime, forwarding the butterfly if the fast
        // path managed to allocate one before failing (else null).
        m_out.appendTo(slowCase, continuation);
        LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
        LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);

        VM& vm = this->vm();
        LValue slowResultValue = nullptr;
        if (vectorLength == publicLength
            || (staticVectorLengthFromPublicLength && staticVectorLength && staticVectorLength.value() == staticVectorLengthFromPublicLength.value())) {
            // The vector length carries no information beyond the public length,
            // so the plain operation suffices.
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSize, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
                },
                structureValue, publicLength, butterflyValue);
        } else {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSizeAndHint, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR(), locations[4].directGPR());
                },
                structureValue, publicLength, vectorLength, butterflyValue);
        }

        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        ValueFromBlock slowButterfly = m_out.anchor(
            m_out.loadPtr(slowResultValue, m_heaps.JSObject_butterfly));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return ArrayValues(
            m_out.phi(pointerType(), fastResult, slowResult),
            m_out.phi(pointerType(), fastButterfly, slowButterfly));
    }
13717
13718 ArrayValues allocateUninitializedContiguousJSArrayInternal(LValue publicLength, LValue vectorLength, RegisteredStructure structure)
13719 {
13720 bool shouldInitializeElements = false;
13721 bool shouldLargeArraySizeCreateArrayStorage = false;
13722 return allocateJSArray(
13723 publicLength, vectorLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
13724 shouldLargeArraySizeCreateArrayStorage);
13725 }
13726
    // Uninitialized contiguous array whose vector length equals its public length.
    ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
    {
        return allocateUninitializedContiguousJSArrayInternal(publicLength, publicLength, structure);
    }
13731
    // Uninitialized contiguous array with statically-known lengths; the vector
    // length must cover the public length.
    ArrayValues allocateUninitializedContiguousJSArray(unsigned publicLength, unsigned vectorLength, RegisteredStructure structure)
    {
        ASSERT(vectorLength >= publicLength);
        return allocateUninitializedContiguousJSArrayInternal(m_out.constInt32(publicLength), m_out.constInt32(vectorLength), structure);
    }
13737
    // Returns a pointer to a free ShadowChicken log packet, flushing the log via
    // the runtime when it is full, and bumps the log cursor past the returned
    // packet.
    LValue ensureShadowChickenPacket()
    {
        ShadowChicken* shadowChicken = vm().shadowChicken();
        RELEASE_ASSERT(shadowChicken);
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        TypedPointer addressOfLogCursor = m_out.absolute(shadowChicken->addressOfLogCursor());
        LValue logCursor = m_out.loadPtr(addressOfLogCursor);

        ValueFromBlock fastResult = m_out.anchor(logCursor);

        // Fast path: there is room before the end of the log buffer.
        m_out.branch(
            m_out.below(logCursor, m_out.constIntPtr(shadowChicken->logEnd())),
            usually(continuation), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);

        // Slow path: process (drain) the log, then re-read the reset cursor.
        vmCall(Void, m_out.operation(operationProcessShadowChickenLog), m_callFrame);

        ValueFromBlock slowResult = m_out.anchor(m_out.loadPtr(addressOfLogCursor));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // Advance the cursor past the packet we are handing out.
        m_out.storePtr(
            m_out.add(result, m_out.constIntPtr(sizeof(ShadowChicken::Packet))),
            addressOfLogCursor);

        return result;
    }
13770
    // Lowers JS ToBoolean of |edge| to an Int32 0/1 value, specialized on the
    // edge's use kind.
    LValue boolify(Edge edge)
    {
        switch (edge.useKind()) {
        case BooleanUse:
        case KnownBooleanUse:
            return lowBoolean(edge);
        case Int32Use:
            return m_out.notZero32(lowInt32(edge));
        case DoubleRepUse:
            // NaN is falsy, hence the "AndOrdered" comparison.
            return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero);
        case ObjectOrOtherUse:
            return m_out.logicalNot(
                equalNullOrUndefined(
                    edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
                    ManualOperandSpeculation));
        case StringUse:
            // Only the empty string is falsy, and it is unique per VM.
            return m_out.notEqual(lowString(edge), weakPointer(jsEmptyString(&m_graph.m_vm)));
        case StringOrOtherUse: {
            LValue value = lowJSValue(edge, ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));
            ValueFromBlock stringResult = m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm))));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            // null/undefined are falsy.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, stringResult, notCellResult);
        }
        case UntypedUse: {
            LValue value = lowJSValue(edge);

            // Implements the following control flow structure:
            // if (value is cell) {
            //     if (value is string or value is BigInt)
            //         result = !!value->length
            //     else {
            //         do evil things for masquerades-as-undefined
            //         result = true
            //     }
            // } else if (value is int32) {
            //     result = !!unboxInt32(value)
            // } else if (value is number) {
            //     result = !!unboxDouble(value)
            // } else {
            //     result = value == jsTrue
            // }

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock stringCase = m_out.newBlock();
            LBasicBlock bigIntCase = m_out.newBlock();
            LBasicBlock notStringOrBigIntCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock int32Case = m_out.newBlock();
            LBasicBlock notInt32Case = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock notDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock> results;

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notStringCase);
            m_out.branch(
                isString(value, provenType(edge) & SpecCell),
                unsure(stringCase), unsure(notStringCase));

            m_out.appendTo(notStringCase, stringCase);
            m_out.branch(
                isBigInt(value, provenType(edge) & (SpecCell - SpecString)),
                unsure(bigIntCase), unsure(notStringOrBigIntCase));

            m_out.appendTo(stringCase, bigIntCase);
            results.append(m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm)))));
            m_out.jump(continuation);

            // A BigInt is truthy iff it is non-zero, i.e. its length is non-zero.
            m_out.appendTo(bigIntCase, notStringOrBigIntCase);
            LValue nonZeroBigInt = m_out.notZero32(
                m_out.load32NonNegative(value, m_heaps.JSBigInt_length));
            results.append(m_out.anchor(nonZeroBigInt));
            m_out.jump(continuation);

            m_out.appendTo(notStringOrBigIntCase, notCellCase);
            LValue isTruthyObject;
            if (masqueradesAsUndefinedWatchpointIsStillValid())
                isTruthyObject = m_out.booleanTrue;
            else {
                // Watchpoint fired: an object masquerades as undefined (is falsy)
                // only when its structure's global object matches this one.
                LBasicBlock masqueradesCase = m_out.newBlock();

                results.append(m_out.anchor(m_out.booleanTrue));

                m_out.branch(
                    m_out.testIsZero32(
                        m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                        m_out.constInt32(MasqueradesAsUndefined)),
                    usually(continuation), rarely(masqueradesCase));

                m_out.appendTo(masqueradesCase);

                isTruthyObject = m_out.notEqual(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
            }
            results.append(m_out.anchor(isTruthyObject));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, int32Case);
            m_out.branch(
                isInt32(value, provenType(edge) & ~SpecCell),
                unsure(int32Case), unsure(notInt32Case));

            m_out.appendTo(int32Case, notInt32Case);
            results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
            m_out.jump(continuation);

            m_out.appendTo(notInt32Case, doubleCase);
            m_out.branch(
                isNumber(value, provenType(edge) & ~SpecCell),
                unsure(doubleCase), unsure(notDoubleCase));

            m_out.appendTo(doubleCase, notDoubleCase);
            LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered(
                unboxDouble(value), m_out.constDouble(0));
            results.append(m_out.anchor(doubleIsTruthy));
            m_out.jump(continuation);

            // Remaining non-cell, non-number values: only jsTrue is truthy.
            m_out.appendTo(notDoubleCase, continuation);
            LValue miscIsTruthy = m_out.equal(
                value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
            results.append(m_out.anchor(miscIsTruthy));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, results);
        }
        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return 0;
        }
    }
13923
    // How equalNullOrUndefined treats cell inputs: either all cells compare
    // false, or the cell case speculates that the cell is an object.
    enum StringOrObjectMode {
        AllCellsAreFalse,
        CellCaseSpeculatesObject
    };
    // Which primitive comparison equalNullOrUndefined performs on non-cells.
    enum EqualNullOrUndefinedMode {
        EqualNull,
        EqualUndefined,
        EqualNullOrUndefined,
        SpeculateNullOrUndefined
    };
    // Emits an Int32 boolean answering "is |edge| null/undefined?" (per
    // |primitiveMode|), with the cell case handled per |cellMode|. Honors the
    // masquerades-as-undefined rules when the corresponding watchpoint has fired.
    LValue equalNullOrUndefined(
        Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
        OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
    {
        bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();

        LValue value = lowJSValue(edge, operandMode);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        Vector<ValueFromBlock, 3> results;

        switch (cellMode) {
        case AllCellsAreFalse:
            break;
        case CellCaseSpeculatesObject:
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));
            break;
        }

        if (validWatchpoint) {
            // No object can masquerade as undefined, so every cell compares false.
            results.append(m_out.anchor(m_out.booleanFalse));
            m_out.jump(continuation);
        } else {
            LBasicBlock masqueradesCase =
                m_out.newBlock();

            results.append(m_out.anchor(m_out.booleanFalse));

            m_out.branch(
                m_out.testNonZero32(
                    m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                    m_out.constInt32(MasqueradesAsUndefined)),
                rarely(masqueradesCase), usually(continuation));

            m_out.appendTo(masqueradesCase, primitiveCase);

            // A masquerading object equals undefined only within its own global
            // object.
            LValue structure = loadStructure(value);

            results.append(m_out.anchor(
                m_out.equal(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
            m_out.jump(continuation);
        }

        m_out.appendTo(primitiveCase, continuation);

        LValue primitiveResult;
        switch (primitiveMode) {
        case EqualNull:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
            break;
        case EqualUndefined:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
            break;
        case EqualNullOrUndefined:
            primitiveResult = isOther(value, provenType(edge));
            break;
        case SpeculateNullOrUndefined:
            // Speculation proves the value is null/undefined, so the answer is
            // statically true on this path.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            primitiveResult = m_out.booleanTrue;
            break;
        }
        results.append(m_out.anchor(primitiveResult));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int32, results);
    }
14013
    // Emits the out-of-bounds handling for a contiguous put-by-val: grows the
    // public length for in-vector holes, or calls |slowPathFunction| for truly
    // out-of-bounds stores. Falls through to the in-bounds store path; the caller
    // emits the store itself and owns |continuation|.
    template<typename FunctionType>
    void contiguousPutByValOutOfBounds(
        FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
        LBasicBlock continuation)
    {
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock notInBoundsCase =
                m_out.newBlock();
            LBasicBlock performStore =
                m_out.newBlock();

            LValue isNotInBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength));
            m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore));

            LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);

            LValue isOutOfBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength));

            if (!m_node->arrayMode().isOutOfBounds())
                speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
            else {
                LBasicBlock outOfBoundsCase =
                    m_out.newBlock();
                LBasicBlock holeCase =
                    m_out.newBlock();

                m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase));

                LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);

                // Beyond the vector length: let the runtime handle the store.
                vmCall(
                    Void, m_out.operation(slowPathFunction),
                    m_callFrame, base, index, value);

                m_out.jump(continuation);

                m_out.appendTo(holeCase, innerLastNext);
            }

            // In-vector hole: extend the public length to cover the new index.
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.Butterfly_publicLength);

            m_out.jump(performStore);
            m_out.appendTo(performStore, lastNext);
        }
    }
14063
    // Returns |ptr| confined to the gigacage of the given kind (mask then
    // rebase), or the pointer unchanged when caging is disabled for that kind.
    LValue caged(Gigacage::Kind kind, LValue ptr)
    {
#if GIGACAGE_ENABLED
        if (!Gigacage::isEnabled(kind))
            return ptr;

        if (kind == Gigacage::Primitive && Gigacage::canPrimitiveGigacageBeDisabled()) {
            // Compiled code relies on the cage staying enabled; register a
            // watchpoint so the code is jettisoned if it gets disabled later.
            if (vm().primitiveGigacageEnabled().isStillValid())
                m_graph.watchpoints().addLazily(vm().primitiveGigacageEnabled());
            else
                return ptr;
        }

        LValue basePtr = m_out.constIntPtr(Gigacage::basePtr(kind));
        LValue mask = m_out.constIntPtr(Gigacage::mask(kind));

        LValue masked = m_out.bitAnd(ptr, mask);
        LValue result = m_out.add(masked, basePtr);

        // Make sure that B3 doesn't try to do smart reassociation of these pointer bits.
        // FIXME: In an ideal world, B3 would not do harmful reassociations, and if it did, it would be able
        // to undo them during constant hoisting and regalloc. As it stands, if you remove this then Octane
        // gets 1.6% slower and Kraken gets 5% slower. It's all because the basePtr, which is a constant,
        // gets reassociated out of the add above and into the address arithmetic. This disables hoisting of
        // the basePtr constant. Hoisting that constant is worth a lot more perf than the reassociation. One
        // way to make this all work happily is to combine offset legalization with constant hoisting, and
        // then teach it reassociation. So, Add(Add(a, b), const) where a is loop-invariant while b isn't
        // will turn into Add(Add(a, const), b) by the constant hoister. We would have to teach B3 to do this
        // and possibly other smart things if we want to be able to remove this opaque.
        // https://bugs.webkit.org/show_bug.cgi?id=175493
        return m_out.opaque(result);
#else
        UNUSED_PARAM(kind);
        return ptr;
#endif
    }
14100
14101 void buildSwitch(SwitchData* data, LType type, LValue switchValue)
14102 {
14103 ASSERT(type == pointerType() || type == Int32);
14104
14105 Vector<SwitchCase> cases;
14106 for (unsigned i = 0; i < data->cases.size(); ++i) {
14107 SwitchCase newCase;
14108
14109 if (type == pointerType()) {
14110 newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)),
14111 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14112 } else if (type == Int32) {
14113 newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)),
14114 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14115 } else
14116 CRASH();
14117
14118 cases.append(newCase);
14119 }
14120
14121 m_out.switchInstruction(
14122 switchValue, cases,
14123 lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
14124 }
14125
    // Lowers a switch over string values. Uses an inline binary character switch
    // when all case strings are short 8-bit strings; otherwise (or for rope /
    // 16-bit inputs at runtime) falls back to the generic slow path.
    void switchString(SwitchData* data, LValue string, Edge& edge)
    {
        bool canDoBinarySwitch = true;
        unsigned totalLength = 0;

        for (DFG::SwitchCase myCase : data->cases) {
            StringImpl* string = myCase.value.stringImpl();
            if (!string->is8Bit()) {
                canDoBinarySwitch = false;
                break;
            }
            if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
                canDoBinarySwitch = false;
                break;
            }
            totalLength += string->length();
        }

        if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
            switchStringSlow(data, string);
            return;
        }

        LBasicBlock hasImplBlock = m_out.newBlock();
        LBasicBlock is8BitBlock = m_out.newBlock();
        LBasicBlock slowBlock = m_out.newBlock();

        // Rope strings have no contiguous buffer to compare against.
        m_out.branch(isRopeString(string, edge), unsure(slowBlock), unsure(hasImplBlock));

        LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);

        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32(stringImpl, m_heaps.StringImpl_length);

        // The binary switch compares 8-bit characters; 16-bit inputs go slow.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowBlock), unsure(is8BitBlock));

        m_out.appendTo(is8BitBlock, slowBlock);

        LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);

        // FIXME: We should propagate branch weight data to the cases of this switch.
        // https://bugs.webkit.org/show_bug.cgi?id=144368

        Vector<StringSwitchCase> cases;
        for (DFG::SwitchCase myCase : data->cases)
            cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
        std::sort(cases.begin(), cases.end());
        switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);

        m_out.appendTo(slowBlock, lastNext);
        switchStringSlow(data, string);
    }
14182
14183 // The code for string switching is based closely on the same code in the DFG backend. While it
14184 // would be nice to reduce the amount of similar-looking code, it seems like this is one of
14185 // those algorithms where factoring out the common bits would result in more code than just
14186 // duplicating.
14187
14188 struct StringSwitchCase {
14189 StringSwitchCase() { }
14190
14191 StringSwitchCase(StringImpl* string, LBasicBlock target)
14192 : string(string)
14193 , target(target)
14194 {
14195 }
14196
14197 bool operator<(const StringSwitchCase& other) const
14198 {
14199 return stringLessThan(*string, *other.string);
14200 }
14201
14202 StringImpl* string;
14203 LBasicBlock target;
14204 };
14205
14206 struct CharacterCase {
14207 CharacterCase()
14208 : character(0)
14209 , begin(0)
14210 , end(0)
14211 {
14212 }
14213
14214 CharacterCase(LChar character, unsigned begin, unsigned end)
14215 : character(character)
14216 , begin(begin)
14217 , end(end)
14218 {
14219 }
14220
14221 bool operator<(const CharacterCase& other) const
14222 {
14223 return character < other.character;
14224 }
14225
14226 LChar character;
14227 unsigned begin;
14228 unsigned end;
14229 };
14230
// Emits one level of the string-switch decision tree over cases[begin, end).
// The first numChecked characters of the input are already known to match
// every case in the range, and the input length is known to be at least
// alreadyCheckedLength (exactly alreadyCheckedLength if checkedExactLength).
// The strategy: check the characters common to all remaining cases, then
// switch on the first character that distinguishes them and recurse.
void switchStringRecurse(
    SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
    unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
    unsigned checkedExactLength)
{
    LBasicBlock fallThrough = lowBlock(data->fallThrough.block);

    // Empty range: nothing can match.
    if (begin == end) {
        m_out.jump(fallThrough);
        return;
    }

    // Compute, over cases[begin, end): the number of leading characters they
    // all share (commonChars), the shortest case length (minLength), and
    // whether every case has the same length (allLengthsEqual).
    unsigned minLength = cases[begin].string->length();
    unsigned commonChars = minLength;
    bool allLengthsEqual = true;
    for (unsigned i = begin + 1; i < end; ++i) {
        unsigned myCommonChars = numChecked;
        unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
        for (unsigned j = numChecked; j < limit; ++j) {
            if (cases[begin].string->at(j) != cases[i].string->at(j))
                break;
            myCommonChars++;
        }
        commonChars = std::min(commonChars, myCommonChars);
        if (minLength != cases[i].string->length())
            allLengthsEqual = false;
        minLength = std::min(minLength, cases[i].string->length());
    }

    if (checkedExactLength) {
        DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength, alreadyCheckedLength, minLength);
        DFG_ASSERT(m_graph, m_node, allLengthsEqual);
    }

    DFG_ASSERT(m_graph, m_node, minLength >= commonChars, minLength, commonChars);

    // Length guards: if lengths differ, the input must be at least minLength;
    // if all case lengths agree, an exact length check suffices.
    if (!allLengthsEqual && alreadyCheckedLength < minLength)
        m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
    if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
        m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));

    // Verify the shared prefix one 8-bit character at a time, bailing to the
    // fall-through block on the first mismatch.
    for (unsigned i = numChecked; i < commonChars; ++i) {
        m_out.check(
            m_out.notEqual(
                m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]),
                m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))),
            unsure(fallThrough));
    }

    if (minLength == commonChars) {
        // This is the case where one of the cases is a prefix of all of the other cases.
        // We've already checked that the input string is a prefix of all of the cases,
        // so we just check length to jump to that case.

        DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars, cases[begin].string->length(), commonChars);
        for (unsigned i = begin + 1; i < end; ++i)
            DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars, cases[i].string->length(), commonChars);

        if (allLengthsEqual) {
            DFG_ASSERT(m_graph, m_node, end == begin + 1, end, begin);
            m_out.jump(cases[begin].target);
            return;
        }

        m_out.check(
            m_out.equal(length, m_out.constInt32(commonChars)),
            unsure(cases[begin].target));

        // We've checked if the length is >= minLength, and then we checked if the length is
        // == commonChars. We get to this point if it is >= minLength but not == commonChars.
        // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
        switchStringRecurse(
            data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
        return;
    }

    // At this point we know that the string is longer than commonChars, and we've only verified
    // commonChars. Use a binary switch on the next unchecked character, i.e.
    // string[commonChars].

    DFG_ASSERT(m_graph, m_node, end >= begin + 2, end, begin);

    LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]);

    // Group the (sorted) cases into runs that share the discriminating
    // character; each run becomes one switch case and one recursion.
    Vector<CharacterCase> characterCases;
    CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
    for (unsigned i = begin + 1; i < end; ++i) {
        LChar currentChar = cases[i].string->at(commonChars);
        if (currentChar != currentCase.character) {
            currentCase.end = i;
            characterCases.append(currentCase);
            currentCase = CharacterCase(currentChar, i, i + 1);
        } else
            currentCase.end = i + 1;
    }
    characterCases.append(currentCase);

    Vector<LBasicBlock> characterBlocks;
    for (unsigned i = characterCases.size(); i--;)
        characterBlocks.append(m_out.newBlock());

    Vector<SwitchCase> switchCases;
    for (unsigned i = 0; i < characterCases.size(); ++i) {
        if (i)
            DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
        switchCases.append(SwitchCase(
            m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight()));
    }
    m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());

    LBasicBlock lastNext = m_out.m_nextBlock;
    characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
    // Recurse into each run; the discriminating character counts as checked.
    for (unsigned i = 0; i < characterCases.size(); ++i) {
        m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
        switchStringRecurse(
            data, buffer, length, cases, commonChars + 1,
            characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
    }

    DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
}
14352
// Fully generic string-switch path: calls into the runtime to look up the
// branch offset in the bytecode's string jump table, then switches on that
// offset. Distinct DFG cases that map to the same branch offset are
// deduplicated so the emitted switch has unique keys.
void switchStringSlow(SwitchData* data, LValue string)
{
    // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
    // blocks we want to jump to, and then request their addresses after compilation completes.
    // https://bugs.webkit.org/show_bug.cgi?id=144369

    LValue branchOffset = vmCall(
        Int32, m_out.operation(operationSwitchStringAndGetBranchOffset),
        m_callFrame, m_out.constIntPtr(data->switchTableIndex), string);

    StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex);

    Vector<SwitchCase> cases;
    // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here.
    StdUnorderedSet<int32_t> alreadyHandled;
    for (unsigned i = 0; i < data->cases.size(); ++i) {
        // FIXME: The fact that we're using the bytecode's switch table means that the
        // following DFG IR transformation would be invalid.
        //
        // Original code:
        //     switch (v) {
        //     case "foo":
        //     case "bar":
        //         things();
        //         break;
        //     default:
        //         break;
        //     }
        //
        // New code:
        //     switch (v) {
        //     case "foo":
        //         instrumentFoo();
        //         goto _things;
        //     case "bar":
        //         instrumentBar();
        //     _things:
        //         things();
        //         break;
        //     default:
        //         break;
        //     }
        //
        // Luckily, we don't currently do any such transformation. But it's kind of silly that
        // this is an issue.
        // https://bugs.webkit.org/show_bug.cgi?id=144635

        DFG::SwitchCase myCase = data->cases[i];
        StringJumpTable::StringOffsetTable::iterator iter =
            table.offsetTable.find(myCase.value.stringImpl());
        DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end());

        // Skip cases whose branch offset we have already emitted.
        if (!alreadyHandled.insert(iter->value.branchOffset).second)
            continue;

        cases.append(SwitchCase(
            m_out.constInt32(iter->value.branchOffset),
            lowBlock(myCase.target.block), Weight(myCase.target.count)));
    }

    m_out.switchInstruction(
        branchOffset, cases, lowBlock(data->fallThrough.block),
        Weight(data->fallThrough.count));
}
14417
14418 // Calls the functor at the point of code generation where we know what the result type is.
14419 // You can emit whatever code you like at that point. Expects you to terminate the basic block.
14420 // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
14421 // you aren't using this as the terminator of a high-level block, you should create your own
14422 // contination and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
14423 // calling this. For example:
14424 //
14425 // LBasicBlock continuation = m_out.newBlock();
14426 // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
14427 // buildTypeOf(
14428 // child, value,
14429 // [&] (TypeofType type) {
14430 // do things;
14431 // m_out.jump(continuation);
14432 // });
14433 // m_out.appendTo(continuation, lastNext);
// Emits the typeof dispatch tree for `value` (the lowered form of `child`)
// and invokes `functor` once per reachable TypeofType with that type. Each
// functor invocation must terminate its basic block; see the usage comment
// above this method.
template<typename Functor>
void buildTypeOf(Edge child, LValue value, const Functor& functor)
{
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

    // Implements the following branching structure:
    //
    // if (is cell) {
    //     if (is object) {
    //         if (is function) {
    //             return function;
    //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
    //             return object
    //         } else {
    //             return slowPath();
    //         }
    //     } else if (is string) {
    //         return string
    //     } else if (is bigint) {
    //         return bigint
    //     } else {
    //         return symbol
    //     }
    // } else if (is number) {
    //     return number
    // } else if (is null) {
    //     return object
    // } else if (is boolean) {
    //     return boolean
    // } else {
    //     return undefined
    // }
    //
    // FIXME: typeof Symbol should be more frequently seen than BigInt.
    // We should change the order of type detection based on this frequency.
    // https://bugs.webkit.org/show_bug.cgi?id=192650

    LBasicBlock cellCase = m_out.newBlock();
    LBasicBlock objectCase = m_out.newBlock();
    LBasicBlock functionCase = m_out.newBlock();
    LBasicBlock notFunctionCase = m_out.newBlock();
    LBasicBlock reallyObjectCase = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock unreachable = m_out.newBlock();
    LBasicBlock notObjectCase = m_out.newBlock();
    LBasicBlock stringCase = m_out.newBlock();
    LBasicBlock notStringCase = m_out.newBlock();
    LBasicBlock bigIntCase = m_out.newBlock();
    LBasicBlock symbolCase = m_out.newBlock();
    LBasicBlock notCellCase = m_out.newBlock();
    LBasicBlock numberCase = m_out.newBlock();
    LBasicBlock notNumberCase = m_out.newBlock();
    LBasicBlock notNullCase = m_out.newBlock();
    LBasicBlock booleanCase = m_out.newBlock();
    LBasicBlock undefinedCase = m_out.newBlock();

    m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

    LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
    m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));

    m_out.appendTo(objectCase, functionCase);
    m_out.branch(
        isFunction(value, provenType(child) & SpecObject),
        unsure(functionCase), unsure(notFunctionCase));

    m_out.appendTo(functionCase, notFunctionCase);
    functor(TypeofType::Function);

    // Exotic objects (e.g. with a call trap or that masquerade as undefined)
    // cannot be classified inline; send them to the lazy slow path.
    m_out.appendTo(notFunctionCase, reallyObjectCase);
    m_out.branch(
        isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
        rarely(slowPath), usually(reallyObjectCase));

    m_out.appendTo(reallyObjectCase, slowPath);
    functor(TypeofType::Object);

    // Slow path: ask the runtime which TypeofType this object is, then
    // dispatch to the matching case block. Only Undefined, Object, and
    // Function are expected back; anything else is unreachable.
    m_out.appendTo(slowPath, unreachable);
    VM& vm = this->vm();
    LValue result = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
        }, value);
    Vector<SwitchCase, 3> cases;
    cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
    cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
    cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
    m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight());

    m_out.appendTo(unreachable, notObjectCase);
    m_out.unreachable();

    // Non-object cells: string, then bigint, otherwise symbol.
    m_out.appendTo(notObjectCase, stringCase);
    m_out.branch(
        isString(value, provenType(child) & (SpecCell - SpecObject)),
        unsure(stringCase), unsure(notStringCase));

    m_out.appendTo(stringCase, notStringCase);
    functor(TypeofType::String);

    m_out.appendTo(notStringCase, bigIntCase);
    m_out.branch(
        isBigInt(value, provenType(child) & (SpecCell - SpecObject - SpecString)),
        unsure(bigIntCase), unsure(symbolCase));

    m_out.appendTo(bigIntCase, symbolCase);
    functor(TypeofType::BigInt);

    m_out.appendTo(symbolCase, notCellCase);
    functor(TypeofType::Symbol);

    // Non-cells: number, then null (typeof null is "object"), then boolean,
    // otherwise undefined.
    m_out.appendTo(notCellCase, numberCase);
    m_out.branch(
        isNumber(value, provenType(child) & ~SpecCell),
        unsure(numberCase), unsure(notNumberCase));

    m_out.appendTo(numberCase, notNumberCase);
    functor(TypeofType::Number);

    m_out.appendTo(notNumberCase, notNullCase);
    LValue isNull;
    if (provenType(child) & SpecOther)
        isNull = m_out.equal(value, m_out.constInt64(ValueNull));
    else
        isNull = m_out.booleanFalse;
    m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));

    m_out.appendTo(notNullCase, booleanCase);
    m_out.branch(
        isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
        unsure(booleanCase), unsure(undefinedCase));

    m_out.appendTo(booleanCase, undefinedCase);
    functor(TypeofType::Boolean);

    m_out.appendTo(undefinedCase, lastNext);
    functor(TypeofType::Undefined);
}
14574
14575 TypedPointer pointerIntoTypedArray(LValue storage, LValue index, TypedArrayType type)
14576 {
14577 LValue offset = m_out.shl(m_out.zeroExtPtr(index), m_out.constIntPtr(logElementSize(type)));
14578
14579 return TypedPointer(
14580 m_heaps.typedArrayProperties,
14581 m_out.add(
14582 storage,
14583 offset
14584 ));
14585 }
14586
14587 LValue loadFromIntTypedArray(TypedPointer pointer, TypedArrayType type)
14588 {
14589 switch (elementSize(type)) {
14590 case 1:
14591 return isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
14592 case 2:
14593 return isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
14594 case 4:
14595 return m_out.load32(pointer);
14596 default:
14597 DFG_CRASH(m_graph, m_node, "Bad element size");
14598 }
14599 }
14600
14601 Output::StoreType storeType(TypedArrayType type)
14602 {
14603 if (isInt(type)) {
14604 switch (elementSize(type)) {
14605 case 1:
14606 return Output::Store32As8;
14607 case 2:
14608 return Output::Store32As16;
14609 case 4:
14610 return Output::Store32;
14611 default:
14612 DFG_CRASH(m_graph, m_node, "Bad element size");
14613 return Output::Store32;
14614 }
14615 }
14616 switch (type) {
14617 case TypeFloat32:
14618 return Output::StoreFloat;
14619 case TypeFloat64:
14620 return Output::StoreDouble;
14621 default:
14622 DFG_CRASH(m_graph, m_node, "Bad typed array type");
14623 }
14624 }
14625
// Publishes the result of an integer typed-array load. Elements narrower
// than 32 bits, and signed 32-bit elements, always fit in an int32. An
// unsigned 32-bit value may not, so depending on the node's prediction we
// either speculate that it is non-negative (when canSpeculate permits),
// widen it to Int52, or convert it to a double.
void setIntTypedArrayLoadResult(LValue result, TypedArrayType type, bool canSpeculate = false)
{
    if (elementSize(type) < 4 || isSigned(type)) {
        setInt32(result);
        return;
    }

    if (m_node->shouldSpeculateInt32() && canSpeculate) {
        // Treat a set sign bit as an overflow of the int32 speculation.
        speculate(
            Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
        setInt32(result);
        return;
    }

    if (m_node->shouldSpeculateInt52()) {
        // Zero-extend: the uint32 value is always representable in Int52.
        setStrictInt52(m_out.zeroExt(result, Int64));
        return;
    }

    setDouble(m_out.unsignedToDouble(result));
}
14647
// Lowers `edge` to the int32 value to store into an integer typed array.
// When isClamped is true (Uint8Clamped arrays), the value is clamped into
// [0, 255]; otherwise doubles are converted with JS toInt32 semantics.
LValue getIntTypedArrayStoreOperand(Edge edge, bool isClamped = false)
{
    LValue intValue;
    switch (edge.useKind()) {
    case Int52RepUse:
    case Int32Use: {
        if (edge.useKind() == Int32Use)
            intValue = lowInt32(edge);
        else
            intValue = m_out.castToInt32(lowStrictInt52(edge));

        if (isClamped) {
            // Clamp to [0, 255]: negatives become 0, values above 255
            // become 255.
            LBasicBlock atLeastZero = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> intValues;
            intValues.append(m_out.anchor(m_out.int32Zero));
            m_out.branch(
                m_out.lessThan(intValue, m_out.int32Zero),
                unsure(continuation), unsure(atLeastZero));

            LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);

            intValues.append(m_out.anchor(m_out.select(
                m_out.greaterThan(intValue, m_out.constInt32(255)),
                m_out.constInt32(255),
                intValue)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            intValue = m_out.phi(Int32, intValues);
        }
        break;
    }

    case DoubleRepUse: {
        LValue doubleValue = lowDouble(edge);

        if (isClamped) {
            // Clamp to [0, 255]. The "less than or unordered" comparison
            // routes NaN to the zero result as well.
            LBasicBlock atLeastZero = m_out.newBlock();
            LBasicBlock withinRange = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 3> intValues;
            intValues.append(m_out.anchor(m_out.int32Zero));
            m_out.branch(
                m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
                unsure(continuation), unsure(atLeastZero));

            LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
            intValues.append(m_out.anchor(m_out.constInt32(255)));
            m_out.branch(
                m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
                unsure(continuation), unsure(withinRange));

            m_out.appendTo(withinRange, continuation);
            intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            intValue = m_out.phi(Int32, intValues);
        } else
            intValue = doubleToInt32(doubleValue);
        break;
    }

    default:
        DFG_CRASH(m_graph, m_node, "Bad use kind");
    }

    return intValue;
}
14720
// Converts a double to a 32-bit integer (signed, or unsigned when isSigned is
// false): uses the fast hardware conversion when the value is proven to lie
// within [low, high], and calls operationToInt32 otherwise.
LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
{
    LBasicBlock greatEnough = m_out.newBlock();
    LBasicBlock withinRange = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    Vector<ValueFromBlock, 2> results;

    // Range check: low <= doubleValue <= high. Either failing comparison
    // (including NaN, which fails both) routes to the slow path.
    m_out.branch(
        m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
        unsure(greatEnough), unsure(slowPath));

    LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
    m_out.branch(
        m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
        unsure(withinRange), unsure(slowPath));

    m_out.appendTo(withinRange, slowPath);
    LValue fastResult;
    if (isSigned)
        fastResult = m_out.doubleToInt(doubleValue);
    else
        fastResult = m_out.doubleToUInt(doubleValue);
    results.append(m_out.anchor(fastResult));
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    results.append(m_out.anchor(m_out.call(Int32, m_out.operation(operationToInt32), doubleValue)));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, results);
}
14755
// Converts a double to an int32 with JavaScript semantics. On ARM64 cores
// that support the JS-semantics conversion instruction, a single patchpoint
// suffices; otherwise we fall back to sensibleDoubleToInt32 or the
// range-checked doubleToInt32 above.
LValue doubleToInt32(LValue doubleValue)
{
#if CPU(ARM64)
    if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics()) {
        PatchpointValue* patchpoint = m_out.patchpoint(Int32);
        patchpoint->append(ConstrainedValue(doubleValue, B3::ValueRep::SomeRegister));
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            jit.convertDoubleToInt32UsingJavaScriptSemantics(params[1].fpr(), params[0].gpr());
        });
        // Pure conversion: no memory reads/writes or other side effects.
        patchpoint->effects = Effects::none();
        return patchpoint;
    }
#endif

    if (hasSensibleDoubleToInt())
        return sensibleDoubleToInt32(doubleValue);

    // Fast-path range for the generic conversion: |value| <= 2^31 - 1.
    double limit = pow(2, 31) - 1;
    return doubleToInt32(doubleValue, -limit, limit);
}
14776
// Double-to-int32 conversion for targets whose conversion instruction
// produces 0x80000000 on failure (out of range / NaN): take the hardware
// result directly and only call the slow operation when it reports failure.
LValue sensibleDoubleToInt32(LValue doubleValue)
{
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LValue fastResultValue = m_out.doubleToInt(doubleValue);
    ValueFromBlock fastResult = m_out.anchor(fastResultValue);
    // 0x80000000 is the failure sentinel; note INT32_MIN itself also takes
    // the slow path, which recomputes the same value.
    m_out.branch(
        m_out.equal(fastResultValue, m_out.constInt32(0x80000000)),
        rarely(slowPath), usually(continuation));

    LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
    ValueFromBlock slowResult = m_out.anchor(
        m_out.call(Int32, m_out.operation(operationToInt32SensibleSlow), doubleValue));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, fastResult, slowResult);
}
14796
14797 // This is a mechanism for creating a code generator that fills in a gap in the code using our
14798 // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want
14799 // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually
14800 // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps.
14801 // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you
14802 // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have
14803 // to spill to top of stack if there aren't enough registers available.
14804 //
14805 // Lazy slow paths involve three different stages of execution. Each stage has unique
14806 // capabilities and knowledge. The stages are:
14807 //
14808 // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will
14809 // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow
14810 // path, then you must pass it as an argument here (as one of the varargs arguments after the
14811 // functor). But, lowering doesn't know which registers will be used for those LValues. Hence
14812 // you pass a lambda to lazySlowPath() and that lambda will run during stage (2):
14813 //
14814 // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know
14815 // the mapping from arguments passed to this method in (1) and the registers that B3
14816 // selected for those arguments. You don't actually want to generate any code here, since then
14817 // the slow path wouldn't actually be lazily generated. Instead, you want to save the
14818 // registers being used for the arguments and defer code generation to stage (3) by creating
14819 // and returning a LazySlowPath::Generator:
14820 //
14821 // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path
14822 // executing for the first time. It will call the generator you created in stage (2).
14823 //
14824 // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times.
14825 // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which
14826 // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication),
14827 // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each
14828 // stage (2). It will be called zero times if the slow path never runs. This is what you hope for
14829 // whenever you use the lazySlowPath() mechanism.
14830 //
14831 // A typical use of lazySlowPath() will look like the example below, which just creates a slow
14832 // path that adds some value to the input and returns it.
14833 //
14834 // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs.
14835 // // Notice how we pass "input" as an argument to lazySlowPath().
14836 // LValue input = ...;
14837 // int addend = ...;
14838 // LValue output = lazySlowPath(
14839 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14840 // // Stage (2) is here. This is your last chance to figure out which registers are used
14841 // // for which values. Location zero is always the return value. You can ignore it if
14842 // // you don't want to return anything. Location 1 is the register for the first
14843 // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could
14844 // // also hold an FPR, if you are passing a double.
14845 // GPRReg outputGPR = locations[0].directGPR();
14846 // GPRReg inputGPR = locations[1].directGPR();
14847 // return LazySlowPath::createGenerator(
14848 // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
14849 // // Stage (3) is here. This is when you generate code. You have access to the
14850 // // registers you collected in stage (2) because this lambda closes over those
14851 // // variables (outputGPR and inputGPR). You also have access to whatever extra
14852 // // data you collected in stage (1), such as the addend in this case.
14853 // jit.add32(TrustedImm32(addend), inputGPR, outputGPR);
14854 // // You have to end by jumping to done. There is nothing to fall through to.
14855 // // You can also jump to the exception handler (see LazySlowPath.h for more
14856 // // info). Note that currently you cannot OSR exit.
14857 // params.doneJumps.append(jit.jump());
14858 // });
14859 // },
14860 // input);
14861 //
14862 // You can basically pass as many inputs as you like, either using this varargs form, or by
14863 // passing a Vector of LValues.
14864 //
14865 // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator()
14866 // helper. For example:
14867 //
14868 // LValue input = ...;
14869 // LValue output = lazySlowPath(
14870 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14871 // return createLazyCallGenerator(
14872 // operationDoThings, locations[0].directGPR(), locations[1].directGPR());
14873 // }, input);
14874 //
14875 // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda -
14876 // run after the function that created them returns. Hence, you should not use by-reference
14877 // capture (i.e. [&]) in any of these lambdas.
// Varargs convenience overload: packs the argument LValues into a Vector and
// forwards to the Vector-based lazySlowPath() below. See the long comment
// above for the three-stage lazy-slow-path protocol.
template<typename Functor, typename... ArgumentTypes>
PatchpointValue* lazySlowPath(const Functor& functor, ArgumentTypes... arguments)
{
    return lazySlowPath(functor, Vector<LValue>{ arguments... });
}
14883
// Creates a patchpoint whose slow path is generated lazily on first
// execution. `functor` runs at stackmap-generation time (stage 2 of the
// protocol documented above) and returns the generator that will emit the
// actual slow-path code (stage 3). Location 0 of the stackmap is the Int64
// result; the userArguments follow in order.
template<typename Functor>
PatchpointValue* lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments)
{
    CodeOrigin origin = m_node->origin.semantic;

    PatchpointValue* result = m_out.patchpoint(B3::Int64);
    for (LValue arg : userArguments)
        result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister));

    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(result);

    result->clobber(RegisterSet::macroScratchRegisters());
    State* state = &m_ftlState;

    result->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // Stage 2: we now know which registers B3 picked for the result
            // and arguments; hand them to the functor as Locations.
            Vector<Location> locations;
            for (const B3::ValueRep& rep : params)
                locations.append(Location::forValueRep(rep));

            RefPtr<LazySlowPath::Generator> generator = functor(locations);

            // The patchable jump is initially linked (below) to a late path
            // that triggers generation; once the slow path exists it will be
            // repatched to jump straight there. `done` is where the slow path
            // returns to.
            CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
            CCallHelpers::Label done = jit.label();

            RegisterSet usedRegisters = params.unavailableRegisters();

            RefPtr<ExceptionTarget> exceptionTarget =
                exceptionHandle->scheduleExitCreation(params);

            // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here.
            // Doing so should automagically register late paths that emit exit thunks.

            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    patchableJump.m_jump.link(&jit);
                    // Reserve a slot for this lazy slow path, push its index
                    // for the generation thunk to find, and jump to the
                    // shared generation thunk.
                    unsigned index = state->jitCode->lazySlowPaths.size();
                    state->jitCode->lazySlowPaths.append(nullptr);
                    jit.pushToSaveImmediateWithoutTouchingRegisters(
                        CCallHelpers::TrustedImm32(index));
                    CCallHelpers::Jump generatorJump = jit.jump();

                    // Note that so long as we're here, we don't really know if our late path
                    // runs before or after any other late paths that we might depend on, like
                    // the exception thunk.

                    RefPtr<JITCode> jitCode = state->jitCode;
                    VM* vm = &state->graph.m_vm;

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(generatorJump,
                                CodeLocationLabel<JITThunkPtrTag>(vm->getCTIStub(lazySlowPathGenerationThunkGenerator).code()));

                            // Populate the LazySlowPath record with the final
                            // code locations so the thunk can generate and
                            // install the slow path on first execution.
                            std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>();

                            auto linkedPatchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(patchableJump));

                            CodeLocationLabel<JSInternalPtrTag> linkedDone = linkBuffer.locationOf<JSInternalPtrTag>(done);

                            CallSiteIndex callSiteIndex =
                                jitCode->common.addUniqueCallSiteIndex(origin);

                            lazySlowPath->initialize(
                                linkedPatchableJump, linkedDone,
                                exceptionTarget->label(linkBuffer), usedRegisters,
                                callSiteIndex, generator);

                            jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath);
                        });
                });
        });
    return result;
}
14960
// Emits an OSR exit check at the current origin: if failCondition is true at
// runtime, we exit with the given kind, reporting lowValue/highValue.
void speculate(
    ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
{
    appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
}
14966
// Overload taking a value-profile locator instead of a Node* for the high
// value; otherwise identical to speculate() above.
void speculate(
    ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition)
{
    appendOSRExit(kind, lowValue, profile, failCondition, m_origin);
}
14972
// Emits an unconditional OSR exit of the given kind and marks the abstract
// state invalid so no further code is lowered for this block.
void terminate(ExitKind kind)
{
    speculate(kind, noValue(), nullptr, m_out.booleanTrue);
    didAlreadyTerminate();
}
14978
// Marks the abstract interpreter state invalid, recording that this block's
// lowering has already terminated (e.g. via an unconditional exit).
void didAlreadyTerminate()
{
    m_state.setIsValid(false);
}
14983
// Narrows the abstract interpreter's type for highValue as if a type check
// had been emitted, without emitting any code.
void simulatedTypeCheck(Edge highValue, SpeculatedType typesPassedThrough)
{
    m_interpreter.filter(highValue, typesPassedThrough);
}
14988
// Emits a type check (OSR exit on failCondition) unless the abstract state
// already proves it unnecessary; see appendTypeCheck below.
void typeCheck(
    FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
    LValue failCondition, ExitKind exitKind = BadType)
{
    appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind);
}
14995
// If the abstract interpreter cannot already prove that highValue is within
// typesPassedThrough, emits an OSR exit guarded by failCondition and then
// narrows the abstract state accordingly.
void appendTypeCheck(
    FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
    LValue failCondition, ExitKind exitKind)
{
    if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
        return;
    ASSERT(mayHaveTypeCheck(highValue.useKind()));
    appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin);
    m_interpreter.filter(highValue, typesPassedThrough);
}
15006
// Produces the Int32 lowering of the edge's value, trying each already-
// computed representation in turn (constant, int32, strict Int52, Int52,
// boxed JSValue) and converting/checking as needed. If the value is proven
// not to be an int32, terminates lowering of this block and returns zero.
LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
{
    ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));

    // Constants: materialize directly; a non-int32 constant means the
    // speculation must fail.
    if (edge->hasConstant()) {
        JSValue value = edge->asJSValue();
        simulatedTypeCheck(edge, SpecInt32Only);
        if (!value.isInt32()) {
            if (mayHaveTypeCheck(edge.useKind()))
                terminate(Uncountable);
            return m_out.int32Zero;
        }
        LValue result = m_out.constInt32(value.asInt32());
        result->setOrigin(B3::Origin(edge.node()));
        return result;
    }

    LoweredNodeValue value = m_int32Values.get(edge.node());
    if (isValid(value)) {
        simulatedTypeCheck(edge, SpecInt32Only);
        return value.value();
    }

    // Fall back to the Int52 representations, narrowing with a check.
    value = m_strictInt52Values.get(edge.node());
    if (isValid(value))
        return strictInt52ToInt32(edge, value.value());

    value = m_int52Values.get(edge.node());
    if (isValid(value))
        return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));

    // Boxed JSValue: type-check that it is an int32, then unbox and cache.
    value = m_jsValueValues.get(edge.node());
    if (isValid(value)) {
        LValue boxedResult = value.value();
        FTL_TYPE_CHECK(
            jsValueValue(boxedResult), edge, SpecInt32Only, isNotInt32(boxedResult));
        LValue result = unboxInt32(boxedResult);
        setInt32(edge.node(), result);
        return result;
    }

    // No representation exists: the value cannot be an int32.
    DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32Only), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.int32Zero;
}
15053
// The two lowered representations of a DFG Int52 value; conversions between
// them go through int52ToStrictInt52 / strictInt52ToInt52.
enum Int52Kind { StrictInt52, Int52 };

// Produces the requested Int52 representation of the edge, converting from
// the other representation if that is what was computed. If no Int52
// representation exists, terminates lowering of this block and returns zero.
LValue lowInt52(Edge edge, Int52Kind kind)
{
    DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse, edge.useKind());

    LoweredNodeValue value;

    switch (kind) {
    case Int52:
        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return value.value();

        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt52(value.value());
        break;

    case StrictInt52:
        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return value.value();

        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return int52ToStrictInt52(value.value());
        break;
    }

    DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.int64Zero;
}
15088
15089 LValue lowInt52(Edge edge)
15090 {
15091 return lowInt52(edge, Int52);
15092 }
15093
15094 LValue lowStrictInt52(Edge edge)
15095 {
15096 return lowInt52(edge, StrictInt52);
15097 }
15098
15099 bool betterUseStrictInt52(Node* node)
15100 {
15101 return !isValid(m_int52Values.get(node));
15102 }
15103 bool betterUseStrictInt52(Edge edge)
15104 {
15105 return betterUseStrictInt52(edge.node());
15106 }
15107 template<typename T>
15108 Int52Kind bestInt52Kind(T node)
15109 {
15110 return betterUseStrictInt52(node) ? StrictInt52 : Int52;
15111 }
15112 Int52Kind opposite(Int52Kind kind)
15113 {
15114 switch (kind) {
15115 case Int52:
15116 return StrictInt52;
15117 case StrictInt52:
15118 return Int52;
15119 }
15120 DFG_CRASH(m_graph, m_node, "Bad use kind");
15121 return Int52;
15122 }
15123
15124 LValue lowWhicheverInt52(Edge edge, Int52Kind& kind)
15125 {
15126 kind = bestInt52Kind(edge);
15127 return lowInt52(edge, kind);
15128 }
15129
    // Lowers the edge to a cell pointer, emitting the cell speculation check
    // (OSR exit) when needed. Tries a cell constant first, then a cached
    // JSValue lowering checked against SpecCellCheck. Terminates with a dummy
    // null pointer when the value is proven to never be a cell.
    LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()), edge.useKind());
        
        if (edge->op() == JSConstant) {
            FrozenValue* value = edge->constant();
            simulatedTypeCheck(edge, SpecCellCheck);
            // A non-cell constant can never pass the speculation: terminate.
            if (!value->value().isCell()) {
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.intPtrZero;
            }
            LValue result = frozenPointer(value);
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }
        
        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue uncheckedValue = value.value();
            // OSR-exits if the boxed value is not a cell; cells are unboxed
            // pointers, so the checked value can be returned directly.
            FTL_TYPE_CHECK(
                jsValueValue(uncheckedValue), edge, SpecCellCheck, isNotCell(uncheckedValue));
            return uncheckedValue;
        }
        
        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCellCheck), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.intPtrZero;
    }
15160
15161 LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15162 {
15163 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
15164
15165 LValue result = lowCell(edge, mode);
15166 speculateObject(edge, result);
15167 return result;
15168 }
15169
15170 LValue lowRegExpObject(Edge edge)
15171 {
15172 LValue result = lowCell(edge);
15173 speculateRegExpObject(edge, result);
15174 return result;
15175 }
15176
15177 LValue lowMapObject(Edge edge)
15178 {
15179 LValue result = lowCell(edge);
15180 speculateMapObject(edge, result);
15181 return result;
15182 }
15183
15184 LValue lowSetObject(Edge edge)
15185 {
15186 LValue result = lowCell(edge);
15187 speculateSetObject(edge, result);
15188 return result;
15189 }
15190
15191 LValue lowWeakMapObject(Edge edge)
15192 {
15193 LValue result = lowCell(edge);
15194 speculateWeakMapObject(edge, result);
15195 return result;
15196 }
15197
15198 LValue lowWeakSetObject(Edge edge)
15199 {
15200 LValue result = lowCell(edge);
15201 speculateWeakSetObject(edge, result);
15202 return result;
15203 }
15204
15205 LValue lowDataViewObject(Edge edge)
15206 {
15207 LValue result = lowCell(edge);
15208 speculateDataViewObject(edge, result);
15209 return result;
15210 }
15211
15212 LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15213 {
15214 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse);
15215
15216 LValue result = lowCell(edge, mode);
15217 speculateString(edge, result);
15218 return result;
15219 }
15220
15221 LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15222 {
15223 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse);
15224
15225 LValue string = lowString(edge, mode);
15226 LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
15227 speculateStringIdent(edge, string, stringImpl);
15228 return stringImpl;
15229 }
15230
15231 LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15232 {
15233 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse);
15234
15235 LValue result = lowCell(edge, mode);
15236 speculateSymbol(edge, result);
15237 return result;
15238 }
15239
15240 LValue lowBigInt(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15241 {
15242 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BigIntUse);
15243
15244 LValue result = lowCell(edge, mode);
15245 speculateBigInt(edge, result);
15246 return result;
15247 }
15248
15249 LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15250 {
15251 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
15252
15253 LValue result = lowCell(edge, mode);
15254 speculateNonNullObject(edge, result);
15255 return result;
15256 }
15257
    // Lowers the edge to an unboxed boolean LValue, emitting a speculation
    // check when needed. Tries constant folding, a cached boolean lowering,
    // and an unboxing of a cached JSValue lowering; terminates with a dummy
    // false if no lowering exists.
    LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);
        
        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecBoolean);
            // A non-boolean constant can never pass the speculation: terminate.
            if (!value.isBoolean()) {
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.booleanFalse;
            }
            LValue result = m_out.constBool(value.asBoolean());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }
        
        LoweredNodeValue value = m_booleanValues.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecBoolean);
            return value.value();
        }
        
        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue unboxedResult = value.value();
            // OSR-exits if the boxed value is not a boolean, then unboxes and
            // caches the boolean lowering for future uses of this node.
            FTL_TYPE_CHECK(
                jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
            LValue result = unboxBoolean(unboxedResult);
            setBoolean(edge.node(), result);
            return result;
        }
        
        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.booleanFalse;
    }
15296
    // Lowers the edge to a double LValue. Double-rep values are only ever
    // produced as doubles, so the cached lowering is the only source; if it
    // is missing, the path is terminated with a dummy zero.
    LValue lowDouble(Edge edge)
    {
        DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()), edge.useKind());
        
        LoweredNodeValue value = m_doubleValues.get(edge.node());
        if (isValid(value))
            return value.value();
        DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.doubleZero;
    }
15309
15310 LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15311 {
15312 DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse, m_node->op(), edge.useKind());
15313 DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()), m_node->op(), edge.useKind());
15314 DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse, m_node->op(), edge.useKind());
15315
15316 if (edge->hasConstant()) {
15317 LValue result = m_out.constInt64(JSValue::encode(edge->asJSValue()));
15318 result->setOrigin(B3::Origin(edge.node()));
15319 return result;
15320 }
15321
15322 LoweredNodeValue value = m_jsValueValues.get(edge.node());
15323 if (isValid(value))
15324 return value.value();
15325
15326 value = m_int32Values.get(edge.node());
15327 if (isValid(value)) {
15328 LValue result = boxInt32(value.value());
15329 setJSValue(edge.node(), result);
15330 return result;
15331 }
15332
15333 value = m_booleanValues.get(edge.node());
15334 if (isValid(value)) {
15335 LValue result = boxBoolean(value.value());
15336 setJSValue(edge.node(), result);
15337 return result;
15338 }
15339
15340 DFG_CRASH(m_graph, m_node, makeString("Value not defined: ", String::number(edge.node()->index())).ascii().data());
15341 return 0;
15342 }
15343
    // Lowers the edge to a boxed JSValue and OSR-exits if it is a cell.
    LValue lowNotCell(Edge edge)
    {
        LValue result = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCellCheck, isCell(result));
        return result;
    }
15350
15351 LValue lowStorage(Edge edge)
15352 {
15353 LoweredNodeValue value = m_storageValues.get(edge.node());
15354 if (isValid(value))
15355 return value.value();
15356
15357 LValue result = lowCell(edge);
15358 setStorage(edge.node(), result);
15359 return result;
15360 }
15361
    // Narrows a strict Int52 to Int32, OSR-exiting if the value does not
    // round-trip through a 32-bit truncation. Caches the Int32 lowering.
    LValue strictInt52ToInt32(Edge edge, LValue value)
    {
        LValue result = m_out.castToInt32(value);
        FTL_TYPE_CHECK(
            noValue(), edge, SpecInt32Only,
            m_out.notEqual(m_out.signExt32To64(result), value));
        setInt32(edge.node(), result);
        return result;
    }
15371
15372 LValue strictInt52ToDouble(LValue value)
15373 {
15374 return m_out.intToDouble(value);
15375 }
15376
    // Boxes a strict Int52 as a JSValue: values that fit in int32 are boxed
    // as int32, the rest are boxed as doubles. Emits a diamond with a phi.
    LValue strictInt52ToJSValue(LValue value)
    {
        LBasicBlock isInt32 = m_out.newBlock();
        LBasicBlock isDouble = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        Vector<ValueFromBlock, 2> results;
            
        LValue int32Value = m_out.castToInt32(value);
        // Fits in int32 iff sign-extending the truncation reproduces the value.
        m_out.branch(
            m_out.equal(m_out.signExt32To64(int32Value), value),
            unsure(isInt32), unsure(isDouble));
        
        LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);
        
        results.append(m_out.anchor(boxInt32(int32Value)));
        m_out.jump(continuation);
        
        m_out.appendTo(isDouble, continuation);
        
        results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int64, results);
    }
15403
15404 LValue strictInt52ToInt52(LValue value)
15405 {
15406 return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
15407 }
15408
15409 LValue int52ToStrictInt52(LValue value)
15410 {
15411 return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
15412 }
15413
    // Tests whether a boxed JSValue is an int32; folds to a constant when the
    // proven type already decides it. In the 64-bit encoding, boxed int32s
    // are the values at or above m_tagTypeNumber.
    LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecInt32Only))
            return proven;
        return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
    }
    // Negation of isInt32 (unsigned below instead of aboveOrEqual).
    LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecInt32Only))
            return proven;
        return m_out.below(jsValue, m_tagTypeNumber);
    }
    // Unboxes an int32 by truncating away the tag in the high bits.
    LValue unboxInt32(LValue jsValue)
    {
        return m_out.castToInt32(jsValue);
    }
    // Boxes an int32 by zero-extending and adding the number tag.
    LValue boxInt32(LValue value)
    {
        return m_out.add(m_out.zeroExt(value, Int64), m_tagTypeNumber);
    }
15434
    // True when the boxed value is a cell or a "misc" value (i.e. not a
    // number): none of the number-tag bits are set.
    LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck | SpecMisc))
            return proven;
        return m_out.testIsZero64(jsValue, m_tagTypeNumber);
    }
    // Negation of isCellOrMisc: some number-tag bit is set, so it is a number.
    LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~(SpecCellCheck | SpecMisc)))
            return proven;
        return m_out.testNonZero64(jsValue, m_tagTypeNumber);
    }
15447
    // Unboxes a double by undoing the boxing offset and bit-casting to
    // Double. Optionally reports the intermediate integer form through
    // unboxedAsInt for callers that want to inspect the raw bits.
    LValue unboxDouble(LValue jsValue, LValue* unboxedAsInt = nullptr)
    {
        LValue asInt = m_out.add(jsValue, m_tagTypeNumber);
        if (unboxedAsInt)
            *unboxedAsInt = asInt;
        return m_out.bitCast(asInt, Double);
    }
    // Boxes a double: bit-cast to integer, then subtract the boxing offset
    // (the inverse of unboxDouble's add).
    LValue boxDouble(LValue doubleValue)
    {
        return m_out.sub(m_out.bitCast(doubleValue, Int64), m_tagTypeNumber);
    }
15459
    // Converts a boxed JSValue to strict Int52: int32s are sign-extended
    // inline; other values go through the slow-path conversion call, which
    // OSR-exits when the double is not representable as Int52. The int/double
    // branch is statically folded when the abstract interpreter already
    // proved one side.
    LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        LValue isNotInt32;
        if (!m_interpreter.needsTypeCheck(edge, SpecInt32Only))
            isNotInt32 = m_out.booleanFalse;
        else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32Only))
            isNotInt32 = m_out.booleanTrue;
        else
            isNotInt32 = this->isNotInt32(boxedValue);
        m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));
        
        LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
        
        ValueFromBlock intToInt52 = m_out.anchor(
            m_out.signExt32To64(unboxInt32(boxedValue)));
        m_out.jump(continuation);
        
        m_out.appendTo(doubleCase, continuation);
        
        // The helper returns JSValue::notInt52 as a sentinel for "not
        // representable"; that triggers the OSR exit below.
        LValue possibleResult = m_out.call(
            Int64, m_out.operation(operationConvertBoxedDoubleToInt52), boxedValue);
        FTL_TYPE_CHECK(
            jsValueValue(boxedValue), edge, SpecInt32Only | SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));
        
        ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult);
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        
        return m_out.phi(Int64, intToInt52, doubleToInt52);
    }
15496
    // Converts a double to strict Int52 via the runtime helper, OSR-exiting
    // (with Int52Overflow) when the helper returns the notInt52 sentinel.
    LValue doubleToStrictInt52(Edge edge, LValue value)
    {
        LValue possibleResult = m_out.call(
            Int64, m_out.operation(operationConvertDoubleToInt52), value);
        FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
            doubleValue(value), edge, SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));
        
        return possibleResult;
    }
15507
    // Converts a double to int32, OSR-exiting (Overflow) when the double does
    // not round-trip through the conversion, and optionally OSR-exiting
    // (NegativeZero) when the result is zero but the input's sign bit is set.
    LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
    {
        LValue integerValue = m_out.doubleToInt(value);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        // NotEqualOrUnordered also catches NaN inputs.
        LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
        speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger);

        if (shouldCheckNegativeZero) {
            LBasicBlock valueIsZero = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);

            // -0.0 converts to integer 0; detect it via the sign bit of the
            // raw double bits.
            LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
            LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));

            speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet);
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
        return integerValue;
    }
15531
    // A boxed value is a number iff it is neither a cell nor a misc value.
    LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecFullNumber))
            return proven;
        return isNotCellOrMisc(jsValue);
    }
    // Negation of isNumber.
    LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecFullNumber))
            return proven;
        return isCellOrMisc(jsValue);
    }
15544
    // Non-cell boxed values have at least one tag-mask bit set.
    LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecCellCheck))
            return proven;
        return m_out.testNonZero64(jsValue, m_tagMask);
    }
15551
    // Cells are the boxed values with no tag-mask bits set.
    LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck))
            return proven;
        return m_out.testIsZero64(jsValue, m_tagMask);
    }
15558
    // Misc values (undefined/null/booleans/other "other" tags) are encoded at
    // or below this bit pattern; anything above is not misc.
    LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecMisc))
            return proven;
        return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
    }
15565
    // Negation of isNotMisc.
    LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecMisc))
            return proven;
        return m_out.logicalNot(isNotMisc(value));
    }
15572
    // Booleans are ValueFalse and ValueTrue, which differ only in bit 0;
    // XOR with ValueFalse and test all other bits.
    LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecBoolean))
            return proven;
        return m_out.testNonZero64(
            m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
            m_out.constInt64(~1));
    }
    // Negation of isNotBoolean.
    LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecBoolean))
            return proven;
        return m_out.logicalNot(isNotBoolean(jsValue));
    }
    // Unboxes a boolean: bit 0 distinguishes ValueTrue from ValueFalse.
    LValue unboxBoolean(LValue jsValue)
    {
        // We want to use a cast that guarantees that B3 knows that even the integer
        // value is just 0 or 1. But for now we do it the dumb way.
        return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
    }
    // Boxes a boolean by selecting the canonical encoded constants.
    LValue boxBoolean(LValue value)
    {
        return m_out.select(
            value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
    }
15598
    // "Other" means undefined or null. Masking off the undefined bit maps
    // both to ValueNull, so a single compare covers both.
    LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecOther))
            return proven;
        return m_out.notEqual(
            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
            m_out.constInt64(ValueNull));
    }
    // Negation of isNotOther: true for undefined or null.
    LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecOther))
            return proven;
        return m_out.equal(
            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
            m_out.constInt64(ValueNull));
    }
15615
15616 LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
15617 {
15618 if (!(provenType & ~wantedType))
15619 return m_out.booleanTrue;
15620 if (!(provenType & wantedType))
15621 return m_out.booleanFalse;
15622 return nullptr;
15623 }
15624
    // Emits the speculation check implied by the edge's use kind. "Known" and
    // rep use kinds are already proven and need no check; untyped needs none;
    // everything else dispatches to the matching speculateXxx helper.
    void speculate(Edge edge)
    {
        switch (edge.useKind()) {
        case UntypedUse:
            break;
        case KnownInt32Use:
        case KnownStringUse:
        case KnownPrimitiveUse:
        case KnownOtherUse:
        case DoubleRepUse:
        case Int52RepUse:
        case KnownCellUse:
        case KnownBooleanUse:
            ASSERT(!m_interpreter.needsTypeCheck(edge));
            break;
        case Int32Use:
            speculateInt32(edge);
            break;
        case CellUse:
            speculateCell(edge);
            break;
        case CellOrOtherUse:
            speculateCellOrOther(edge);
            break;
        case AnyIntUse:
            speculateAnyInt(edge);
            break;
        case ObjectUse:
            speculateObject(edge);
            break;
        case ArrayUse:
            speculateArray(edge);
            break;
        case FunctionUse:
            speculateFunction(edge);
            break;
        case ObjectOrOtherUse:
            speculateObjectOrOther(edge);
            break;
        case FinalObjectUse:
            speculateFinalObject(edge);
            break;
        case RegExpObjectUse:
            speculateRegExpObject(edge);
            break;
        case ProxyObjectUse:
            speculateProxyObject(edge);
            break;
        case DerivedArrayUse:
            speculateDerivedArray(edge);
            break;
        case MapObjectUse:
            speculateMapObject(edge);
            break;
        case SetObjectUse:
            speculateSetObject(edge);
            break;
        case WeakMapObjectUse:
            speculateWeakMapObject(edge);
            break;
        case WeakSetObjectUse:
            speculateWeakSetObject(edge);
            break;
        case DataViewObjectUse:
            speculateDataViewObject(edge);
            break;
        case StringUse:
            speculateString(edge);
            break;
        case StringOrOtherUse:
            speculateStringOrOther(edge);
            break;
        case StringIdentUse:
            speculateStringIdent(edge);
            break;
        case SymbolUse:
            speculateSymbol(edge);
            break;
        case StringObjectUse:
            speculateStringObject(edge);
            break;
        case StringOrStringObjectUse:
            speculateStringOrStringObject(edge);
            break;
        case NumberUse:
            speculateNumber(edge);
            break;
        case RealNumberUse:
            speculateRealNumber(edge);
            break;
        case DoubleRepRealUse:
            speculateDoubleRepReal(edge);
            break;
        case DoubleRepAnyIntUse:
            speculateDoubleRepAnyInt(edge);
            break;
        case BooleanUse:
            speculateBoolean(edge);
            break;
        case BigIntUse:
            speculateBigInt(edge);
            break;
        case NotStringVarUse:
            speculateNotStringVar(edge);
            break;
        case NotSymbolUse:
            speculateNotSymbol(edge);
            break;
        case NotCellUse:
            speculateNotCell(edge);
            break;
        case OtherUse:
            speculateOther(edge);
            break;
        case MiscUse:
            speculateMisc(edge);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
        }
    }
15746
    // The node parameter is unused; dispatch purely on the edge's use kind.
    void speculate(Node*, Edge edge)
    {
        speculate(edge);
    }
15751
    // Lowering as Int32 emits the int32 speculation as a side effect.
    void speculateInt32(Edge edge)
    {
        lowInt32(edge);
    }
15756
    // Lowering as a cell emits the cell speculation as a side effect.
    void speculateCell(Edge edge)
    {
        lowCell(edge);
    }
15761
    // Emits the not-a-cell speculation, skipping it when already proven.
    void speculateNotCell(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        lowNotCell(edge);
    }
15768
    // Speculates that the value is a cell, undefined, or null: branch past
    // the check for cells, and OSR-exit on the non-cell path unless the value
    // is "other".
    void speculateCellOrOther(Edge edge)
    {
        if (shouldNotHaveTypeCheck(edge.useKind()))
            return;
        
        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isNotCell = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell));

        LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
15787
    // Speculates AnyInt by converting to strict Int52 (the conversion emits
    // the OSR exit); the converted value itself is discarded.
    void speculateAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        
        jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
15795
    // Tests a known cell's JSType against queriedType by loading the type
    // info byte; folds statically when the proven cell type decides it.
    LValue isCellWithType(LValue cell, JSType queriedType, SpeculatedType speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery))
            return proven;
        return m_out.equal(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(queriedType));
    }
15804
    // Typed-array view types occupy a contiguous JSType range starting at
    // FirstTypedArrayType; a subtract-then-unsigned-compare tests membership
    // in that range (DataView excluded).
    LValue isTypedArrayView(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecTypedArrayView))
            return proven;
        LValue jsType = m_out.sub(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(FirstTypedArrayType));
        return m_out.below(
            jsType,
            m_out.constInt32(NumberOfTypedArrayTypesExcludingDataView));
    }
15816
    // Object types are all JSTypes at or above ObjectType.
    LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
            return proven;
        return m_out.aboveOrEqual(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
15825
    // Negation of isObject: JSType strictly below ObjectType.
    LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
            return proven;
        return m_out.below(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
15834
    // Strings all share the VM's string structure; compare the structure ID.
    LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }
15843
    // Negation of isNotString: structure ID equals the VM's string structure.
    LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecString))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }
15852
    // Tests whether a JSString is a rope (unresolved concatenation). When an
    // edge is supplied, folds to false if the proven type, a known constant,
    // or a statically-known string value guarantees a non-rope string.
    LValue isRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            // Identifier strings are never ropes.
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanFalse;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanFalse;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanFalse;
            }
        }

        // Ropes are marked by a flag bit inside the value pointer field.
        return m_out.testNonZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
15871
    // Mirror of isRopeString with the polarity inverted; same static folds,
    // but resolving to true instead of false.
    LValue isNotRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            // Identifier strings are never ropes.
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanTrue;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanTrue;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanTrue;
            }
        }

        return m_out.testIsZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
15890
    // Symbols share the VM's symbol structure; compare the structure ID.
    LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }
15899
    // Negation of isNotSymbol.
    LValue isSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecSymbol))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }
15908
    // BigInts share the VM's bigint structure; compare the structure ID.
    LValue isNotBigInt(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecBigInt))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().bigIntStructure->id()));
    }
15917
    // Negation of isNotBigInt.
    LValue isBigInt(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecBigInt))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().bigIntStructure->id()));
    }
15926
    // Tests whether the cell's indexing type already matches what Arrayify
    // would produce for this array mode. For the simple shapes a single
    // masked compare suffices; SlowPutArrayStorage accepts a range of shapes
    // and therefore needs a two-stage branch with a phi.
    LValue isArrayTypeForArrayify(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage: {
            // Writes must also reject copy-on-write butterflies, so include
            // the CoW bit in the mask (CoW bit clear is required to match).
            IndexingType indexingModeMask = IsArray | IndexingShapeMask;
            if (arrayMode.action() == Array::Write)
                indexingModeMask |= CopyOnWrite;

            IndexingType shape = arrayMode.shapeMask();
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(IsArray | shape));

            case Array::NonArray:
            case Array::OriginalNonArray:
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(shape));

            case Array::PossiblyArray:
                // Don't care about the IsArray bit: drop it from the mask.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask & ~IsArray)),
                    m_out.constInt32(shape));
            }
            break;
        }

        case Array::SlowPutArrayStorage: {
            ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            LBasicBlock trueCase = m_out.newBlock();
            LBasicBlock checkCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Stage 1: shape must be in [ArrayStorageShape,
            // SlowPutArrayStorageShape], checked with the subtract-and-
            // unsigned-compare range trick.
            ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
            LValue isAnArrayStorageShape = m_out.belowOrEqual(
                m_out.sub(
                    m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)),
                    m_out.constInt32(ArrayStorageShape)),
                m_out.constInt32(SlowPutArrayStorageShape - ArrayStorageShape));
            m_out.branch(isAnArrayStorageShape, unsure(checkCase), unsure(continuation));

            // Stage 2: check the IsArray bit according to the array class.
            LBasicBlock lastNext = m_out.appendTo(checkCase, trueCase);
            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                m_out.branch(
                    m_out.testNonZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::NonArray:
            case Array::OriginalNonArray:
                m_out.branch(
                    m_out.testIsZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::PossiblyArray:
                m_out.jump(trueCase);
                break;
            }

            m_out.appendTo(trueCase, continuation);
            ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, falseValue, trueValue);
        }

        default:
            break;
        }
        DFG_CRASH(m_graph, m_node, "Corrupt array class");
    }
16021
    // Tests whether the cell matches the array mode for CheckArray: indexed
    // shapes defer to isArrayTypeForArrayify; arguments objects and typed
    // arrays compare the JSType directly.
    LValue isArrayTypeForCheckArray(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            return isArrayTypeForArrayify(cell, arrayMode);

        case Array::DirectArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(DirectArgumentsType));

        case Array::ScopedArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(ScopedArgumentsType));

        default:
            // Remaining modes are typed-array modes; match the JSType.
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType())));
        }
    }
16049
16050 LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
16051 {
16052 if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
16053 return proven;
16054 return isType(cell, JSFunctionType);
16055 }
16056 LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
16057 {
16058 if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
16059 return proven;
16060 return isNotType(cell, JSFunctionType);
16061 }
16062
16063 LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
16064 {
16065 if (!(type & SpecObjectOther))
16066 return m_out.booleanFalse;
16067 return m_out.testNonZero32(
16068 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
16069 m_out.constInt32(MasqueradesAsUndefined | OverridesGetCallData));
16070 }
16071
16072 LValue isType(LValue cell, JSType type)
16073 {
16074 return m_out.equal(
16075 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
16076 m_out.constInt32(type));
16077 }
16078
16079 LValue isNotType(LValue cell, JSType type)
16080 {
16081 return m_out.logicalNot(isType(cell, type));
16082 }
16083
    // Emits a type check that |cell| is an object; a failing check OSR-exits and
    // filters the edge's proven type down to SpecObject.
    void speculateObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
    }
16088
16089 void speculateObject(Edge edge)
16090 {
16091 speculateObject(edge, lowCell(edge));
16092 }
16093
16094 void speculateArray(Edge edge, LValue cell)
16095 {
16096 FTL_TYPE_CHECK(
16097 jsValueValue(cell), edge, SpecArray, isNotType(cell, ArrayType));
16098 }
16099
16100 void speculateArray(Edge edge)
16101 {
16102 speculateArray(edge, lowCell(edge));
16103 }
16104
    // Emits a type check that |cell| is a JSFunction.
    void speculateFunction(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
    }
16109
16110 void speculateFunction(Edge edge)
16111 {
16112 speculateFunction(edge, lowCell(edge));
16113 }
16114
    // Speculates that the edge's value is either an object or "other"
    // (null/undefined). Splits control flow on cell-ness and checks each side
    // separately, filtering the edge's type accordingly.
    void speculateObjectOrOther(Edge edge)
    {
        // Early-out: no check needed if the abstract interpreter already proved it.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        // Cell side: must be an object. The filter keeps all non-cell bits since
        // this branch says nothing about non-cells.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));

        m_out.jump(continuation);

        m_out.appendTo(primitiveCase, continuation);

        // Non-cell side: must be "other" (null/undefined).
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16144
16145 void speculateFinalObject(Edge edge, LValue cell)
16146 {
16147 FTL_TYPE_CHECK(
16148 jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
16149 }
16150
16151 void speculateFinalObject(Edge edge)
16152 {
16153 speculateFinalObject(edge, lowCell(edge));
16154 }
16155
16156 void speculateRegExpObject(Edge edge, LValue cell)
16157 {
16158 FTL_TYPE_CHECK(
16159 jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType));
16160 }
16161
16162 void speculateRegExpObject(Edge edge)
16163 {
16164 speculateRegExpObject(edge, lowCell(edge));
16165 }
16166
16167 void speculateProxyObject(Edge edge, LValue cell)
16168 {
16169 FTL_TYPE_CHECK(
16170 jsValueValue(cell), edge, SpecProxyObject, isNotType(cell, ProxyObjectType));
16171 }
16172
16173 void speculateProxyObject(Edge edge)
16174 {
16175 speculateProxyObject(edge, lowCell(edge));
16176 }
16177
16178 void speculateDerivedArray(Edge edge, LValue cell)
16179 {
16180 FTL_TYPE_CHECK(
16181 jsValueValue(cell), edge, SpecDerivedArray, isNotType(cell, DerivedArrayType));
16182 }
16183
16184 void speculateDerivedArray(Edge edge)
16185 {
16186 speculateDerivedArray(edge, lowCell(edge));
16187 }
16188
16189 void speculateMapObject(Edge edge, LValue cell)
16190 {
16191 FTL_TYPE_CHECK(
16192 jsValueValue(cell), edge, SpecMapObject, isNotType(cell, JSMapType));
16193 }
16194
16195 void speculateMapObject(Edge edge)
16196 {
16197 speculateMapObject(edge, lowCell(edge));
16198 }
16199
16200 void speculateSetObject(Edge edge, LValue cell)
16201 {
16202 FTL_TYPE_CHECK(
16203 jsValueValue(cell), edge, SpecSetObject, isNotType(cell, JSSetType));
16204 }
16205
16206 void speculateSetObject(Edge edge)
16207 {
16208 speculateSetObject(edge, lowCell(edge));
16209 }
16210
16211 void speculateWeakMapObject(Edge edge, LValue cell)
16212 {
16213 FTL_TYPE_CHECK(
16214 jsValueValue(cell), edge, SpecWeakMapObject, isNotType(cell, JSWeakMapType));
16215 }
16216
16217 void speculateWeakMapObject(Edge edge)
16218 {
16219 speculateWeakMapObject(edge, lowCell(edge));
16220 }
16221
16222 void speculateWeakSetObject(Edge edge, LValue cell)
16223 {
16224 FTL_TYPE_CHECK(
16225 jsValueValue(cell), edge, SpecWeakSetObject, isNotType(cell, JSWeakSetType));
16226 }
16227
16228 void speculateWeakSetObject(Edge edge)
16229 {
16230 speculateWeakSetObject(edge, lowCell(edge));
16231 }
16232
16233 void speculateDataViewObject(Edge edge, LValue cell)
16234 {
16235 FTL_TYPE_CHECK(
16236 jsValueValue(cell), edge, SpecDataViewObject, isNotType(cell, DataViewType));
16237 }
16238
16239 void speculateDataViewObject(Edge edge)
16240 {
16241 speculateDataViewObject(edge, lowCell(edge));
16242 }
16243
    // Emits a type check that |cell| is a JSString.
    void speculateString(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString, isNotString(cell));
    }
16248
16249 void speculateString(Edge edge)
16250 {
16251 speculateString(edge, lowCell(edge));
16252 }
16253
    // Speculates that |value| is either a string or "other" (null/undefined),
    // branching on cell-ness and checking each side separately.
    void speculateStringOrOther(Edge edge, LValue value)
    {
        // Early-out: no check needed if the abstract interpreter already proved it.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);

        // Cell side: must be a string; keep all non-cell bits in the filter.
        FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));

        m_out.jump(continuation);
        m_out.appendTo(notCellCase, continuation);

        // Non-cell side: must be "other" (null/undefined).
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
16277
16278 void speculateStringOrOther(Edge edge)
16279 {
16280 speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation));
16281 }
16282
    // Speculates that |string| is an identifier-like string: not a rope, and its
    // StringImpl is atomic. OSR-exits with BadType otherwise, then filters the
    // edge's proven type.
    void speculateStringIdent(Edge edge, LValue string, LValue stringImpl)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
            return;

        // Ropes have no StringImpl, so they can never be identifiers.
        speculate(BadType, jsValueValue(string), edge.node(), isRopeString(string));
        // The atomic flag in hashAndFlags marks identifier-usable strings.
        speculate(
            BadType, jsValueValue(string), edge.node(),
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIsAtomic())));
        m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
    }
16296
    // Convenience overload: lowStringIdent() performs the ident speculation as
    // part of lowering, so just invoking it is sufficient.
    void speculateStringIdent(Edge edge)
    {
        lowStringIdent(edge);
    }
16301
16302 void speculateStringObject(Edge edge)
16303 {
16304 if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
16305 return;
16306
16307 speculateStringObjectForCell(edge, lowCell(edge));
16308 }
16309
    // Speculates that the edge is either a JSString or a StringObject. Fast-path
    // branches on StringType; only non-strings pay for the StringObjectType check.
    void speculateStringOrStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LValue cellBase = lowCell(edge);
        // lowCell() may itself have refined the proven type; re-check so we can
        // skip emitting the control flow when it did.
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LBasicBlock notString = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue type = m_out.load8ZeroExt32(cellBase, m_heaps.JSCell_typeInfoType);
        m_out.branch(
            m_out.equal(type, m_out.constInt32(StringType)),
            unsure(continuation), unsure(notString));

        LBasicBlock lastNext = m_out.appendTo(notString, continuation);
        // Not a string: must be a StringObject, or we OSR-exit with BadType.
        speculate(
            BadType, jsValueValue(cellBase), edge.node(),
            m_out.notEqual(type, m_out.constInt32(StringObjectType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        m_interpreter.filter(edge, SpecString | SpecStringObject);
    }
16336
    // Emits a type check that |cell| is a StringObject (exact JSType match).
    void speculateStringObjectForCell(Edge edge, LValue cell)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecStringObject, m_out.notEqual(type, m_out.constInt32(StringObjectType)));
    }
16345
    // Emits a type check that |cell| is a Symbol.
    void speculateSymbol(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol, isNotSymbol(cell));
    }
16350
16351 void speculateSymbol(Edge edge)
16352 {
16353 speculateSymbol(edge, lowCell(edge));
16354 }
16355
    // Emits a type check that |cell| is a BigInt.
    void speculateBigInt(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecBigInt, isNotBigInt(cell));
    }
16360
16361 void speculateBigInt(Edge edge)
16362 {
16363 speculateBigInt(edge, lowCell(edge));
16364 }
16365
    // Speculates that |cell| is an object and — unless the global watchpoint
    // guarantees it — that it does not masquerade as undefined.
    void speculateNonNullObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
        // If the watchpoint holds, no object can masquerade as undefined, so the
        // flags check below is unnecessary.
        if (masqueradesAsUndefinedWatchpointIsStillValid())
            return;

        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
16378
    // Speculates that the edge's value is a bytecode number (int32 or double).
    void speculateNumber(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
    }
16384
    // Speculates that the edge is a "real" number: either a non-NaN double or an
    // int32. Only the NaN-looking path (doubleEqual fails for NaN) needs the
    // fallback int32 check.
    void speculateRealNumber(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        LValue doubleValue = unboxDouble(value);

        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // x == x is false only for NaN, so the common non-NaN-double case goes
        // straight to the continuation.
        m_out.branch(
            m_out.doubleEqual(doubleValue, doubleValue),
            usually(continuation), rarely(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

        // The unboxed double was NaN; the value is acceptable only if it is in
        // fact a boxed int32.
        typeCheck(
            jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
            isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16410
16411 void speculateDoubleRepReal(Edge edge)
16412 {
16413 // Do an early return here because lowDouble() can create a lot of control flow.
16414 if (!m_interpreter.needsTypeCheck(edge))
16415 return;
16416
16417 LValue value = lowDouble(edge);
16418 FTL_TYPE_CHECK(
16419 doubleValue(value), edge, SpecDoubleReal,
16420 m_out.doubleNotEqualOrUnordered(value, value));
16421 }
16422
16423 void speculateDoubleRepAnyInt(Edge edge)
16424 {
16425 if (!m_interpreter.needsTypeCheck(edge))
16426 return;
16427
16428 doubleToStrictInt52(edge, lowDouble(edge));
16429 }
16430
    // Speculates that the edge is a boolean; lowBoolean() emits the check as a
    // side effect of lowering.
    void speculateBoolean(Edge edge)
    {
        lowBoolean(edge);
    }
16435
    // Speculates that the edge is NOT a non-identifier string ("string var"):
    // non-cells and non-string cells pass through; strings must be idents.
    void speculateNotStringVar(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells trivially satisfy ~SpecStringVar.
        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        // Non-string cells also trivially satisfy it.
        m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));

        m_out.appendTo(isStringCase, continuation);
        // Strings must be identifiers (atomic, non-rope).
        speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16458
    // Speculates that the edge is NOT a Symbol; only cells need the runtime check.
    void speculateNotSymbol(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecSymbol))
            return;

        ASSERT(mayHaveTypeCheck(edge.useKind()));
        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells cannot be symbols, so they skip the check entirely.
        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        speculate(BadType, jsValueValue(value), edge.node(), isSymbol(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        m_interpreter.filter(edge, ~SpecSymbol);
    }
16480
16481 void speculateOther(Edge edge)
16482 {
16483 if (!m_interpreter.needsTypeCheck(edge))
16484 return;
16485
16486 LValue value = lowJSValue(edge, ManualOperandSpeculation);
16487 typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value));
16488 }
16489
16490 void speculateMisc(Edge edge)
16491 {
16492 if (!m_interpreter.needsTypeCheck(edge))
16493 return;
16494
16495 LValue value = lowJSValue(edge, ManualOperandSpeculation);
16496 typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value));
16497 }
16498
    // Speculates that the typed array |base| has not been neutered (detached).
    // Only wasteful-mode views can be neutered; for them a null vector pointer
    // indicates detachment.
    void speculateTypedArrayIsNotNeutered(LValue base)
    {
        LBasicBlock isWasteful = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue mode = m_out.load32(base, m_heaps.JSArrayBufferView_mode);
        m_out.branch(m_out.equal(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(isWasteful), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
        LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
        // Null vector => detached buffer => OSR exit.
        speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16515
16516 bool masqueradesAsUndefinedWatchpointIsStillValid()
16517 {
16518 return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic);
16519 }
16520
16521 LValue loadCellState(LValue base)
16522 {
16523 return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState);
16524 }
16525
    // Emits a GC store barrier for |base|. The fast path compares the cell state
    // against a threshold; when |isFenced|, a fence plus a re-check is inserted
    // before taking the slow path, which calls out to the runtime.
    void emitStoreBarrier(LValue base, bool isFenced)
    {
        LBasicBlock recheckPath = nullptr;
        if (isFenced)
            recheckPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);

        // Fenced barriers read the threshold from the heap (it can change);
        // unfenced barriers compare against the compile-time black threshold.
        LValue threshold;
        if (isFenced)
            threshold = m_out.load32(m_out.absolute(vm().heap.addressOfBarrierThreshold()));
        else
            threshold = m_out.constInt32(blackThreshold);

        m_out.branch(
            m_out.above(loadCellState(base), threshold),
            usually(continuation), rarely(isFenced ? recheckPath : slowPath));

        if (isFenced) {
            m_out.appendTo(recheckPath, slowPath);

            // Fence before re-reading the cell state to order it with other
            // heap accesses, then re-check before committing to the slow path.
            m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);

            m_out.branch(
                m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
                usually(continuation), rarely(slowPath));
        }

        m_out.appendTo(slowPath, continuation);

        // Slow path: the runtime performs the actual barrier work.
        LValue call = vmCall(Void, m_out.operation(operationWriteBarrierSlowPath), m_callFrame, base);
        m_heaps.decorateCCallRead(&m_heaps.root, call);
        m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16566
    // Emits a mutator fence. On x86 a fence is cheap enough to emit
    // unconditionally; elsewhere it is guarded by the heap's
    // mutatorShouldBeFenced flag so the common case stays fence-free.
    void mutatorFence()
    {
        if (isX86()) {
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);

        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16590
    // Stores |butterfly| into |object| with the structure "nuked" around the
    // store when fencing is required, so a concurrent GC never observes the new
    // butterfly paired with the old (valid) structure. On x86 the fenced
    // sequence is emitted unconditionally; elsewhere the plain store is used
    // unless the heap's mutatorShouldBeFenced flag is set.
    void nukeStructureAndSetButterfly(LValue butterfly, LValue object)
    {
        if (isX86()) {
            // Nuke the structure ID, fence, store the butterfly, fence. The
            // fences order the three writes for concurrent observers.
            m_out.store32(
                m_out.bitOr(
                    m_out.load32(object, m_heaps.JSCell_structureID),
                    m_out.constInt32(nukedStructureIDBit())),
                object, m_heaps.JSCell_structureID);
            m_out.fence(&m_heaps.root, nullptr);
            m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);

        // No concurrent collector active: a plain store suffices.
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Same nuke/fence/store/fence dance as the x86 path above.
        m_out.store32(
            m_out.bitOr(
                m_out.load32(object, m_heaps.JSCell_structureID),
                m_out.constInt32(nukedStructureIDBit())),
            object, m_heaps.JSCell_structureID);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16634
16635 LValue preciseIndexMask64(LValue value, LValue index, LValue limit)
16636 {
16637 return m_out.bitAnd(
16638 value,
16639 m_out.aShr(
16640 m_out.sub(
16641 index,
16642 m_out.opaque(limit)),
16643 m_out.constInt32(63)));
16644 }
16645
16646 LValue preciseIndexMask32(LValue value, LValue index, LValue limit)
16647 {
16648 return preciseIndexMask64(value, m_out.zeroExt(index, Int64), m_out.zeroExt(limit, Int64));
16649 }
16650
    // Calls a VM operation: records the call site (callPreflight), performs the
    // call, then either emits an exception check (if this node may exit) or, in
    // debug builds, asserts that no exception was actually raised.
    template<typename... Args>
    LValue vmCall(LType type, LValue function, Args&&... args)
    {
        callPreflight();
        LValue result = m_out.call(type, function, std::forward<Args>(args)...);
        if (mayExit(m_graph, m_node))
            callCheck();
        else {
            // We can't exit due to an exception, so we also can't throw an exception.
#ifndef NDEBUG
            // Debug-only: verify the no-exception assumption by trapping if the
            // VM's exception slot is non-zero after the call.
            LBasicBlock crash = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
            LValue hadException = m_out.notZero64(exception);

            m_out.branch(
                hadException, rarely(crash), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(crash, continuation);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);
#endif
        }
        return result;
    }
16678
16679 void callPreflight(CodeOrigin codeOrigin)
16680 {
16681 CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(codeOrigin);
16682 m_out.store32(
16683 m_out.constInt32(callSiteIndex.bits()),
16684 tagFor(CallFrameSlot::argumentCount));
16685 }
16686
16687 void callPreflight()
16688 {
16689 callPreflight(codeOriginDescriptionOfCallSite());
16690 }
16691
16692 CodeOrigin codeOriginDescriptionOfCallSite() const
16693 {
16694 CodeOrigin codeOrigin = m_node->origin.semantic;
16695 if (m_node->op() == TailCallInlinedCaller
16696 || m_node->op() == TailCallVarargsInlinedCaller
16697 || m_node->op() == TailCallForwardVarargsInlinedCaller
16698 || m_node->op() == DirectTailCallInlinedCaller) {
16699 // This case arises when you have a situation like this:
16700 // foo makes a call to bar, bar is inlined in foo. bar makes a call
16701 // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz,
16702 // and jaz is inlined in baz. We want the callframe for jaz to appear to
16703 // have caller be bar.
16704 codeOrigin = *codeOrigin.inlineCallFrame()->getCallerSkippingTailCalls();
16705 }
16706
16707 return codeOrigin;
16708 }
16709
    // Emits the post-call exception check: if the VM's exception slot is set,
    // either OSR-exit to a machine-frame catch handler (when one will catch) or
    // branch to the shared exception-handling block.
    void callCheck()
    {
        // Optional fuzzing hook that injects synthetic exceptions.
        if (Options::useExceptionFuzz())
            m_out.call(Void, m_out.operation(operationExceptionFuzz), m_callFrame);

        LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
        LValue hadException = m_out.notZero64(exception);

        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) {
            // A catch handler in this machine frame will handle the exception:
            // OSR-exit straight to it.
            bool exitOK = true;
            bool isExceptionHandler = true;
            appendOSRExit(
                ExceptionCheck, noValue(), nullptr, hadException,
                m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler);
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            hadException, rarely(m_handleExceptions), usually(continuation));

        m_out.appendTo(continuation);
    }
16736
    // Prepares a patchpoint to handle exceptions. If no handler will catch in
    // this machine frame, returns the default handle; otherwise appends an OSR
    // exit descriptor and the exit arguments to the patchpoint's stackmap and
    // returns a handle describing where those arguments start.
    RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value)
    {
        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler);
        if (!willCatchException)
            return PatchpointExceptionHandle::defaultHandle(m_ftlState);

        dataLogLnIf(verboseCompilationEnabled(), "    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        bool exitOK = true;
        NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);

        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr);

        // Compute the offset into the StackmapGenerationParams where we will find the exit arguments
        // we are about to append. We need to account for both the children we've already added, and
        // for the possibility of a result value if the patchpoint is not void.
        unsigned offset = value->numChildren();
        if (value->type() != Void)
            offset++;

        // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's
        // result and with any late-clobbered registers.
        value->appendVectorWithRep(
            buildExitArguments(exitDescriptor, opCatchOrigin, noValue()),
            ValueRep::LateColdAny);

        return PatchpointExceptionHandle::create(
            m_ftlState, exitDescriptor, origin, offset, *exceptionHandler);
    }
16768
16769 LBasicBlock lowBlock(DFG::BasicBlock* block)
16770 {
16771 return m_blocks.get(block);
16772 }
16773
16774 OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
16775 {
16776 return appendOSRExitDescriptor(lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue));
16777 }
16778
16779 OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, const MethodOfGettingAValueProfile& profile)
16780 {
16781 return &m_ftlState.jitCode->osrExitDescriptors.alloc(
16782 lowValue.format(), profile,
16783 availabilityMap().m_locals.numberOfArguments(),
16784 availabilityMap().m_locals.numberOfLocals());
16785 }
16786
16787 void appendOSRExit(
16788 ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
16789 NodeOrigin origin, bool isExceptionHandler = false)
16790 {
16791 return appendOSRExit(kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
16792 failCondition, origin, isExceptionHandler);
16793 }
16794
    // Appends an OSR exit that fires when |failCondition| is true. Optionally
    // widens the condition for OSR-exit fuzzing, then materializes the exit via
    // a B3 speculate value blessed with the exit's stackmap arguments.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        dataLogLnIf(verboseCompilationEnabled(), "    OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        DFG_ASSERT(m_graph, m_node, origin.exitOK);

        if (!isExceptionHandler
            && Options::useOSRExitFuzz()
            && canUseOSRExitFuzzing(m_graph.baselineCodeBlockFor(m_node->origin.semantic))
            && doOSRExitFuzzing()) {
            // Fuzzing: count checks globally and force the exit to fire at (or
            // after) a configured check number by OR-ing into the condition.
            LValue numberOfFuzzChecks = m_out.add(
                m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
                m_out.int32One);

            m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));

            if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
            }
            if (unsigned at = Options::fireOSRExitFuzzAt()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
            }
        }

        // Statically-false conditions need no exit at all.
        if (failCondition == m_out.booleanFalse)
            return;

        blessSpeculation(
            m_out.speculate(failCondition), kind, lowValue, profile, origin);
    }
16831
16832 void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
16833 {
16834 blessSpeculation(value, kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue), origin);
16835 }
16836
    // Attaches an OSR exit to a B3 CheckValue: appends a descriptor, feeds the
    // exit's live values into the check's stackmap, and installs a generator
    // that emits the exit thunk at code-generation time.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, NodeOrigin origin)
    {
        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, profile);

        value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));

        // Capture m_ftlState by raw pointer (copied into the lambda) because the
        // generator runs later, during B3 code generation.
        State* state = &m_ftlState;
        value->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                exitDescriptor->emitOSRExit(
                    *state, kind, origin, jit, params, 0);
            });
    }
16850
16851 StackmapArgumentList buildExitArguments(
16852 OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue,
16853 unsigned offsetOfExitArgumentsInStackmapLocations = 0)
16854 {
16855 StackmapArgumentList result;
16856 buildExitArguments(
16857 exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations);
16858 return result;
16859 }
16860
    // Populates |arguments| (the stackmap inputs) and the descriptor's exit
    // values/materializations for an OSR exit at |exitOrigin|: prunes the
    // availability map by bytecode liveness, creates materializations for
    // phantom allocations, then records an ExitValue for every local and every
    // tracked heap location.
    void buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        if (!!lowValue)
            arguments.append(lowValue.value());

        AvailabilityMap availabilityMap = this->availabilityMap();
        availabilityMap.pruneByLiveness(m_graph, exitOrigin);

        // One materialization per phantom-allocation node that is available.
        HashMap<Node*, ExitTimeObjectMaterialization*> map;
        availabilityMap.forEachAvailability(
            [&] (Availability availability) {
                if (!availability.shouldUseNode())
                    return;

                Node* node = availability.node();
                if (!node->isPhantomAllocation())
                    return;

                auto result = map.add(node, nullptr);
                if (result.isNewEntry) {
                    result.iterator->value =
                        exitDescriptor->m_materializations.add(node->op(), node->origin.semantic);
                }
            });

        // Record an exit value for each argument/local slot.
        for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) {
            int operand = exitDescriptor->m_values.operandForIndex(i);

            Availability availability = availabilityMap.m_locals[i];

            if (Options::validateFTLOSRExitLiveness()
                && m_graph.m_plan.mode() != FTLForOSREntryMode) {

                // Sanity check: anything live in bytecode must be recoverable.
                if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
                    DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
            }
            ExitValue exitValue = exitValueForAvailability(arguments, map, availability);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            exitDescriptor->m_values[i] = exitValue;
        }

        // Record the fields of each phantom allocation into its materialization.
        for (auto heapPair : availabilityMap.m_heap) {
            Node* node = heapPair.key.base();
            ExitTimeObjectMaterialization* materialization = map.get(node);
            if (!materialization)
                DFG_CRASH(m_graph, m_node, toCString("Could not find materialization for ", node, " in ", availabilityMap).data());
            ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            materialization->add(
                heapPair.key.descriptor(),
                exitValue);
        }

        if (verboseCompilationEnabled()) {
            dataLog("        Exit values: ", exitDescriptor->m_values, "\n");
            if (!exitDescriptor->m_materializations.isEmpty()) {
                dataLog("        Materializations: \n");
                for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
                    dataLog("            ", pointerDump(materialization), "\n");
            }
        }
    }
16927
    // Converts an Availability into an ExitValue: flushed values are recovered
    // from the JS stack in their flush format; unflushed ones are recovered from
    // their node (or reported dead).
    ExitValue exitValueForAvailability(
        StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
        Availability availability)
    {
        FlushedAt flush = availability.flushedAt();
        switch (flush.format()) {
        case DeadFlush:
        case ConflictingFlush:
            if (availability.hasNode())
                return exitValueForNode(arguments, map, availability.node());

            // This means that the value is dead. It could be dead in bytecode or it could have
            // been killed by our DCE, which can sometimes kill things even if they were live in
            // bytecode.
            return ExitValue::dead();

        case FlushedJSValue:
        case FlushedCell:
        case FlushedBoolean:
            return ExitValue::inJSStack(flush.virtualRegister());

        case FlushedInt32:
            return ExitValue::inJSStackAsInt32(flush.virtualRegister());

        case FlushedInt52:
            return ExitValue::inJSStackAsInt52(flush.virtualRegister());

        case FlushedDouble:
            return ExitValue::inJSStackAsDouble(flush.virtualRegister());
        }

        // All formats are handled above; reaching here means corrupt state.
        DFG_CRASH(m_graph, m_node, "Invalid flush format");
        return ExitValue::dead();
    }
16962
16963 ExitValue exitValueForNode(
16964 StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
16965 Node* node)
16966 {
16967 // NOTE: In FTL->B3, we cannot generate code here, because m_output is positioned after the
16968 // stackmap value. Like all values, the stackmap value cannot use a child that is defined after
16969 // it.
16970
16971 ASSERT(node->shouldGenerate());
16972 ASSERT(node->hasResult());
16973
16974 if (node) {
16975 switch (node->op()) {
16976 case BottomValue:
16977 // This might arise in object materializations. I actually doubt that it would,
16978 // but it seems worthwhile to be conservative.
16979 return ExitValue::dead();
16980
16981 case JSConstant:
16982 case Int52Constant:
16983 case DoubleConstant:
16984 return ExitValue::constant(node->asJSValue());
16985
16986 default:
16987 if (node->isPhantomAllocation())
16988 return ExitValue::materializeNewObject(map.get(node));
16989 break;
16990 }
16991 }
16992
16993 LoweredNodeValue value = m_int32Values.get(node);
16994 if (isValid(value))
16995 return exitArgument(arguments, DataFormatInt32, value.value());
16996
16997 value = m_int52Values.get(node);
16998 if (isValid(value))
16999 return exitArgument(arguments, DataFormatInt52, value.value());
17000
17001 value = m_strictInt52Values.get(node);
17002 if (isValid(value))
17003 return exitArgument(arguments, DataFormatStrictInt52, value.value());
17004
17005 value = m_booleanValues.get(node);
17006 if (isValid(value))
17007 return exitArgument(arguments, DataFormatBoolean, value.value());
17008
17009 value = m_jsValueValues.get(node);
17010 if (isValid(value))
17011 return exitArgument(arguments, DataFormatJS, value.value());
17012
17013 value = m_doubleValues.get(node);
17014 if (isValid(value))
17015 return exitArgument(arguments, DataFormatDouble, value.value());
17016
17017 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17018 return ExitValue::dead();
17019 }
17020
17021 ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value)
17022 {
17023 ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
17024 arguments.append(value);
17025 return result;
17026 }
17027
17028 ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node)
17029 {
17030 ASSERT(node->shouldGenerate());
17031 ASSERT(node->hasResult());
17032
17033 switch (node->op()) {
17034 case JSConstant:
17035 case Int52Constant:
17036 case DoubleConstant:
17037 return ExitValue::constant(node->asJSValue());
17038
17039 default:
17040 break;
17041 }
17042
17043 LoweredNodeValue value = m_jsValueValues.get(node);
17044 if (isValid(value))
17045 return exitArgument(arguments, DataFormatJS, value.value());
17046
17047 value = m_int32Values.get(node);
17048 if (isValid(value))
17049 return exitArgument(arguments, DataFormatJS, boxInt32(value.value()));
17050
17051 value = m_booleanValues.get(node);
17052 if (isValid(value))
17053 return exitArgument(arguments, DataFormatJS, boxBoolean(value.value()));
17054
17055 // Doubles and Int52 have been converted by ValueRep()
17056 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17057 }
17058
17059 void setInt32(Node* node, LValue value)
17060 {
17061 m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
17062 }
17063 void setInt52(Node* node, LValue value)
17064 {
17065 m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
17066 }
17067 void setStrictInt52(Node* node, LValue value)
17068 {
17069 m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
17070 }
17071 void setInt52(Node* node, LValue value, Int52Kind kind)
17072 {
17073 switch (kind) {
17074 case Int52:
17075 setInt52(node, value);
17076 return;
17077
17078 case StrictInt52:
17079 setStrictInt52(node, value);
17080 return;
17081 }
17082
17083 DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
17084 }
17085 void setJSValue(Node* node, LValue value)
17086 {
17087 m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
17088 }
17089 void setBoolean(Node* node, LValue value)
17090 {
17091 m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
17092 }
17093 void setStorage(Node* node, LValue value)
17094 {
17095 m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
17096 }
17097 void setDouble(Node* node, LValue value)
17098 {
17099 m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
17100 }
17101
    // Convenience overloads: record a lowered value for m_node, the node currently being
    // lowered. Each simply forwards to the corresponding two-argument setter above.
    void setInt32(LValue value)
    {
        setInt32(m_node, value);
    }
    void setInt52(LValue value)
    {
        setInt52(m_node, value);
    }
    void setStrictInt52(LValue value)
    {
        setStrictInt52(m_node, value);
    }
    void setInt52(LValue value, Int52Kind kind)
    {
        setInt52(m_node, value, kind);
    }
    void setJSValue(LValue value)
    {
        setJSValue(m_node, value);
    }
    void setBoolean(LValue value)
    {
        setBoolean(m_node, value);
    }
    void setStorage(LValue value)
    {
        setStorage(m_node, value);
    }
    void setDouble(LValue value)
    {
        setDouble(m_node, value);
    }
17134
17135 bool isValid(const LoweredNodeValue& value)
17136 {
17137 if (!value)
17138 return false;
17139 if (!m_graph.m_ssaDominators->dominates(value.block(), m_highBlock))
17140 return false;
17141 return true;
17142 }
17143
    // Lazily registers the cell with the plan's weak-reference set so the compiled code's
    // dependence on it is known to the GC.
    void addWeakReference(JSCell* target)
    {
        m_graph.m_plan.weakReferences().addLazily(target);
    }
17148
    // Emits code that loads the Structure pointer for a cell. The 32-bit structure ID is
    // decoded through the structure ID table: the ID is shifted right past its entropy bits
    // to form the table index, and the entropy bits (shifted into pointer position) are
    // XORed with the stored entry to recover the real pointer.
    LValue loadStructure(LValue value)
    {
        LValue structureID = m_out.load32(value, m_heaps.JSCell_structureID);
        // Base address of the global structure ID table.
        LValue tableBase = m_out.loadPtr(m_out.absolute(vm().heap.structureIDTable().base()));
        // Index = ID >> s_numberOfEntropyBits (entropy lives in the low bits of the ID).
        LValue tableIndex = m_out.aShr(structureID, m_out.constInt32(StructureIDTable::s_numberOfEntropyBits));
        LValue entropyBits = m_out.shl(m_out.zeroExtPtr(structureID), m_out.constInt32(StructureIDTable::s_entropyBitsShiftForStructurePointer));
        TypedPointer address = m_out.baseIndex(m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
        LValue encodedStructureBits = m_out.loadPtr(address);
        // XOR strips the entropy that was mixed into the stored pointer.
        return m_out.bitXor(encodedStructureBits, entropyBits);
    }
17159
    // Pointer constant to the given cell; registers it as a weak reference first so the
    // dependence is tracked.
    LValue weakPointer(JSCell* pointer)
    {
        addWeakReference(pointer);
        return m_out.weakPointer(m_graph, pointer);
    }

    // Pointer constant for a value already frozen in the graph; no extra registration here.
    LValue frozenPointer(FrozenValue* value)
    {
        return m_out.weakPointer(value);
    }

    // 32-bit structure-ID constant for an already-registered structure.
    LValue weakStructureID(RegisteredStructure structure)
    {
        return m_out.constInt32(structure->id());
    }

    // Pointer constant to an already-registered (hence non-null) structure.
    LValue weakStructure(RegisteredStructure structure)
    {
        ASSERT(!!structure.get());
        return m_out.weakPointer(m_graph, structure.get());
    }
17181
    // Address of an operand's stack slot relative to an explicit base pointer, using the
    // per-variable abstract heap for alias analysis.
    TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
    {
        return m_out.address(base, m_heaps.variables[operand], offset);
    }
    // Payload half of a JSValue slot at an explicit base.
    TypedPointer payloadFor(LValue base, int operand)
    {
        return addressFor(base, operand, PayloadOffset);
    }
    // Tag half of a JSValue slot at an explicit base.
    TypedPointer tagFor(LValue base, int operand)
    {
        return addressFor(base, operand, TagOffset);
    }
    TypedPointer addressFor(int operand, ptrdiff_t offset = 0)
    {
        return addressFor(VirtualRegister(operand), offset);
    }
    // Chooses the base pointer: locals are addressed off m_captured, all other operands
    // off the call frame.
    TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0)
    {
        if (operand.isLocal())
            return addressFor(m_captured, operand.offset(), offset);
        return addressFor(m_callFrame, operand.offset(), offset);
    }
    TypedPointer payloadFor(int operand)
    {
        return payloadFor(VirtualRegister(operand));
    }
    TypedPointer payloadFor(VirtualRegister operand)
    {
        return addressFor(operand, PayloadOffset);
    }
    TypedPointer tagFor(int operand)
    {
        return tagFor(VirtualRegister(operand));
    }
    TypedPointer tagFor(VirtualRegister operand)
    {
        return addressFor(operand, TagOffset);
    }
17220
    // Accessors for the abstract interpreter's current knowledge about a node: the full
    // abstract value, its proven speculated type, its proven constant value (if any), and
    // its proven structure set. Edge overloads forward to the node overloads.
    AbstractValue abstractValue(Node* node)
    {
        return m_state.forNode(node);
    }
    AbstractValue abstractValue(Edge edge)
    {
        return abstractValue(edge.node());
    }

    SpeculatedType provenType(Node* node)
    {
        return abstractValue(node).m_type;
    }
    SpeculatedType provenType(Edge edge)
    {
        return provenType(edge.node());
    }

    JSValue provenValue(Node* node)
    {
        return abstractValue(node).m_value;
    }
    JSValue provenValue(Edge edge)
    {
        return provenValue(edge.node());
    }

    StructureAbstractValue abstractStructure(Node* node)
    {
        return abstractValue(node).m_structure;
    }
    StructureAbstractValue abstractStructure(Edge edge)
    {
        return abstractStructure(edge.node());
    }
17256
    // Emit a crash naming the block/node currently being lowered.
    void crash()
    {
        crash(m_highBlock, m_node);
    }
17261 void crash(DFG::BasicBlock* block, Node* node)
17262 {
17263 BlockIndex blockIndex = block->index;
17264 unsigned nodeIndex = node ? node->index() : UINT_MAX;
17265#if ASSERT_DISABLED
17266 m_out.patchpoint(Void)->setGenerator(
17267 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
17268 AllowMacroScratchRegisterUsage allowScratch(jit);
17269
17270 jit.move(CCallHelpers::TrustedImm32(blockIndex), GPRInfo::regT0);
17271 jit.move(CCallHelpers::TrustedImm32(nodeIndex), GPRInfo::regT1);
17272 if (node)
17273 jit.move(CCallHelpers::TrustedImm32(node->op()), GPRInfo::regT2);
17274 jit.abortWithReason(FTLCrash);
17275 });
17276#else
17277 m_out.call(
17278 Void,
17279 m_out.constIntPtr(ftlUnreachable),
17280 // We don't want the CodeBlock to have a weak pointer to itself because
17281 // that would cause it to always get collected.
17282 m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
17283 m_out.constInt32(nodeIndex));
17284#endif
17285 m_out.unreachable();
17286 }
17287
    // Value/local availability at the current lowering point, per the availability calculator.
    AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }

    VM& vm() { return m_graph.m_vm; }
    CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
17292
    Graph& m_graph; // The DFG graph being lowered.
    State& m_ftlState;
    AbstractHeapRepository m_heaps; // Abstract heaps used to annotate B3 memory operations.
    Output m_out; // Emission helper for B3 IR.
    Procedure& m_proc;

    LBasicBlock m_handleExceptions; // Shared lowered block for exception handling.
    HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks; // DFG block -> lowered B3 block.

    LValue m_callFrame; // Base pointer for non-local operand slots (see addressFor()).
    LValue m_captured; // Base pointer for local operand slots (see addressFor()).
    LValue m_tagTypeNumber;
    LValue m_tagMask;

    // Per-node lowered values, one map per representation; consulted by exitValueForNode()
    // via isValid(). A node may appear in more than one map.
    HashMap<Node*, LoweredNodeValue> m_int32Values;
    HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
    HashMap<Node*, LoweredNodeValue> m_int52Values;
    HashMap<Node*, LoweredNodeValue> m_jsValueValues;
    HashMap<Node*, LoweredNodeValue> m_booleanValues;
    HashMap<Node*, LoweredNodeValue> m_storageValues;
    HashMap<Node*, LoweredNodeValue> m_doubleValues;

    HashMap<Node*, LValue> m_phis;

    LocalOSRAvailabilityCalculator m_availabilityCalculator;

    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
    DFG::BasicBlock* m_highBlock; // DFG block currently being lowered.
    DFG::BasicBlock* m_nextHighBlock;
    LBasicBlock m_nextLowBlock;

    enum IndexMaskingMode { IndexMaskingDisabled, IndexMaskingEnabled };

    IndexMaskingMode m_indexMaskingMode;

    NodeOrigin m_origin;
    unsigned m_nodeIndex;
    Node* m_node; // Node currently being lowered; target of the one-argument set*() helpers.

    // These are used for validating AI state.
    HashMap<Node*, NodeSet> m_liveInToNode;
    HashMap<Node*, AbstractValue> m_aiCheckedNodes;
    String m_graphDump;
17337};
17338
17339} // anonymous namespace
17340
17341void lowerDFGToB3(State& state)
17342{
17343 LowerDFGToB3 lowering(state);
17344 lowering.lower();
17345}
17346
17347} } // namespace JSC::FTL
17348
17349#endif // ENABLE(FTL_JIT)
17350
17351