1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#pragma once
27
28#if ENABLE(JIT)
29
30#include "CodeBlock.h"
31#include "EntryFrame.h"
32#include "FPRInfo.h"
33#include "GPRInfo.h"
34#include "Heap.h"
35#include "InlineCallFrame.h"
36#include "JITAllocator.h"
37#include "JITCode.h"
38#include "MacroAssembler.h"
39#include "MarkedSpace.h"
40#include "RegisterAtOffsetList.h"
41#include "RegisterSet.h"
42#include "StackAlignment.h"
43#include "TagRegistersMode.h"
44#include "TypeofType.h"
45#include "VM.h"
46
47namespace JSC {
48
49typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);
50
51class AssemblyHelpers : public MacroAssembler {
52public:
53 AssemblyHelpers(CodeBlock* codeBlock)
54 : m_codeBlock(codeBlock)
55 , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
56 {
57 if (m_codeBlock) {
58 ASSERT(m_baselineCodeBlock);
59 ASSERT(!m_baselineCodeBlock->alternative());
60 ASSERT(m_baselineCodeBlock->jitType() == JITType::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
61 }
62 }
63
    // Accessors for the CodeBlock being compiled. Note: vm() dereferences
    // m_codeBlock, so it must not be called when the helper was constructed
    // with a null CodeBlock.
    CodeBlock* codeBlock() { return m_codeBlock; }
    VM& vm() { return *m_codeBlock->vm(); }
    AssemblerType_T& assembler() { return m_assembler; }
67
    // Debug-build assertion that the stack pointer is 16-byte aligned; emits
    // an abort with AHStackPointerMisaligned when it is not. Compiled out in
    // release builds and on ARM64.
    void checkStackPointerAlignment()
    {
        // This check is both unneeded and harder to write correctly for ARM64
#if !defined(NDEBUG) && !CPU(ARM64)
        Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
        abortWithReason(AHStackPointerMisaligned);
        stackPointerAligned.link(this);
#endif
    }
77
    // Stores a cell (register or immediate) as a boxed JSValue at |address|.
    // On 32-bit this writes the payload word plus an explicit CellTag.
    template<typename T>
    void storeCell(T cell, Address address)
    {
#if USE(JSVALUE64)
        store64(cell, address);
#else
        store32(cell, address.withOffset(PayloadOffset));
        store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
#endif
    }

    // Loads the cell pointer stored at |address| into |gpr|. On 32-bit only
    // the payload word is read; the tag word is not inspected.
    void loadCell(Address address, GPRReg gpr)
    {
#if USE(JSVALUE64)
        load64(address, gpr);
#else
        load32(address.withOffset(PayloadOffset), gpr);
#endif
    }
97
    // Stores the JSValue held in |regs| to |address| (payload + tag words on
    // 32-bit, one 64-bit store otherwise).
    void storeValue(JSValueRegs regs, Address address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
        store32(regs.tagGPR(), address.withOffset(TagOffset));
#endif
    }

    // Same as above, for a base+index addressing mode.
    void storeValue(JSValueRegs regs, BaseIndex address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
        store32(regs.tagGPR(), address.withOffset(TagOffset));
#endif
    }

    // Same as above, for an absolute address known at codegen time.
    void storeValue(JSValueRegs regs, void* address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset));
        store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset));
#endif
    }
127
    // Loads a JSValue from |address| into |regs|. On 32-bit the two loads are
    // ordered so that a base register aliasing the payload register is not
    // overwritten before both words have been read.
    void loadValue(Address address, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        load64(address, regs.gpr());
#else
        if (address.base == regs.payloadGPR()) {
            load32(address.withOffset(TagOffset), regs.tagGPR());
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
        } else {
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
            load32(address.withOffset(TagOffset), regs.tagGPR());
        }
#endif
    }

    // Same as above for base+index addressing. The tag register must not
    // alias base or index when the payload register does (asserted below).
    void loadValue(BaseIndex address, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        load64(address, regs.gpr());
#else
        if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) {
            // We actually could handle the case where the registers are aliased to both
            // tag and payload, but we don't for now.
            RELEASE_ASSERT(address.base != regs.tagGPR());
            RELEASE_ASSERT(address.index != regs.tagGPR());

            load32(address.withOffset(TagOffset), regs.tagGPR());
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
        } else {
            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
            load32(address.withOffset(TagOffset), regs.tagGPR());
        }
#endif
    }

    // Same as above for an absolute address. On 32-bit the payload register
    // doubles as a scratch for the address, which is safe because the Address
    // overload loads the payload word last in that aliasing case.
    void loadValue(void* address, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        load64(address, regs.gpr());
#else
        move(TrustedImmPtr(address), regs.payloadGPR());
        loadValue(Address(regs.payloadGPR()), regs);
#endif
    }

    // Loads the property at (object, offset) into |result|; out-of-line
    // implementation. Note that this clobbers offset.
    void loadProperty(GPRReg object, GPRReg offset, JSValueRegs result);
175
    // Moves a JSValue between register pairs, handling every aliasing case on
    // 32-bit: a full criss-cross (dest tag == src payload AND dest payload ==
    // src tag) becomes a swap; a half overlap moves the payload first; in all
    // other cases moving the tag first is safe.
    void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs)
    {
#if USE(JSVALUE32_64)
        if (destRegs.tagGPR() == srcRegs.payloadGPR()) {
            if (destRegs.payloadGPR() == srcRegs.tagGPR()) {
                swap(srcRegs.payloadGPR(), srcRegs.tagGPR());
                return;
            }
            move(srcRegs.payloadGPR(), destRegs.payloadGPR());
            move(srcRegs.tagGPR(), destRegs.tagGPR());
            return;
        }
        move(srcRegs.tagGPR(), destRegs.tagGPR());
        move(srcRegs.payloadGPR(), destRegs.payloadGPR());
#else
        move(srcRegs.gpr(), destRegs.gpr());
#endif
    }
194
    // Materializes the constant JSValue |value| into |regs| using non-Trusted
    // immediates (see moveTrustedValue for the TrustedImm variant).
    void moveValue(JSValue value, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        move(Imm64(JSValue::encode(value)), regs.gpr());
#else
        move(Imm32(value.tag()), regs.tagGPR());
        move(Imm32(value.payload()), regs.payloadGPR());
#endif
    }

    // As moveValue, but with TrustedImm* immediates.
    void moveTrustedValue(JSValue value, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        move(TrustedImm64(JSValue::encode(value)), regs.gpr());
#else
        move(TrustedImm32(value.tag()), regs.tagGPR());
        move(TrustedImm32(value.payload()), regs.payloadGPR());
#endif
    }

    // Stores the constant JSValue |value| directly to memory at |address|.
    void storeTrustedValue(JSValue value, Address address)
    {
#if USE(JSVALUE64)
        store64(TrustedImm64(JSValue::encode(value)), address);
#else
        store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
        store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
#endif
    }

    // Same as above, for a base+index addressing mode.
    void storeTrustedValue(JSValue value, BaseIndex address)
    {
#if USE(JSVALUE64)
        store64(TrustedImm64(JSValue::encode(value)), address);
#else
        store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
        store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
#endif
    }
234
235 Address addressFor(const RegisterAtOffset& entry)
236 {
237 return Address(GPRInfo::callFrameRegister, entry.offset());
238 }
239
240 void emitSave(const RegisterAtOffsetList& list)
241 {
242 for (const RegisterAtOffset& entry : list) {
243 if (entry.reg().isGPR())
244 storePtr(entry.reg().gpr(), addressFor(entry));
245 else
246 storeDouble(entry.reg().fpr(), addressFor(entry));
247 }
248 }
249
250 void emitRestore(const RegisterAtOffsetList& list)
251 {
252 for (const RegisterAtOffset& entry : list) {
253 if (entry.reg().isGPR())
254 loadPtr(addressFor(entry), entry.reg().gpr());
255 else
256 loadDouble(addressFor(entry), entry.reg().fpr());
257 }
258 }
259
    // Saves |codeBlock|'s callee-save registers into its frame, relative to
    // the frame pointer. Stack registers and all FPRs are excluded (only GPR
    // entries are stored here).
    void emitSaveCalleeSavesFor(CodeBlock* codeBlock)
    {
        ASSERT(codeBlock);

        const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
        RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
        unsigned registerCount = calleeSaves->size();

        for (unsigned i = 0; i < registerCount; i++) {
            RegisterAtOffset entry = calleeSaves->at(i);
            if (dontSaveRegisters.get(entry.reg()))
                continue;
            storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset()));
        }
    }
275
    enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame };

    // Saves |codeBlock|'s callee-save registers into the frame at
    // offsetVirtualRegister (frame-pointer relative). On 64-bit, when
    // tagRegisterMode is CopyBaselineCalleeSavedRegistersFromBaseFrame, any
    // register that is also a baseline callee-save is copied from the base
    // frame's slot (via |temp|) instead of from the live register.
    void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp)
    {
        ASSERT(codeBlock);

        const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
        RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
        unsigned registerCount = calleeSaves->size();

#if USE(JSVALUE64)
        RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
#endif

        for (unsigned i = 0; i < registerCount; i++) {
            RegisterAtOffset entry = calleeSaves->at(i);
            if (dontSaveRegisters.get(entry.reg()))
                continue;

            GPRReg registerToWrite;

#if USE(JSVALUE32_64)
            UNUSED_PARAM(tagRegisterMode);
            UNUSED_PARAM(temp);
#else
            if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) {
                registerToWrite = temp;
                loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite);
            } else
#endif
                registerToWrite = entry.reg().gpr();

            storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset()));
        }
    }
311
    // Restores |codeBlock|'s callee-save GPRs from its frame; the inverse of
    // emitSaveCalleeSavesFor(). Stack registers and FPRs are excluded.
    void emitRestoreCalleeSavesFor(CodeBlock* codeBlock)
    {
        ASSERT(codeBlock);

        const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
        RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
        unsigned registerCount = calleeSaves->size();

        for (unsigned i = 0; i < registerCount; i++) {
            RegisterAtOffset entry = calleeSaves->at(i);
            if (dontRestoreRegisters.get(entry.reg()))
                continue;
            loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr());
        }
    }
327
    // Saves the current CodeBlock's callee-save registers into its frame.
    void emitSaveCalleeSaves()
    {
        emitSaveCalleeSavesFor(codeBlock());
    }

    // Pushes the tag registers onto the stack and then re-materializes their
    // canonical values. 64-bit only; a no-op on 32-bit.
    void emitSaveThenMaterializeTagRegisters()
    {
#if USE(JSVALUE64)
#if CPU(ARM64)
        pushPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
#else
        push(GPRInfo::tagTypeNumberRegister);
        push(GPRInfo::tagMaskRegister);
#endif
        emitMaterializeTagCheckRegisters();
#endif
    }

    // Restores the current CodeBlock's callee-save registers from its frame.
    void emitRestoreCalleeSaves()
    {
        emitRestoreCalleeSavesFor(codeBlock());
    }

    // Pops the tag registers saved by emitSaveThenMaterializeTagRegisters(),
    // in the matching reverse order.
    void emitRestoreSavedTagRegisters()
    {
#if USE(JSVALUE64)
#if CPU(ARM64)
        popPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
#else
        pop(GPRInfo::tagMaskRegister);
        pop(GPRInfo::tagTypeNumberRegister);
#endif
#endif
    }
362
    // If you use this, be aware that vmGPR will get trashed.
    // Copies callee-saves into the VM's top entry frame buffer; |vmGPR| holds
    // the VM pointer on entry and is reused to hold the entry frame pointer.
    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(GPRReg vmGPR)
    {
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
        loadPtr(Address(vmGPR, VM::topEntryFrameOffset()), vmGPR);
        copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(vmGPR);
#else
        UNUSED_PARAM(vmGPR);
#endif
    }

    // Copies callee-saves into |topEntryFrame|'s buffer, loading the frame
    // pointer from the given C++-side slot into a free scratch GPR first.
    void copyCalleeSavesToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame)
    {
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
        const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() };
        GPRReg temp1 = usedRegisters.getFreeGPR(0);
        loadPtr(&topEntryFrame, temp1);
        copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(temp1);
#else
        UNUSED_PARAM(topEntryFrame);
#endif
    }

    // Variant taking the entry frame pointer already in a register.
    void copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRReg topEntryFrame)
    {
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
        copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(topEntryFrame);
#else
        UNUSED_PARAM(topEntryFrame);
#endif
    }

    // Inverse of the above; out-of-line implementation.
    void restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(EntryFrame*&);
396
    // Copies each VM callee-save register into the entry frame's buffer. A
    // register that the current CodeBlock has itself spilled is read from its
    // frame slot (via a temp GPR/FPR); one still live in a register is stored
    // directly. temp1 holds the buffer base; temp2/fpTemp are scratch.
    void copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
    {
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
        GPRReg temp1 = usedRegisters.getFreeGPR(0);
        GPRReg temp2 = usedRegisters.getFreeGPR(1);
        FPRReg fpTemp = usedRegisters.getFreeFPR();
        ASSERT(temp2 != InvalidGPRReg);

        ASSERT(codeBlock());

        // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer
        loadPtr(&topEntryFrame, temp1);
        addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), temp1);

        RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
        const RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
        RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
        unsigned registerCount = allCalleeSaves->size();

        for (unsigned i = 0; i < registerCount; i++) {
            RegisterAtOffset entry = allCalleeSaves->at(i);
            if (dontCopyRegisters.get(entry.reg()))
                continue;
            RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(entry.reg());

            if (entry.reg().isGPR()) {
                GPRReg regToStore;
                if (currentFrameEntry) {
                    // Load calleeSave from stack into temp register
                    regToStore = temp2;
                    loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore);
                } else
                    // Just store callee save directly
                    regToStore = entry.reg().gpr();

                storePtr(regToStore, Address(temp1, entry.offset()));
            } else {
                FPRReg fpRegToStore;
                if (currentFrameEntry) {
                    // Load calleeSave from stack into temp register
                    fpRegToStore = fpTemp;
                    loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore);
                } else
                    // Just store callee save directly
                    fpRegToStore = entry.reg().fpr();

                storeDouble(fpRegToStore, Address(temp1, entry.offset()));
            }
        }
#else
        UNUSED_PARAM(topEntryFrame);
        UNUSED_PARAM(usedRegisters);
#endif
    }
451
    // Loads the canonical tag-check constants (64-bit only):
    // tagTypeNumberRegister = TagTypeNumber,
    // tagMaskRegister = TagTypeNumber | TagBitTypeOther.
    void emitMaterializeTagCheckRegisters()
    {
#if USE(JSVALUE64)
        move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
        orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
#endif
    }
459
460 void clearStackFrame(GPRReg currentTop, GPRReg newTop, GPRReg temp, unsigned frameSize)
461 {
462 ASSERT(frameSize % stackAlignmentBytes() == 0);
463 if (frameSize <= 128) {
464 for (unsigned offset = 0; offset < frameSize; offset += sizeof(CPURegister))
465 storePtr(TrustedImm32(0), Address(currentTop, -8 - offset));
466 } else {
467 constexpr unsigned storeBytesPerIteration = stackAlignmentBytes();
468 constexpr unsigned storesPerIteration = storeBytesPerIteration / sizeof(CPURegister);
469
470 move(currentTop, temp);
471 Label zeroLoop = label();
472 subPtr(TrustedImm32(storeBytesPerIteration), temp);
473#if CPU(ARM64)
474 static_assert(storesPerIteration == 2, "clearStackFrame() for ARM64 assumes stack is 16 byte aligned");
475 storePair64(ARM64Registers::zr, ARM64Registers::zr, temp);
476#else
477 for (unsigned i = storesPerIteration; i-- != 0;)
478 storePtr(TrustedImm32(0), Address(temp, sizeof(CPURegister) * i));
479#endif
480 branchPtr(NotEqual, temp, newTop).linkTo(zeroLoop, this);
481 }
482 }
483
#if CPU(X86_64) || CPU(X86)
    // Bytes the prologue pushes below the return address (the call instruction
    // already pushed the return PC on x86; the prologue adds only the frame
    // pointer).
    static size_t prologueStackPointerDelta()
    {
        // Prologue only saves the framePointerRegister
        return sizeof(void*);
    }

    // Standard frame setup: push caller's fp, then fp = sp.
    void emitFunctionPrologue()
    {
        push(framePointerRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    // Tears down a frame whose sp was never adjusted: just pop fp.
    void emitFunctionEpilogueWithEmptyFrame()
    {
        pop(framePointerRegister);
    }

    // Full epilogue: restore sp from fp, then pop caller's fp.
    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        pop(framePointerRegister);
    }

    // On x86 the return address lives on the stack: preserving it pops it
    // into |reg|; restoring pushes it back so ret sees it again.
    void preserveReturnAddressAfterCall(GPRReg reg)
    {
        pop(reg);
    }

    void restoreReturnAddressBeforeReturn(GPRReg reg)
    {
        push(reg);
    }

    void restoreReturnAddressBeforeReturn(Address address)
    {
        push(address);
    }
#endif // CPU(X86_64) || CPU(X86)
523
#if CPU(ARM_THUMB2) || CPU(ARM64)
    // Bytes the prologue pushes: frame pointer plus the link register (ARM
    // keeps the return address in lr, so the callee must spill it itself).
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and linkRegister
        return 2 * sizeof(void*);
    }

    // Frame setup: tag the return address (pointer authentication where
    // supported — presumably a no-op elsewhere), push fp/lr, then fp = sp.
    void emitFunctionPrologue()
    {
        tagReturnAddress();
        pushPair(framePointerRegister, linkRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    // Tears down a frame whose sp was never adjusted: pop fp and lr.
    void emitFunctionEpilogueWithEmptyFrame()
    {
        popPair(framePointerRegister, linkRegister);
    }

    // Full epilogue: restore sp from fp, then pop fp/lr.
    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        emitFunctionEpilogueWithEmptyFrame();
    }

    // The return address lives in lr on ARM, so these are register moves/loads
    // rather than stack operations.
    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(linkRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, linkRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, linkRegister);
    }
#endif
564
#if CPU(MIPS)
    // Bytes the prologue pushes: frame pointer plus the return address
    // register (MIPS keeps the return address in $ra).
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and returnAddressRegister
        return 2 * sizeof(void*);
    }

    // Frame setup: push fp/$ra, then fp = sp.
    void emitFunctionPrologue()
    {
        pushPair(framePointerRegister, returnAddressRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    // Tears down a frame whose sp was never adjusted: pop fp and $ra.
    void emitFunctionEpilogueWithEmptyFrame()
    {
        popPair(framePointerRegister, returnAddressRegister);
    }

    // Full epilogue: restore sp from fp, then pop fp/$ra.
    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        emitFunctionEpilogueWithEmptyFrame();
    }

    // The return address lives in $ra, so these are register moves/loads.
    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(returnAddressRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, returnAddressRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, returnAddressRegister);
    }
#endif
604
    // Loads a pointer-sized call-frame-header slot (|entry| is a slot index
    // scaled by sizeof(Register)) from frame |from| into |to|.
    void emitGetFromCallFrameHeaderPtr(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
    {
        loadPtr(Address(from, entry * sizeof(Register)), to);
    }
    // 32-bit-load variant of the above.
    void emitGetFromCallFrameHeader32(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
    {
        load32(Address(from, entry * sizeof(Register)), to);
    }
#if USE(JSVALUE64)
    // 64-bit-load variant of the above.
    void emitGetFromCallFrameHeader64(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
    {
        load64(Address(from, entry * sizeof(Register)), to);
    }
#endif // USE(JSVALUE64)
    // Stores register |from| into slot |entry| of the current frame's header.
    void emitPutToCallFrameHeader(GPRReg from, int entry)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    // Stores a constant pointer into slot |entry| of the current frame header.
    void emitPutToCallFrameHeader(void* value, int entry)
    {
        storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    // Accessors for the caller-frame and return-PC slots of the current frame.
    void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to);
    }
    void emitPutCallerFrameToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
    }

    void emitPutReturnPCToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
    void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
646
    // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header
    // fields before the code from emitFunctionPrologue() has executed.
    // First, the access is via the stack pointer. Second, the address calculation must also take
    // into account that the stack pointer may not have been adjusted down for the return PC and/or
    // caller's frame pointer. On some platforms, the callee is responsible for pushing the
    // "link register" containing the return address in the function prologue.
#if USE(JSVALUE64)
    // Stores |from| into callee-frame-header slot |entry|, addressed relative
    // to sp and corrected by prologueStackPointerDelta() (see note above).
    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
    }
#else
    // 32-bit split stores: payload and tag words written separately.
    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    }

    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, int entry)
    {
        storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    }
#endif
669
    // Jumps taken when regs does not hold exactly |value|. On 32-bit the
    // payload comparison is skipped for empty/undefined/null values, whose
    // payload word carries no information.
    JumpList branchIfNotEqual(JSValueRegs regs, JSValue value)
    {
#if USE(JSVALUE64)
        return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value)));
#else
        JumpList result;
        result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag())));
        if (value.isEmpty() || value.isUndefinedOrNull())
            return result; // These don't have anything interesting in the payload.
        result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())));
        return result;
#endif
    }

    // Jump taken when regs holds exactly |value|. Same payload caveat as the
    // 32-bit path of branchIfNotEqual: only the tag is checked for
    // empty/undefined/null.
    Jump branchIfEqual(JSValueRegs regs, JSValue value)
    {
#if USE(JSVALUE64)
        return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value)));
#else
        Jump notEqual;
        // These don't have anything interesting in the payload.
        if (!value.isEmpty() && !value.isUndefinedOrNull())
            notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()));
        Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag()));
        if (notEqual.isSet())
            notEqual.link(this);
        return result;
#endif
    }
699
    // Jump taken when |reg| does not hold a cell. On 64-bit this tests the
    // tag-mask bits (from the tag register when mode == HaveTagRegisters,
    // otherwise as an immediate); on 32-bit it compares the tag word.
    Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        if (mode == HaveTagRegisters)
            return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister);
        return branchTest64(NonZero, reg, TrustedImm64(TagMask));
#else
        UNUSED_PARAM(mode);
        return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
    }

    // JSValueRegs overload: on 32-bit, the tag register carries the cell-ness.
    Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        return branchIfNotCell(regs.gpr(), mode);
#else
        return branchIfNotCell(regs.tagGPR(), mode);
#endif
    }

    // Jump taken when |reg| holds a cell; the complement of branchIfNotCell.
    Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        if (mode == HaveTagRegisters)
            return branchTest64(Zero, reg, GPRInfo::tagMaskRegister);
        return branchTest64(Zero, reg, TrustedImm64(TagMask));
#else
        UNUSED_PARAM(mode);
        return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag));
#endif
    }
    Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        return branchIfCell(regs.gpr(), mode);
#else
        return branchIfCell(regs.tagGPR(), mode);
#endif
    }
740
    // Jump taken when regs holds "other" (null or undefined). On 64-bit this
    // masks off the undefined bit and compares against ValueNull; on 32-bit it
    // sets the low tag bit to collapse the undefined tag onto the null tag —
    // NOTE(review): relies on UndefinedTag|1 == NullTag in the 32-bit tag
    // encoding; confirm against JSCJSValue.h. tempGPR is clobbered.
    Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        move(regs.gpr(), tempGPR);
        and64(TrustedImm32(~TagBitUndefined), tempGPR);
        return branch64(Equal, tempGPR, TrustedImm64(ValueNull));
#else
        or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag));
#endif
    }

    // Complement of branchIfOther: jump taken when regs is neither null nor
    // undefined. tempGPR is clobbered.
    Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        move(regs.gpr(), tempGPR);
        and64(TrustedImm32(~TagBitUndefined), tempGPR);
        return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull));
#else
        or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag));
#endif
    }
764
    // Jump taken when |gpr| holds a boxed int32. On 64-bit, int32s are the
    // values at or above TagTypeNumber (unsigned compare); on 32-bit the tag
    // word is compared with Int32Tag.
    Jump branchIfInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        if (mode == HaveTagRegisters)
            return branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
        return branch64(AboveOrEqual, gpr, TrustedImm64(TagTypeNumber));
#else
        UNUSED_PARAM(mode);
        return branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
#endif
    }

    Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        return branchIfInt32(regs.gpr(), mode);
#else
        return branchIfInt32(regs.tagGPR(), mode);
#endif
    }

    // Complement of branchIfInt32.
    Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        if (mode == HaveTagRegisters)
            return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister);
        return branch64(Below, gpr, TrustedImm64(TagTypeNumber));
#else
        UNUSED_PARAM(mode);
        return branch32(NotEqual, gpr, TrustedImm32(JSValue::Int32Tag));
#endif
    }

    Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        return branchIfNotInt32(regs.gpr(), mode);
#else
        return branchIfNotInt32(regs.tagGPR(), mode);
#endif
    }
806
    // Note that the tempGPR is not used in 64-bit mode.
    // Jump taken when regs holds a number (int32 or double). The 32-bit path
    // offsets the tag by one so all number tags compare Below LowestTag + 1.
    Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        UNUSED_PARAM(tempGPR);
        return branchIfNumber(regs.gpr(), mode);
#else
        UNUSED_PARAM(mode);
        ASSERT(tempGPR != InvalidGPRReg);
        add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
#endif
    }

#if USE(JSVALUE64)
    // 64-bit: any value with a TagTypeNumber bit set is a number.
    Jump branchIfNumber(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
    {
        if (mode == HaveTagRegisters)
            return branchTest64(NonZero, gpr, GPRInfo::tagTypeNumberRegister);
        return branchTest64(NonZero, gpr, TrustedImm64(TagTypeNumber));
    }
#endif

    // Note that the tempGPR is not used in 64-bit mode.
    // Complement of branchIfNumber.
    Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        UNUSED_PARAM(tempGPR);
        return branchIfNotNumber(regs.gpr(), mode);
#else
        UNUSED_PARAM(mode);
        add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
        return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
#endif
    }

#if USE(JSVALUE64)
    Jump branchIfNotNumber(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
    {
        if (mode == HaveTagRegisters)
            return branchTest64(Zero, gpr, GPRInfo::tagTypeNumberRegister);
        return branchTest64(Zero, gpr, TrustedImm64(TagTypeNumber));
    }
#endif
851
    // Jump taken when regs does not hold a double, given the caller already
    // knows the value is not an int32 (so "is a number" implies "is a
    // double" here).
    Jump branchIfNotDoubleKnownNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        if (mode == HaveTagRegisters)
            return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
        return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber));
#else
        UNUSED_PARAM(mode);
        return branch32(AboveOrEqual, regs.tagGPR(), TrustedImm32(JSValue::LowestTag));
#endif
    }
863
    // Note that the tempGPR is not used in 32-bit mode.
    // Jump taken when |gpr| holds a boolean. 64-bit: xor with ValueFalse and
    // test all bits except the low one — zero iff the value was ValueFalse or
    // ValueTrue. tempGPR is clobbered on 64-bit.
    Jump branchIfBoolean(GPRReg gpr, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        ASSERT(tempGPR != InvalidGPRReg);
        move(gpr, tempGPR);
        xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
        return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
#else
        UNUSED_PARAM(tempGPR);
        return branch32(Equal, gpr, TrustedImm32(JSValue::BooleanTag));
#endif
    }

    // Note that the tempGPR is not used in 32-bit mode.
    Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        return branchIfBoolean(regs.gpr(), tempGPR);
#else
        return branchIfBoolean(regs.tagGPR(), tempGPR);
#endif
    }

    // Note that the tempGPR is not used in 32-bit mode.
    // Complement of branchIfBoolean. tempGPR is clobbered on 64-bit.
    Jump branchIfNotBoolean(GPRReg gpr, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        ASSERT(tempGPR != InvalidGPRReg);
        move(gpr, tempGPR);
        xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
        return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
#else
        UNUSED_PARAM(tempGPR);
        return branch32(NotEqual, gpr, TrustedImm32(JSValue::BooleanTag));
#endif
    }

    // Note that the tempGPR is not used in 32-bit mode.
    Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR)
    {
#if USE(JSVALUE64)
        return branchIfNotBoolean(regs.gpr(), tempGPR);
#else
        return branchIfNotBoolean(regs.tagGPR(), tempGPR);
#endif
    }
911
    // Jump taken when the cell in cellGPR is an object; relies on all object
    // types being ordered at or above ObjectType in the JSType enum.
    Jump branchIfObject(GPRReg cellGPR)
    {
        return branch8(
            AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

    Jump branchIfNotObject(GPRReg cellGPR)
    {
        return branch8(
            Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

    // Exact-type checks on a cell's JSType byte.
    Jump branchIfType(GPRReg cellGPR, JSType type)
    {
        return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
    }

    Jump branchIfNotType(GPRReg cellGPR, JSType type)
    {
        return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
    }

    // Convenience exact-type checks for common cell types.
    Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); }
    Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); }
    Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); }
    Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); }
    Jump branchIfBigInt(GPRReg cellGPR) { return branchIfType(cellGPR, BigIntType); }
    Jump branchIfNotBigInt(GPRReg cellGPR) { return branchIfNotType(cellGPR, BigIntType); }
    Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); }
    Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); }
942
    // Jump taken when |gpr| holds the empty value (all-zero encoding on
    // 64-bit; EmptyValueTag on 32-bit).
    Jump branchIfEmpty(GPRReg gpr)
    {
#if USE(JSVALUE64)
        return branchTest64(Zero, gpr);
#else
        return branch32(Equal, gpr, TrustedImm32(JSValue::EmptyValueTag));
#endif
    }

    Jump branchIfEmpty(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfEmpty(regs.gpr());
#else
        return branchIfEmpty(regs.tagGPR());
#endif
    }

    // Complement of branchIfEmpty.
    Jump branchIfNotEmpty(GPRReg gpr)
    {
#if USE(JSVALUE64)
        return branchTest64(NonZero, gpr);
#else
        return branch32(NotEqual, gpr, TrustedImm32(JSValue::EmptyValueTag));
#endif
    }

    Jump branchIfNotEmpty(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfNotEmpty(regs.gpr());
#else
        return branchIfNotEmpty(regs.tagGPR());
#endif
    }
978
    // Branch if the value is exactly jsUndefined(). On 64-bit this compares the
    // full encoded value; on 32-bit only the tag word is compared.
    // Note that this function does not respect MasqueradesAsUndefined.
    Jump branchIfUndefined(GPRReg gpr)
    {
#if USE(JSVALUE64)
        return branch64(Equal, gpr, TrustedImm64(JSValue::encode(jsUndefined())));
#else
        return branch32(Equal, gpr, TrustedImm32(JSValue::UndefinedTag));
#endif
    }

    // Note that this function does not respect MasqueradesAsUndefined.
    Jump branchIfUndefined(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfUndefined(regs.gpr());
#else
        return branchIfUndefined(regs.tagGPR());
#endif
    }

    // Inverse of branchIfUndefined.
    // Note that this function does not respect MasqueradesAsUndefined.
    Jump branchIfNotUndefined(GPRReg gpr)
    {
#if USE(JSVALUE64)
        return branch64(NotEqual, gpr, TrustedImm64(JSValue::encode(jsUndefined())));
#else
        return branch32(NotEqual, gpr, TrustedImm32(JSValue::UndefinedTag));
#endif
    }

    // Note that this function does not respect MasqueradesAsUndefined.
    Jump branchIfNotUndefined(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfNotUndefined(regs.gpr());
#else
        return branchIfNotUndefined(regs.tagGPR());
#endif
    }
1018
    // Branch if the value is exactly jsNull(). On 64-bit this compares the full
    // encoded value; on 32-bit only the tag word is compared.
    Jump branchIfNull(GPRReg gpr)
    {
#if USE(JSVALUE64)
        return branch64(Equal, gpr, TrustedImm64(JSValue::encode(jsNull())));
#else
        return branch32(Equal, gpr, TrustedImm32(JSValue::NullTag));
#endif
    }

    Jump branchIfNull(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfNull(regs.gpr());
#else
        return branchIfNull(regs.tagGPR());
#endif
    }

    // Inverse of branchIfNull.
    Jump branchIfNotNull(GPRReg gpr)
    {
#if USE(JSVALUE64)
        return branch64(NotEqual, gpr, TrustedImm64(JSValue::encode(jsNull())));
#else
        return branch32(NotEqual, gpr, TrustedImm32(JSValue::NullTag));
#endif
    }

    Jump branchIfNotNull(JSValueRegs regs)
    {
#if USE(JSVALUE64)
        return branchIfNotNull(regs.gpr());
#else
        return branchIfNotNull(regs.tagGPR());
#endif
    }
1054
    // Compare against a known Structure. On 64-bit the cell header stores a
    // 32-bit structure ID, so a 32-bit compare against structure->id() is used;
    // on 32-bit the full Structure pointer is compared. leftHandSide may be any
    // operand type branch32/branchPtr accepts (register or address).
    template<typename T>
    Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure)
    {
#if USE(JSVALUE64)
        return branch32(condition, leftHandSide, TrustedImm32(structure->id()));
#else
        return branchPtr(condition, leftHandSide, TrustedImmPtr(structure));
#endif
    }
1064
1065 Jump branchIfFastTypedArray(GPRReg baseGPR);
1066 Jump branchIfNotFastTypedArray(GPRReg baseGPR);
1067
    // NaN is the only double that compares unordered with itself, so a
    // self-comparison suffices for both tests below.
    Jump branchIfNaN(FPRReg fpr)
    {
        return branchDouble(DoubleNotEqualOrUnordered, fpr, fpr);
    }

    Jump branchIfNotNaN(FPRReg fpr)
    {
        return branchDouble(DoubleEqual, fpr, fpr);
    }

    // Rope strings are flagged by the JSString::isRopeInPointer bit set in the
    // fiber/StringImpl pointer; test that bit directly.
    Jump branchIfRopeStringImpl(GPRReg stringImplGPR)
    {
        return branchTestPtr(NonZero, stringImplGPR, TrustedImm32(JSString::isRopeInPointer));
    }

    Jump branchIfNotRopeStringImpl(GPRReg stringImplGPR)
    {
        return branchTestPtr(Zero, stringImplGPR, TrustedImm32(JSString::isRopeInPointer));
    }
1087
    // Address at a raw byte offset from the call frame register.
    static Address addressForByteOffset(ptrdiff_t byteOffset)
    {
        return Address(GPRInfo::callFrameRegister, byteOffset);
    }
    // Address of a virtual register's stack slot relative to an arbitrary base
    // register (useful when the frame pointer lives somewhere non-standard).
    static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg)
    {
        ASSERT(virtualRegister.isValid());
        return Address(baseReg, virtualRegister.offset() * sizeof(Register));
    }
    // Address of a virtual register's slot relative to the call frame register.
    static Address addressFor(VirtualRegister virtualRegister)
    {
        // NB. It's tempting on some architectures to sometimes use an offset from the stack
        // register because for some offsets that will encode to a smaller instruction. But we
        // cannot do this. We use this in places where the stack pointer has been moved to some
        // unpredictable location.
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(int operand)
    {
        return addressFor(static_cast<VirtualRegister>(operand));
    }
1110
1111 static Address tagFor(VirtualRegister virtualRegister, GPRReg baseGPR)
1112 {
1113 ASSERT(virtualRegister.isValid());
1114 return Address(baseGPR, virtualRegister.offset() * sizeof(Register) + TagOffset);
1115 }
1116 static Address tagFor(VirtualRegister virtualRegister)
1117 {
1118 ASSERT(virtualRegister.isValid());
1119 return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
1120 }
1121 static Address tagFor(int operand)
1122 {
1123 return tagFor(static_cast<VirtualRegister>(operand));
1124 }
1125
1126 static Address payloadFor(VirtualRegister virtualRegister, GPRReg baseGPR)
1127 {
1128 ASSERT(virtualRegister.isValid());
1129 return Address(baseGPR, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
1130 }
1131 static Address payloadFor(VirtualRegister virtualRegister)
1132 {
1133 ASSERT(virtualRegister.isValid());
1134 return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
1135 }
1136 static Address payloadFor(int operand)
1137 {
1138 return payloadFor(static_cast<VirtualRegister>(operand));
1139 }
1140
    // Access to our fixed callee CallFrame. The callee frame sits just below
    // the current stack pointer; slots are addressed relative to SP, skipping
    // the CallerFrameAndPC header.
    static Address calleeFrameSlot(int slot)
    {
        ASSERT(slot >= CallerFrameAndPC::sizeInRegisters);
        return Address(stackPointerRegister, sizeof(Register) * (slot - CallerFrameAndPC::sizeInRegisters));
    }

    // Access to our fixed callee CallFrame.
    static Address calleeArgumentSlot(int argument)
    {
        return calleeFrameSlot(virtualRegisterForArgument(argument).offset());
    }

    // Tag/payload views of callee frame slots and argument slots.
    static Address calleeFrameTagSlot(int slot)
    {
        return calleeFrameSlot(slot).withOffset(TagOffset);
    }

    static Address calleeFramePayloadSlot(int slot)
    {
        return calleeFrameSlot(slot).withOffset(PayloadOffset);
    }

    static Address calleeArgumentTagSlot(int argument)
    {
        return calleeArgumentSlot(argument).withOffset(TagOffset);
    }

    static Address calleeArgumentPayloadSlot(int argument)
    {
        return calleeArgumentSlot(argument).withOffset(PayloadOffset);
    }

    // Address of the caller-frame field within the callee frame's header.
    static Address calleeFrameCallerFrame()
    {
        return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
    }
1178
1179 static GPRReg selectScratchGPR(RegisterSet preserved)
1180 {
1181 GPRReg registers[] = {
1182 GPRInfo::regT0,
1183 GPRInfo::regT1,
1184 GPRInfo::regT2,
1185 GPRInfo::regT3,
1186 GPRInfo::regT4,
1187 GPRInfo::regT5,
1188 };
1189
1190 for (GPRReg reg : registers) {
1191 if (!preserved.contains(reg))
1192 return reg;
1193 }
1194 RELEASE_ASSERT_NOT_REACHED();
1195 return InvalidGPRReg;
1196 }
1197
    // Variadic convenience overload: accepts any mix of GPRReg and JSValueRegs
    // arguments and picks a scratch register not among them.
    template<typename... Regs>
    static GPRReg selectScratchGPR(Regs... args)
    {
        RegisterSet set;
        constructRegisterSet(set, args...);
        return selectScratchGPR(set);
    }

    // Recursion base case for the variadic builders below.
    static void constructRegisterSet(RegisterSet&)
    {
    }

    // Adds both halves of a JSValueRegs (skipping invalid registers, e.g. the
    // nonexistent tag register on 64-bit), then recurses on the rest.
    template<typename... Regs>
    static void constructRegisterSet(RegisterSet& set, JSValueRegs regs, Regs... args)
    {
        if (regs.tagGPR() != InvalidGPRReg)
            set.set(regs.tagGPR());
        if (regs.payloadGPR() != InvalidGPRReg)
            set.set(regs.payloadGPR());
        constructRegisterSet(set, args...);
    }

    // Adds a single GPR (if valid), then recurses on the rest.
    template<typename... Regs>
    static void constructRegisterSet(RegisterSet& set, GPRReg reg, Regs... args)
    {
        if (reg != InvalidGPRReg)
            set.set(reg);
        constructRegisterSet(set, args...);
    }
1227
    // Add a debug call. This call has no effect on JIT code execution state.
    void debugCall(VM&, V_DebugOperation_EPP function, void* argument);

    // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
    // In release builds (ASSERT_DISABLED) they compile to empty inline no-ops,
    // so call sites need no conditional compilation.
#if !ASSERT_DISABLED
    void jitAssertIsInt32(GPRReg);
    void jitAssertIsJSInt32(GPRReg);
    void jitAssertIsJSNumber(GPRReg);
    void jitAssertIsJSDouble(GPRReg);
    void jitAssertIsCell(GPRReg);
    void jitAssertHasValidCallFrame();
    void jitAssertIsNull(GPRReg);
    void jitAssertTagsInPlace();
    void jitAssertArgumentCountSane();
#else
    void jitAssertIsInt32(GPRReg) { }
    void jitAssertIsJSInt32(GPRReg) { }
    void jitAssertIsJSNumber(GPRReg) { }
    void jitAssertIsJSDouble(GPRReg) { }
    void jitAssertIsCell(GPRReg) { }
    void jitAssertHasValidCallFrame() { }
    void jitAssertIsNull(GPRReg) { }
    void jitAssertTagsInPlace() { }
    void jitAssertArgumentCountSane() { }
#endif
1253
1254 void jitReleaseAssertNoException(VM&);
1255
1256 void incrementSuperSamplerCount();
1257 void decrementSuperSamplerCount();
1258
1259 void purifyNaN(FPRReg);
1260
1261 // These methods convert between doubles, and doubles boxed and JSValues.
#if USE(JSVALUE64)
    // Box a double into the 64-bit JSValue encoding by subtracting
    // TagTypeNumber from its bit pattern. When the tag registers are live we
    // use the register form and assert the result looks like a boxed double.
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
    {
        moveDoubleTo64(fpr, gpr);
        if (mode == DoNotHaveTagRegisters)
            sub64(TrustedImm64(TagTypeNumber), gpr);
        else {
            sub64(GPRInfo::tagTypeNumberRegister, gpr);
            jitAssertIsJSDouble(gpr);
        }
        return gpr;
    }
    // Undo the boxing (add TagTypeNumber back) and move the bits into fpr.
    // gpr is not clobbered; the intermediate lands in resultGPR.
    FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
    {
        add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR);
        move64ToDouble(resultGPR, fpr);
        return fpr;
    }
    // Same as above, but first asserts (in debug) that gpr holds a boxed double.
    FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
    {
        jitAssertIsJSDouble(gpr);
        return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr);
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
    {
        boxDouble(fpr, regs.gpr(), mode);
    }

    // The trailing FPR scratch parameter is only needed on 32-bit; ignored here.
    void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg)
    {
        unboxDouble(regs.payloadGPR(), resultGPR, destFPR);
    }

    // Here are possible arrangements of source, target, scratch:
    // - source, target, scratch can all be separate registers.
    // - source and target can be the same but scratch is separate.
    // - target and scratch can be the same but source is separate.
    void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch)
    {
        // Is it an int32?
        signExtend32ToPtr(source, scratch);
        Jump isInt32 = branch64(Equal, source, scratch);

        // Nope, it doesn't fit in an int32; convert the full int64 value in
        // `source` to a double and box that instead.
        convertInt64ToDouble(source, fpScratch);
        boxDouble(fpScratch, target);
        Jump done = jump();

        // It fits in an int32: box as an immediate integer by tagging with
        // TagTypeNumber.
        isInt32.link(this);
        zeroExtend32ToPtr(source, target);
        or64(GPRInfo::tagTypeNumberRegister, target);

        done.link(this);
    }
#endif
1318
#if USE(JSVALUE32_64)
    // On 32-bit, a boxed double is simply its two 32-bit halves stored in the
    // payload and tag registers.
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        moveDoubleToInts(fpr, payloadGPR, tagGPR);
    }
    // Reassemble a double from its tag/payload halves. scratchFPR is needed by
    // moveIntsToDouble on some targets.
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
    }
    void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR)
    {
        unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR);
    }

    // The GPR scratch parameter is only needed on 64-bit; ignored here. The
    // source registers are left intact.
    void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR)
    {
        unboxDouble(regs, destFPR, scratchFPR);
    }
#endif
1343
    // Turn a 0/1 boolean in boolGPR into a boolean JSValue payload. On 64-bit,
    // adding ValueFalse maps 0 -> ValueFalse and 1 -> ValueTrue; on 32-bit the
    // payload is the raw 0/1.
    void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR)
    {
#if USE(JSVALUE64)
        add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR);
#else
        move(boolGPR, payloadGPR);
#endif
    }

    // Constant-boolean variant of the above.
    void boxBooleanPayload(bool value, GPRReg payloadGPR)
    {
#if USE(JSVALUE64)
        move(TrustedImm32(ValueFalse + value), payloadGPR);
#else
        move(TrustedImm32(value), payloadGPR);
#endif
    }

    // Box a 0/1 boolean into full JSValueRegs; on 32-bit this also sets the
    // BooleanTag in the tag register.
    void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs)
    {
        boxBooleanPayload(boolGPR, boxedRegs.payloadGPR());
#if USE(JSVALUE32_64)
        move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
#endif
    }

    void boxBoolean(bool value, JSValueRegs boxedRegs)
    {
        boxBooleanPayload(value, boxedRegs.payloadGPR());
#if USE(JSVALUE32_64)
        move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
#endif
    }
1377
    // Box an int32 as a JSValue. On 64-bit this ORs in TagTypeNumber (from the
    // tag register when available, otherwise as an immediate); on 32-bit it
    // sets the Int32Tag alongside the payload.
    void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters)
    {
#if USE(JSVALUE64)
        if (mode == DoNotHaveTagRegisters) {
            move(intGPR, boxedRegs.gpr());
            or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr());
        } else
            or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr());
#else
        UNUSED_PARAM(mode);
        move(intGPR, boxedRegs.payloadGPR());
        move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR());
#endif
    }
1392
    // Box a cell pointer as a JSValue. On 64-bit a cell pointer is already a
    // valid encoded JSValue; on 32-bit the CellTag must be set as well.
    void boxCell(GPRReg cellGPR, JSValueRegs boxedRegs)
    {
#if USE(JSVALUE64)
        move(cellGPR, boxedRegs.gpr());
#else
        move(cellGPR, boxedRegs.payloadGPR());
        move(TrustedImm32(JSValue::CellTag), boxedRegs.tagGPR());
#endif
    }
1402
1403 void callExceptionFuzz(VM&);
1404
    // Parameters for emitExceptionCheck: whether the branch is taken when an
    // exception IS present (normal) or is NOT present (inverted), and whether a
    // far jump is required to reach the handler.
    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
1407 JS_EXPORT_PRIVATE Jump emitExceptionCheck(
1408 VM&, ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
1409 JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck(VM&);
1410 Jump emitJumpIfException(VM&);
1411
#if ENABLE(SAMPLING_COUNTERS)
    // Emit code that bumps a sampling counter by `increment` at runtime.
    // The static form lets callers emit into an arbitrary assembler.
    static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
    void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
#endif
1422
1423#if ENABLE(SAMPLING_FLAGS)
1424 void setSamplingFlag(int32_t);
1425 void clearSamplingFlag(int32_t flag);
1426#endif
1427
    // Global object for the (possibly inlined) code origin, resolved through
    // the CodeBlock being compiled.
    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return codeBlock()->globalObjectFor(codeOrigin);
    }
1432
1433 bool isStrictModeFor(CodeOrigin codeOrigin)
1434 {
1435 auto* inlineCallFrame = codeOrigin.inlineCallFrame();
1436 if (!inlineCallFrame)
1437 return codeBlock()->isStrictMode();
1438 return inlineCallFrame->isStrictMode();
1439 }
1440
    // ECMAMode form of isStrictModeFor().
    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }
1445
1446 ExecutableBase* executableFor(const CodeOrigin& codeOrigin);
1447
    // Baseline CodeBlock for the (possibly inlined) code origin.
    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
    }
1452
1453 CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
1454 {
1455 if (!inlineCallFrame)
1456 return baselineCodeBlock();
1457 return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
1458 }
1459
    // The baseline CodeBlock this helper was constructed with.
    CodeBlock* baselineCodeBlock()
    {
        return m_baselineCodeBlock;
    }
1464
    // First virtual register of the arguments area for the given frame. For
    // inlined frames the arguments live in displaced stack slots recorded in
    // argumentsWithFixup; argument 1's recovery gives the start (argument 0 is
    // `this`).
    static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return VirtualRegister(CallFrame::argumentOffset(0));
        if (inlineCallFrame->argumentsWithFixup.size() <= 1)
            return virtualRegisterForLocal(0);
        ValueRecovery recovery = inlineCallFrame->argumentsWithFixup[1];
        RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
        return recovery.virtualRegister();
    }

    static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin)
    {
        return argumentsStart(codeOrigin.inlineCallFrame());
    }

    // Virtual register holding the argument count. Only meaningful for the
    // machine frame or for varargs inline frames (asserted below), since
    // non-varargs inline frames have a statically known count.
    static VirtualRegister argumentCount(InlineCallFrame* inlineCallFrame)
    {
        ASSERT(!inlineCallFrame || inlineCallFrame->isVarargs());
        if (!inlineCallFrame)
            return VirtualRegister(CallFrameSlot::argumentCount);
        return inlineCallFrame->argumentCountRegister;
    }

    static VirtualRegister argumentCount(const CodeOrigin& codeOrigin)
    {
        return argumentCount(codeOrigin.inlineCallFrame());
    }
1493
1494 void emitLoadStructure(VM&, RegisterID source, RegisterID dest, RegisterID scratch);
1495
    // Store a known Structure (and its associated cell-header bits) into the
    // cell at `dest`. The scratch register is unused for the immediate form.
    void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
    {
        emitStoreStructureWithTypeInfo(*this, structure, dest);
    }

    // Store a Structure held in a register into the cell at `dest`, copying the
    // header bits from the Structure itself.
    void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        // One 64-bit copy moves the structure ID together with the adjacent
        // indexing-type/type-info bytes of the cell header.
        load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
        store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#else
        // Store all the info flags using a single 32-bit wide load and store.
        load32(MacroAssembler::Address(structure, Structure::indexingModeIncludingHistoryOffset()), scratch);
        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));

        // Store the StructureID
        storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
    }
1515
1516 static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
1517
    // GC write-barrier helpers. Each barrierBranch* emits a compare of the
    // cell's state byte against blackThreshold and returns the jump taken when
    // the state is above the threshold. The fenced variants load the state into
    // a scratch register and compare against the VM's current barrier
    // threshold instead of the constant.
    Jump barrierBranchWithoutFence(GPRReg cell)
    {
        return branch8(Above, Address(cell, JSCell::cellStateOffset()), TrustedImm32(blackThreshold));
    }

    // Same check for a statically known cell, addressed absolutely.
    Jump barrierBranchWithoutFence(JSCell* cell)
    {
        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
        return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold));
    }

    Jump barrierBranch(VM& vm, GPRReg cell, GPRReg scratchGPR)
    {
        load8(Address(cell, JSCell::cellStateOffset()), scratchGPR);
        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
    }

    Jump barrierBranch(VM& vm, JSCell* cell, GPRReg scratchGPR)
    {
        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
        load8(address, scratchGPR);
        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
    }

    // Emit a store-load fence, skipped entirely when the VM says the mutator
    // does not currently need fencing.
    void barrierStoreLoadFence(VM& vm)
    {
        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
        memoryFence();
        ok.link(this);
    }

    // Emit a store fence under the same condition. x86's memory model makes
    // this unnecessary there, so it is skipped at emission time.
    void mutatorFence(VM& vm)
    {
        if (isX86())
            return;
        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
        storeFence();
        ok.link(this);
    }
1557
    // Force `storage` into the Gigacage for `kind` by masking it and adding the
    // cage base. Does not validate or untag the pointer (see cageConditionally
    // for the untagging variant). No-op when the Gigacage is compiled out or
    // disabled for this kind.
    void cageWithoutUntagging(Gigacage::Kind kind, GPRReg storage)
    {
#if GIGACAGE_ENABLED
        if (!Gigacage::isEnabled(kind))
            return;

#if CPU(ARM64E)
        // On ARM64E, primitive-cage pointers carry PAC bits in the high bits;
        // preserve them by caging a copy and re-inserting the low bits.
        RegisterID tempReg = InvalidGPRReg;
        if (kind == Gigacage::Primitive) {
            tempReg = getCachedMemoryTempRegisterIDAndInvalidate();
            move(storage, tempReg);
            // Flip the registers since bitFieldInsert only inserts into the low bits.
            std::swap(storage, tempReg);
        }
#endif
        andPtr(TrustedImmPtr(Gigacage::mask(kind)), storage);
        addPtr(TrustedImmPtr(Gigacage::basePtr(kind)), storage);
#if CPU(ARM64E)
        if (kind == Gigacage::Primitive)
            bitFieldInsert64(storage, 0, 64 - numberOfPACBits, tempReg);
#endif

#else
        UNUSED_PARAM(kind);
        UNUSED_PARAM(storage);
#endif
    }
1585
    // Cage `storage`, first untagging it on ARM64E (the tag is validated
    // against `length`). The caging itself is conditional: for the primitive
    // cage (when it can still be disabled) we test the cage base at runtime and
    // skip caging when it is null.
    // length may be the same register as scratch.
    void cageConditionally(Gigacage::Kind kind, GPRReg storage, GPRReg length, GPRReg scratch)
    {
#if CPU(ARM64E)
        if (kind == Gigacage::Primitive)
            untagArrayPtr(length, storage);
#else
        UNUSED_PARAM(kind);
        UNUSED_PARAM(storage);
        UNUSED_PARAM(length);
#endif

#if GIGACAGE_ENABLED
        if (!Gigacage::isEnabled(kind))
            return;

        if (kind != Gigacage::Primitive || Gigacage::isDisablingPrimitiveGigacageDisabled())
            cageWithoutUntagging(kind, storage);
        else {
            // The primitive cage may be torn down at runtime: load its base and
            // skip caging if it has been zeroed.
            loadPtr(&Gigacage::basePtr(kind), scratch);
            Jump done = branchTestPtr(Zero, scratch);
#if CPU(ARM64E)
            // Cage a copy and re-insert the low bits so the PAC bits in
            // `storage` survive.
            auto tempReg = getCachedMemoryTempRegisterIDAndInvalidate();
            move(storage, tempReg);
            andPtr(TrustedImmPtr(Gigacage::mask(kind)), tempReg);
            addPtr(scratch, tempReg);
            bitFieldInsert64(tempReg, 0, 64 - numberOfPACBits, storage);
#else
            andPtr(TrustedImmPtr(Gigacage::mask(kind)), storage);
            addPtr(scratch, storage);
#endif
            done.link(this);


        }
#else
        UNUSED_PARAM(scratch);
#endif

    }
1626
    // Compute the butterfly indexing mask for a vector length: all-ones shifted
    // right by the leading-zero count, yielding the smallest 2^k - 1 >= length,
    // and 0 for a zero length. 32-bit x86 needs an explicit zero check because
    // its shift semantics differ (see comment below).
    void emitComputeButterflyIndexingMask(GPRReg vectorLengthGPR, GPRReg scratchGPR, GPRReg resultGPR)
    {
        ASSERT(scratchGPR != resultGPR);
        Jump done;
        if (isX86() && !isX86_64()) {
            Jump nonZero = branchTest32(NonZero, vectorLengthGPR);
            move(TrustedImm32(0), resultGPR);
            done = jump();
            nonZero.link(this);
        }
        // If vectorLength == 0 then clz will return 32 on both ARM and x86. On 64-bit systems, we can then do a 64-bit right shift on a 32-bit -1 to get a 0 mask for zero vectorLength. On 32-bit ARM, shift masks with 0xff, which means it will still create a 0 mask.
        countLeadingZeros32(vectorLengthGPR, scratchGPR);
        move(TrustedImm32(-1), resultGPR);
        urshiftPtr(scratchGPR, resultGPR);
        if (done.isSet())
            done.link(this);
    }
1644
    // Atomically-enough publish a new butterfly: nuke the structure ID, fence,
    // store the butterfly, fence again. On x86 the stronger memory model lets
    // us skip the fences; when the VM says no mutator fence is needed we take
    // the fence-free fast path at runtime instead.
    // If for whatever reason the butterfly is going to change vector length this function does NOT
    // update the indexing mask.
    void nukeStructureAndStoreButterfly(VM& vm, GPRReg butterfly, GPRReg object)
    {
        if (isX86()) {
            or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
            storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
            return;
        }

        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
        or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
        storeFence();
        storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
        storeFence();
        Jump done = jump();
        // Fence-free path: just store the butterfly.
        ok.link(this);
        storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
        done.link(this);
    }
1665
    // Branch taken when the VM's mutatorShouldBeFenced flag is clear, i.e. when
    // barrier/butterfly fences may be skipped.
    Jump jumpIfMutatorFenceNotNeeded(VM& vm)
    {
        return branchTest8(Zero, AbsoluteAddress(vm.heap.addressOfMutatorShouldBeFenced()));
    }
1670
1671 void sanitizeStackInline(VM&, GPRReg scratch);
1672
    // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
    // functor is called at those points where we have pinpointed a type. One way to use this is to
    // have the functor emit the code to put the type string into an appropriate register and then
    // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow
    // case. It is passed the unlinked jump to the slow case.
    //
    // The functor's second argument is true only for the final (fall-through)
    // case, letting callers omit the jump-out there.
    template<typename Functor, typename SlowPathFunctor>
    void emitTypeOf(
        JSValueRegs regs, GPRReg tempGPR, const Functor& functor,
        const SlowPathFunctor& slowPathFunctor)
    {
        // Implements the following branching structure:
        //
        // if (is cell) {
        //     if (is object) {
        //         if (is function) {
        //             return function;
        //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
        //             return object
        //         } else {
        //             return slowPath();
        //         }
        //     } else if (is string) {
        //         return string
        //     } else if (is bigint) {
        //         return bigint
        //     } else {
        //         return symbol
        //     }
        // } else if (is number) {
        //     return number
        // } else if (is null) {
        //     return object
        // } else if (is boolean) {
        //     return boolean
        // } else {
        //     return undefined
        // }
        //
        // FIXME: typeof Symbol should be more frequently seen than BigInt.
        // We should change the order of type detection based on this frequency.
        // https://bugs.webkit.org/show_bug.cgi?id=192650

        Jump notCell = branchIfNotCell(regs);

        GPRReg cellGPR = regs.payloadGPR();
        Jump notObject = branchIfNotObject(cellGPR);

        Jump notFunction = branchIfNotFunction(cellGPR);
        functor(TypeofType::Function, false);

        notFunction.link(this);
        // Objects with a call trap or MasqueradesAsUndefined need the slow path
        // to decide between "function"/"undefined"/"object".
        slowPathFunctor(
            branchTest8(
                NonZero,
                Address(cellGPR, JSCell::typeInfoFlagsOffset()),
                TrustedImm32(MasqueradesAsUndefined | OverridesGetCallData)));
        functor(TypeofType::Object, false);

        notObject.link(this);

        Jump notString = branchIfNotString(cellGPR);
        functor(TypeofType::String, false);

        notString.link(this);

        Jump notBigInt = branchIfNotBigInt(cellGPR);
        functor(TypeofType::BigInt, false);

        notBigInt.link(this);
        functor(TypeofType::Symbol, false);

        notCell.link(this);

        Jump notNumber = branchIfNotNumber(regs, tempGPR);
        functor(TypeofType::Number, false);
        notNumber.link(this);

        // typeof null is "object".
        JumpList notNull = branchIfNotEqual(regs, jsNull());
        functor(TypeofType::Object, false);
        notNull.link(this);

        Jump notBoolean = branchIfNotBoolean(regs, tempGPR);
        functor(TypeofType::Boolean, false);
        notBoolean.link(this);

        // Final case: anything left is undefined. `true` tells the functor it
        // may fall through.
        functor(TypeofType::Undefined, true);
    }
1760
1761 void emitDumbVirtualCall(VM&, CallLinkInfo*);
1762
1763 void makeSpaceOnStackForCCall();
1764 void reclaimSpaceOnStackForCCall();
1765
1766#if USE(JSVALUE64)
1767 void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
1768 void emitRandomThunk(VM&, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
1769#endif
1770
1771 // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean
1772 // that allocator is non-null; allocator can be null as a signal that we don't know what the
1773 // value of allocatorGPR is. Additionally, if the allocator is not null, then there is no need
1774 // to populate allocatorGPR - this code will ignore the contents of allocatorGPR.
1775 void emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath);
1776
1777 void emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath);
1778
    // Allocate a JSCell and initialize its header from `structure`
    // (StructureType may be a register or a TrustedImmPtr). Jumps to slowPath
    // on allocation failure; scratchGPR is clobbered.
    template<typename StructureType>
    void emitAllocateJSCell(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath)
    {
        emitAllocate(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
        emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
    }

    // As above, but also stores the butterfly pointer, completing a JSObject.
    template<typename StructureType, typename StorageType>
    void emitAllocateJSObject(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, GPRReg scratchGPR, JumpList& slowPath)
    {
        emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
        storePtr(storage, Address(resultGPR, JSObject::butterflyOffset()));
    }

    // Allocate a JSObject of a statically known size, resolving the allocator
    // at compile time for ClassType's subspace.
    template<typename ClassType, typename StructureType, typename StorageType>
    void emitAllocateJSObjectWithKnownSize(
        VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
        GPRReg scratchGPR2, JumpList& slowPath, size_t size)
    {
        Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm, size, AllocatorForMode::AllocatorIfExists);
        emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR1, structure, storage, scratchGPR2, slowPath);
    }

    // Convenience form using ClassType's default (zero-inline-capacity) size.
    template<typename ClassType, typename StructureType, typename StorageType>
    void emitAllocateJSObject(VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
    {
        emitAllocateJSObjectWithKnownSize<ClassType>(vm, resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
    }
1807
1808 // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it
1809 // won't be clobbered.
1810 void emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath);
1811
    // Allocate a cell whose size is only known at runtime (in allocationSize),
    // then initialize its header. allocationSize may alias the scratch GPRs.
    template<typename ClassType, typename StructureType>
    void emitAllocateVariableSizedCell(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
    {
        CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm);
        RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
        emitAllocateVariableSized(resultGPR, *subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath);
        emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2);
    }

    // Variable-sized JSObject: like the above plus a null butterfly store.
    template<typename ClassType, typename StructureType>
    void emitAllocateVariableSizedJSObject(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
    {
        emitAllocateVariableSizedCell<ClassType>(vm, resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
        storePtr(TrustedImmPtr(nullptr), Address(resultGPR, JSObject::butterflyOffset()));
    }
1827
1828 JumpList branchIfValue(VM&, JSValueRegs, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult);
    // Truthiness branches: thin wrappers over branchIfValue with negateResult
    // false (truthy) or true (falsey).
    JumpList branchIfTruthy(VM& vm, JSValueRegs value, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg scratchFPR0, FPRReg scratchFPR1, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject)
    {
        return branchIfValue(vm, value, scratch, scratchIfShouldCheckMasqueradesAsUndefined, scratchFPR0, scratchFPR1, shouldCheckMasqueradesAsUndefined, globalObject, false);
    }
    JumpList branchIfFalsey(VM& vm, JSValueRegs value, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg scratchFPR0, FPRReg scratchFPR1, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject)
    {
        return branchIfValue(vm, value, scratch, scratchIfShouldCheckMasqueradesAsUndefined, scratchFPR0, scratchFPR1, shouldCheckMasqueradesAsUndefined, globalObject, true);
    }
1837 void emitConvertValueToBoolean(VM&, JSValueRegs, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
1838
    // Allocate a destructible object: a normal JSObject allocation (null
    // butterfly) plus storing the ClassInfo pointer that destruction requires.
    template<typename ClassType>
    void emitAllocateDestructibleObject(VM& vm, GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
    {
        auto butterfly = TrustedImmPtr(nullptr);
        emitAllocateJSObject<ClassType>(vm, resultGPR, TrustedImmPtr(structure), butterfly, scratchGPR1, scratchGPR2, slowPath);
        storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset()));
    }
1846
    // Fill the object's inline storage with the empty JSValue; capacity known
    // at compile time, so the stores are fully unrolled.
    void emitInitializeInlineStorage(GPRReg baseGPR, unsigned inlineCapacity)
    {
        for (unsigned i = 0; i < inlineCapacity; ++i)
            storeTrustedValue(JSValue(), Address(baseGPR, JSObject::offsetOfInlineStorage() + i * sizeof(EncodedJSValue)));
    }

    // Runtime-capacity variant: emits a countdown loop. Clobbers the
    // inlineCapacity register (counts it down to zero).
    void emitInitializeInlineStorage(GPRReg baseGPR, GPRReg inlineCapacity)
    {
        Jump empty = branchTest32(Zero, inlineCapacity);
        Label loop = label();
        sub32(TrustedImm32(1), inlineCapacity);
        storeTrustedValue(JSValue(), BaseIndex(baseGPR, inlineCapacity, TimesEight, JSObject::offsetOfInlineStorage()));
        branchTest32(NonZero, inlineCapacity).linkTo(loop, this);
        empty.link(this);
    }

    // Fill out-of-line butterfly property storage (which grows at negative
    // offsets, below the IndexingHeader) with the empty JSValue.
    void emitInitializeOutOfLineStorage(GPRReg butterflyGPR, unsigned outOfLineCapacity)
    {
        for (unsigned i = 0; i < outOfLineCapacity; ++i)
            storeTrustedValue(JSValue(), Address(butterflyGPR, -sizeof(IndexingHeader) - (i + 1) * sizeof(EncodedJSValue)));
    }
1868
1869#if USE(JSVALUE64)
1870 void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch);
1871#endif
1872
1873 // This assumes that index and length are 32-bit. This also assumes that they are already
1874 // zero-extended. Also this does not clobber index, which is useful in the baseline JIT. This
1875 // permits length and result to be in the same register.
1876 void emitPreparePreciseIndexMask32(GPRReg index, GPRReg length, GPRReg result);
1877
1878#if ENABLE(WEBASSEMBLY)
1879 void loadWasmContextInstance(GPRReg dst);
1880 void storeWasmContextInstance(GPRReg src);
1881 static bool loadWasmContextInstanceNeedsMacroScratchRegister();
1882 static bool storeWasmContextInstanceNeedsMacroScratchRegister();
1883#endif
1884
1885protected:
1886 void copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer);
1887
1888 CodeBlock* m_codeBlock;
1889 CodeBlock* m_baselineCodeBlock;
1890};
1891
1892} // namespace JSC
1893
1894#endif // ENABLE(JIT)
1895