1/*
2 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#pragma once
27
28#if ENABLE(ASSEMBLER)
29
30#include "JSCJSValue.h"
31
32#if CPU(ARM_THUMB2)
33#define TARGET_ASSEMBLER ARMv7Assembler
34#define TARGET_MACROASSEMBLER MacroAssemblerARMv7
35#include "MacroAssemblerARMv7.h"
36namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
37
38#elif CPU(ARM64E)
39#define TARGET_ASSEMBLER ARM64EAssembler
40#define TARGET_MACROASSEMBLER MacroAssemblerARM64E
41#include "MacroAssemblerARM64E.h"
42
43#elif CPU(ARM64)
44#define TARGET_ASSEMBLER ARM64Assembler
45#define TARGET_MACROASSEMBLER MacroAssemblerARM64
46#include "MacroAssemblerARM64.h"
47
48#elif CPU(MIPS)
49#define TARGET_ASSEMBLER MIPSAssembler
50#define TARGET_MACROASSEMBLER MacroAssemblerMIPS
51#include "MacroAssemblerMIPS.h"
52
53#elif CPU(X86)
54#define TARGET_ASSEMBLER X86Assembler
55#define TARGET_MACROASSEMBLER MacroAssemblerX86
56#include "MacroAssemblerX86.h"
57
58#elif CPU(X86_64)
59#define TARGET_ASSEMBLER X86Assembler
60#define TARGET_MACROASSEMBLER MacroAssemblerX86_64
61#include "MacroAssemblerX86_64.h"
62
63#else
64#error "The MacroAssembler is not supported on this platform."
65#endif
66
67#include "MacroAssemblerHelpers.h"
68
69namespace WTF {
70
71template<typename FunctionType>
72class ScopedLambda;
73
74} // namespace WTF
75
76namespace JSC {
77
78#if ENABLE(MASM_PROBE)
79namespace Probe {
80
81class Context;
82typedef void (*Function)(Context&);
83
84} // namespace Probe
85#endif // ENABLE(MASM_PROBE)
86
87namespace Printer {
88
89struct PrintRecord;
90typedef Vector<PrintRecord> PrintRecordList;
91
92} // namespace Printer
93
94using MacroAssemblerBase = TARGET_MACROASSEMBLER;
95
96class MacroAssembler : public MacroAssemblerBase {
97public:
98
99 static constexpr RegisterID nextRegister(RegisterID reg)
100 {
101 return static_cast<RegisterID>(reg + 1);
102 }
103
104 static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
105 {
106 return static_cast<FPRegisterID>(reg + 1);
107 }
108
109 static constexpr unsigned registerIndex(RegisterID reg)
110 {
111 return reg - firstRegister();
112 }
113
114 static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
115 {
116 return reg - firstFPRegister();
117 }
118
119 static constexpr unsigned registerIndex(FPRegisterID reg)
120 {
121 return fpRegisterIndex(reg) + numberOfRegisters();
122 }
123
124 static constexpr unsigned totalNumberOfRegisters()
125 {
126 return numberOfRegisters() + numberOfFPRegisters();
127 }
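
    // The index helpers above give every register a dense index: GPRs occupy
    // [0, numberOfRegisters()) and FPRs follow at [numberOfRegisters(), totalNumberOfRegisters()),
    // so registerIndex(FPRegisterID) never collides with registerIndex(RegisterID).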
128
129 using MacroAssemblerBase::pop;
130 using MacroAssemblerBase::jump;
131 using MacroAssemblerBase::branch32;
132 using MacroAssemblerBase::compare32;
133 using MacroAssemblerBase::move;
134 using MacroAssemblerBase::moveDouble;
135 using MacroAssemblerBase::add32;
136 using MacroAssemblerBase::mul32;
137 using MacroAssemblerBase::and32;
138 using MacroAssemblerBase::branchAdd32;
139 using MacroAssemblerBase::branchMul32;
140#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64) || CPU(MIPS)
141 using MacroAssemblerBase::branchPtr;
142#endif
143 using MacroAssemblerBase::branchSub32;
144 using MacroAssemblerBase::lshift32;
145 using MacroAssemblerBase::or32;
146 using MacroAssemblerBase::rshift32;
147 using MacroAssemblerBase::store32;
148 using MacroAssemblerBase::sub32;
149 using MacroAssemblerBase::urshift32;
150 using MacroAssemblerBase::xor32;
151
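    // Despite the name, this simply checks that the offset survives a round trip through
    // int32_t, i.e. that it fits in a signed 32-bit immediate.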
152 static bool isPtrAlignedAddressOffset(ptrdiff_t value)
153 {
154 return value == static_cast<int32_t>(value);
155 }
156
    static const double twoToThe32; // 2^32 as a double; handy for code that converts between 32-bit integers and doubles.
158
159 // Utilities used by the DFG JIT.
160 using AbstractMacroAssemblerBase::invert;
161 using MacroAssemblerBase::invert;
162
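    // Inverting a double condition also flips how unordered (NaN) operands are treated, so that
    // exactly one of a branch and its inverse is taken; e.g. the inverse of DoubleEqual is
    // DoubleNotEqualOrUnordered, not DoubleNotEqual.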
163 static DoubleCondition invert(DoubleCondition cond)
164 {
165 switch (cond) {
166 case DoubleEqual:
167 return DoubleNotEqualOrUnordered;
168 case DoubleNotEqual:
169 return DoubleEqualOrUnordered;
170 case DoubleGreaterThan:
171 return DoubleLessThanOrEqualOrUnordered;
172 case DoubleGreaterThanOrEqual:
173 return DoubleLessThanOrUnordered;
174 case DoubleLessThan:
175 return DoubleGreaterThanOrEqualOrUnordered;
176 case DoubleLessThanOrEqual:
177 return DoubleGreaterThanOrUnordered;
178 case DoubleEqualOrUnordered:
179 return DoubleNotEqual;
180 case DoubleNotEqualOrUnordered:
181 return DoubleEqual;
182 case DoubleGreaterThanOrUnordered:
183 return DoubleLessThanOrEqual;
184 case DoubleGreaterThanOrEqualOrUnordered:
185 return DoubleLessThan;
186 case DoubleLessThanOrUnordered:
187 return DoubleGreaterThanOrEqual;
188 case DoubleLessThanOrEqualOrUnordered:
189 return DoubleGreaterThan;
190 }
191 RELEASE_ASSERT_NOT_REACHED();
192 return DoubleEqual; // make compiler happy
193 }
194
195 static bool isInvertible(ResultCondition cond)
196 {
197 switch (cond) {
198 case Zero:
199 case NonZero:
200 case Signed:
201 case PositiveOrZero:
202 return true;
203 default:
204 return false;
205 }
206 }
207
208 static ResultCondition invert(ResultCondition cond)
209 {
210 switch (cond) {
211 case Zero:
212 return NonZero;
213 case NonZero:
214 return Zero;
215 case Signed:
216 return PositiveOrZero;
217 case PositiveOrZero:
218 return Signed;
219 default:
220 RELEASE_ASSERT_NOT_REACHED();
221 return Zero; // Make compiler happy for release builds.
222 }
223 }
224
225 static RelationalCondition flip(RelationalCondition cond)
226 {
227 switch (cond) {
228 case Equal:
229 case NotEqual:
230 return cond;
231 case Above:
232 return Below;
233 case AboveOrEqual:
234 return BelowOrEqual;
235 case Below:
236 return Above;
237 case BelowOrEqual:
238 return AboveOrEqual;
239 case GreaterThan:
240 return LessThan;
241 case GreaterThanOrEqual:
242 return LessThanOrEqual;
243 case LessThan:
244 return GreaterThan;
245 case LessThanOrEqual:
246 return GreaterThanOrEqual;
247 }
248
249 RELEASE_ASSERT_NOT_REACHED();
250 return Equal;
251 }
252
253 static bool isSigned(RelationalCondition cond)
254 {
255 return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
256 }
257
258 static bool isUnsigned(RelationalCondition cond)
259 {
260 return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
261 }
262
263 static bool isSigned(ResultCondition cond)
264 {
265 return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
266 }
267
268 static bool isUnsigned(ResultCondition cond)
269 {
270 return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
271 }
272
273 // Platform agnostic convenience functions,
274 // described in terms of other macro assembly methods.
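    // For example, poke(reg, 2) stores reg at stackPointerRegister + 2 * sizeof(void*),
    // and peek(reg, 2) loads it back from the same slot.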
275 void pop()
276 {
277 addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
278 }
279
280 void peek(RegisterID dest, int index = 0)
281 {
282 loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
283 }
284
285 Address addressForPoke(int index)
286 {
287 return Address(stackPointerRegister, (index * sizeof(void*)));
288 }
289
290 void poke(RegisterID src, int index = 0)
291 {
292 storePtr(src, addressForPoke(index));
293 }
294
295 void poke(TrustedImm32 value, int index = 0)
296 {
297 store32(value, addressForPoke(index));
298 }
299
300 void poke(TrustedImmPtr imm, int index = 0)
301 {
302 storePtr(imm, addressForPoke(index));
303 }
304
305 void poke(FPRegisterID src, int index = 0)
306 {
307 storeDouble(src, addressForPoke(index));
308 }
309
310#if !CPU(ARM64)
311 void pushToSave(RegisterID src)
312 {
313 push(src);
314 }
315 void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
316 {
317 push(imm);
318 }
319 void popToRestore(RegisterID dest)
320 {
321 pop(dest);
322 }
323 void pushToSave(FPRegisterID src)
324 {
325 subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
326 storeDouble(src, stackPointerRegister);
327 }
328 void popToRestore(FPRegisterID dest)
329 {
330 loadDouble(stackPointerRegister, dest);
331 addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
332 }
333
334 static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
335#endif // !CPU(ARM64)
336
337#if CPU(X86_64) || CPU(ARM64)
338 void peek64(RegisterID dest, int index = 0)
339 {
340 load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
341 }
342
343 void poke(TrustedImm64 value, int index = 0)
344 {
345 store64(value, addressForPoke(index));
346 }
347
348 void poke64(RegisterID src, int index = 0)
349 {
350 store64(src, addressForPoke(index));
351 }
352#endif
353
354 // Immediate shifts only have 5 controllable bits
355 // so we'll consider them safe for now.
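    // For example, a requested shift of 35 is emitted as a shift by 35 & 31 = 3.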
356 TrustedImm32 trustedImm32ForShift(Imm32 imm)
357 {
358 return TrustedImm32(imm.asTrustedImm32().m_value & 31);
359 }
360
    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
362 void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
363 {
364 branchPtr(cond, op1, imm).linkTo(target, this);
365 }
366 void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
367 {
368 branchPtr(cond, op1, imm).linkTo(target, this);
369 }
370
371 Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right)
372 {
373 return branch32(flip(cond), right, left);
374 }
375
376 void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
377 {
378 branch32(cond, op1, op2).linkTo(target, this);
379 }
380
381 void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
382 {
383 branch32(cond, op1, imm).linkTo(target, this);
384 }
385
386 void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
387 {
388 branch32(cond, op1, imm).linkTo(target, this);
389 }
390
391 void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
392 {
393 branch32(cond, left, right).linkTo(target, this);
394 }
395
396 Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
397 {
398 return branch32(commute(cond), right, left);
399 }
400
401 Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
402 {
403 return branch32(commute(cond), right, left);
404 }
405
406 void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
407 {
408 compare32(commute(cond), right, left, dest);
409 }
410
411 void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
412 {
413 branchTestPtr(cond, reg).linkTo(target, this);
414 }
415
416#if !CPU(ARM_THUMB2) && !CPU(ARM64)
417 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(nullptr))
418 {
419 return PatchableJump(branchPtr(cond, left, right));
420 }
421
422 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
423 {
424 return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
425 }
426
427 PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
428 {
429 return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
430 }
431
432 PatchableJump patchableJump()
433 {
434 return PatchableJump(jump());
435 }
436
437 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
438 {
439 return PatchableJump(branchTest32(cond, reg, mask));
440 }
441
442 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
443 {
444 return PatchableJump(branch32(cond, reg, imm));
445 }
446
447 PatchableJump patchableBranch8(RelationalCondition cond, Address address, TrustedImm32 imm)
448 {
449 return PatchableJump(branch8(cond, address, imm));
450 }
451
452 PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
453 {
454 return PatchableJump(branch32(cond, address, imm));
455 }
456#endif
457
458 void jump(Label target)
459 {
460 jump().linkTo(target, this);
461 }
462
463 // Commute a relational condition, returns a new condition that will produce
464 // the same results given the same inputs but with their positions exchanged.
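    // For example, (a < b) computes the same result as (b > a), so commute(LessThan) is GreaterThan.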
465 static RelationalCondition commute(RelationalCondition condition)
466 {
467 switch (condition) {
468 case Above:
469 return Below;
470 case AboveOrEqual:
471 return BelowOrEqual;
472 case Below:
473 return Above;
474 case BelowOrEqual:
475 return AboveOrEqual;
476 case GreaterThan:
477 return LessThan;
478 case GreaterThanOrEqual:
479 return LessThanOrEqual;
480 case LessThan:
481 return GreaterThan;
482 case LessThanOrEqual:
483 return GreaterThanOrEqual;
484 default:
485 break;
486 }
487
488 ASSERT(condition == Equal || condition == NotEqual);
489 return condition;
490 }
491
492 void oops()
493 {
494 abortWithReason(B3Oops);
495 }
496
    // B3 has additional pseudo-opcodes for returning when it wants to signal that the return
    // consumes some register in some way.
499 void retVoid() { ret(); }
500 void ret32(RegisterID) { ret(); }
501 void ret64(RegisterID) { ret(); }
502 void retFloat(FPRegisterID) { ret(); }
503 void retDouble(FPRegisterID) { ret(); }
504
505 static const unsigned BlindingModulus = 64;
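    // With BlindingModulus == 64, this returns true roughly once in every 64 calls (whenever the
    // low six bits of random() are zero), so the vast majority of constants are emitted unblinded.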
506 bool shouldConsiderBlinding()
507 {
508 return !(random() & (BlindingModulus - 1));
509 }
510
511 void move(Address src, Address dest, RegisterID scratch)
512 {
513 loadPtr(src, scratch);
514 storePtr(scratch, dest);
515 }
516
517 void move32(Address src, Address dest, RegisterID scratch)
518 {
519 load32(src, scratch);
520 store32(scratch, dest);
521 }
522
523 void moveFloat(Address src, Address dest, FPRegisterID scratch)
524 {
525 loadFloat(src, scratch);
526 storeFloat(scratch, dest);
527 }
528
529 // Overload mostly for use in templates.
530 void move(FPRegisterID src, FPRegisterID dest)
531 {
532 moveDouble(src, dest);
533 }
534
535 void moveDouble(Address src, Address dest, FPRegisterID scratch)
536 {
537 loadDouble(src, scratch);
538 storeDouble(scratch, dest);
539 }
540
541 // Ptr methods
    // On 32-bit platforms (e.g. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
544#if !CPU(X86_64) && !CPU(ARM64)
545 void addPtr(Address src, RegisterID dest)
546 {
547 add32(src, dest);
548 }
549
550 void addPtr(AbsoluteAddress src, RegisterID dest)
551 {
552 add32(src, dest);
553 }
554
555 void addPtr(RegisterID src, RegisterID dest)
556 {
557 add32(src, dest);
558 }
559
560 void addPtr(RegisterID left, RegisterID right, RegisterID dest)
561 {
562 add32(left, right, dest);
563 }
564
565 void addPtr(TrustedImm32 imm, RegisterID srcDest)
566 {
567 add32(imm, srcDest);
568 }
569
570 void addPtr(TrustedImmPtr imm, RegisterID dest)
571 {
572 add32(TrustedImm32(imm), dest);
573 }
574
575 void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
576 {
577 add32(imm, src, dest);
578 }
579
580 void addPtr(TrustedImm32 imm, AbsoluteAddress address)
581 {
582 add32(imm, address);
583 }
584
585 void andPtr(RegisterID src, RegisterID dest)
586 {
587 and32(src, dest);
588 }
589
590 void andPtr(TrustedImm32 imm, RegisterID srcDest)
591 {
592 and32(imm, srcDest);
593 }
594
595 void andPtr(TrustedImmPtr imm, RegisterID srcDest)
596 {
597 and32(TrustedImm32(imm), srcDest);
598 }
599
600 void lshiftPtr(Imm32 imm, RegisterID srcDest)
601 {
602 lshift32(trustedImm32ForShift(imm), srcDest);
603 }
604
605 void lshiftPtr(TrustedImm32 imm, RegisterID srcDest)
606 {
607 lshift32(imm, srcDest);
608 }
609
610 void rshiftPtr(Imm32 imm, RegisterID srcDest)
611 {
612 rshift32(trustedImm32ForShift(imm), srcDest);
613 }
614
615 void rshiftPtr(TrustedImm32 imm, RegisterID srcDest)
616 {
617 rshift32(imm, srcDest);
618 }
619
620 void urshiftPtr(Imm32 imm, RegisterID srcDest)
621 {
622 urshift32(trustedImm32ForShift(imm), srcDest);
623 }
624
    void urshiftPtr(RegisterID shiftAmount, RegisterID srcDest)
    {
        urshift32(shiftAmount, srcDest);
    }
629
630 void negPtr(RegisterID dest)
631 {
632 neg32(dest);
633 }
634
635 void negPtr(RegisterID src, RegisterID dest)
636 {
637 neg32(src, dest);
638 }
639
640 void orPtr(RegisterID src, RegisterID dest)
641 {
642 or32(src, dest);
643 }
644
645 void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
646 {
647 or32(op1, op2, dest);
648 }
649
650 void orPtr(TrustedImmPtr imm, RegisterID dest)
651 {
652 or32(TrustedImm32(imm), dest);
653 }
654
655 void orPtr(TrustedImm32 imm, RegisterID dest)
656 {
657 or32(imm, dest);
658 }
659
660 void subPtr(RegisterID src, RegisterID dest)
661 {
662 sub32(src, dest);
663 }
664
665 void subPtr(TrustedImm32 imm, RegisterID dest)
666 {
667 sub32(imm, dest);
668 }
669
670 void subPtr(TrustedImmPtr imm, RegisterID dest)
671 {
672 sub32(TrustedImm32(imm), dest);
673 }
674
675 void xorPtr(RegisterID src, RegisterID dest)
676 {
677 xor32(src, dest);
678 }
679
680 void xorPtr(TrustedImm32 imm, RegisterID srcDest)
681 {
682 xor32(imm, srcDest);
683 }
684
685 void xorPtr(TrustedImmPtr imm, RegisterID srcDest)
686 {
687 xor32(TrustedImm32(imm), srcDest);
688 }
689
690 void xorPtr(Address src, RegisterID dest)
691 {
692 xor32(src, dest);
693 }
694
695 void loadPtr(ImplicitAddress address, RegisterID dest)
696 {
697 load32(address, dest);
698 }
699
700 void loadPtr(BaseIndex address, RegisterID dest)
701 {
702 load32(address, dest);
703 }
704
705 void loadPtr(const void* address, RegisterID dest)
706 {
707 load32(address, dest);
708 }
709
710#if ENABLE(FAST_TLS_JIT)
711 void loadFromTLSPtr(uint32_t offset, RegisterID dst)
712 {
713 loadFromTLS32(offset, dst);
714 }
715
716 void storeToTLSPtr(RegisterID src, uint32_t offset)
717 {
718 storeToTLS32(src, offset);
719 }
720#endif
721
722 DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
723 {
724 return load32WithAddressOffsetPatch(address, dest);
725 }
726
727 DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
728 {
729 return load32WithCompactAddressOffsetPatch(address, dest);
730 }
731
732 void move(ImmPtr imm, RegisterID dest)
733 {
734 move(Imm32(imm.asTrustedImmPtr()), dest);
735 }
736
737 void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
738 {
739 compare32(cond, left, right, dest);
740 }
741
742 void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
743 {
744 compare32(cond, left, right, dest);
745 }
746
747 void storePtr(RegisterID src, ImplicitAddress address)
748 {
749 store32(src, address);
750 }
751
752 void storePtr(RegisterID src, BaseIndex address)
753 {
754 store32(src, address);
755 }
756
757 void storePtr(RegisterID src, void* address)
758 {
759 store32(src, address);
760 }
761
762 void storePtr(TrustedImmPtr imm, ImplicitAddress address)
763 {
764 store32(TrustedImm32(imm), address);
765 }
766
767 void storePtr(ImmPtr imm, Address address)
768 {
769 store32(Imm32(imm.asTrustedImmPtr()), address);
770 }
771
772 void storePtr(TrustedImmPtr imm, void* address)
773 {
774 store32(TrustedImm32(imm), address);
775 }
776
777 void storePtr(TrustedImm32 imm, ImplicitAddress address)
778 {
779 store32(imm, address);
780 }
781
782 void storePtr(TrustedImmPtr imm, BaseIndex address)
783 {
784 store32(TrustedImm32(imm), address);
785 }
786
787 DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
788 {
789 return store32WithAddressOffsetPatch(src, address);
790 }
791
792 Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
793 {
794 return branch32(cond, left, right);
795 }
796
797 Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
798 {
799 return branch32(cond, left, TrustedImm32(right));
800 }
801
802 Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
803 {
804 return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
805 }
806
807 Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
808 {
809 return branch32(cond, left, right);
810 }
811
812 Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
813 {
814 return branch32(cond, left, right);
815 }
816
817 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
818 {
819 return branch32(cond, left, right);
820 }
821
822 Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
823 {
824 return branch32(cond, left, TrustedImm32(right));
825 }
826
827 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
828 {
829 return branch32(cond, left, TrustedImm32(right));
830 }
831
832 Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
833 {
834 return branchSub32(cond, src, dest);
835 }
836
837 Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
838 {
839 return branchTest32(cond, reg, mask);
840 }
841
842 Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
843 {
844 return branchTest32(cond, reg, mask);
845 }
846
847 Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
848 {
849 return branchTest32(cond, address, mask);
850 }
851
852 Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
853 {
854 return branchTest32(cond, address, mask);
855 }
856
857 Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
858 {
859 return branchAdd32(cond, src, dest);
860 }
861
862 Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
863 {
864 return branchSub32(cond, imm, dest);
865 }
866 using MacroAssemblerBase::branchTest8;
867 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
868 {
869 return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
870 }
871
872#else // !CPU(X86_64) && !CPU(ARM64)
873
874 void addPtr(RegisterID src, RegisterID dest)
875 {
876 add64(src, dest);
877 }
878
879 void addPtr(RegisterID left, RegisterID right, RegisterID dest)
880 {
881 add64(left, right, dest);
882 }
883
884 void addPtr(Address src, RegisterID dest)
885 {
886 add64(src, dest);
887 }
888
889 void addPtr(TrustedImm32 imm, RegisterID srcDest)
890 {
891 add64(imm, srcDest);
892 }
893
894 void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
895 {
896 add64(imm, src, dest);
897 }
898
899 void addPtr(TrustedImm32 imm, Address address)
900 {
901 add64(imm, address);
902 }
903
904 void addPtr(AbsoluteAddress src, RegisterID dest)
905 {
906 add64(src, dest);
907 }
908
909 void addPtr(TrustedImmPtr imm, RegisterID dest)
910 {
911 add64(TrustedImm64(imm), dest);
912 }
913
914 void addPtr(TrustedImm32 imm, AbsoluteAddress address)
915 {
916 add64(imm, address);
917 }
918
919 void andPtr(RegisterID src, RegisterID dest)
920 {
921 and64(src, dest);
922 }
923
924 void andPtr(TrustedImm32 imm, RegisterID srcDest)
925 {
926 and64(imm, srcDest);
927 }
928
929 void andPtr(TrustedImmPtr imm, RegisterID srcDest)
930 {
931 and64(imm, srcDest);
932 }
933
934 void lshiftPtr(Imm32 imm, RegisterID srcDest)
935 {
936 lshift64(trustedImm32ForShift(imm), srcDest);
937 }
938
939 void lshiftPtr(TrustedImm32 imm, RegisterID srcDest)
940 {
941 lshift64(imm, srcDest);
942 }
943
944 void rshiftPtr(Imm32 imm, RegisterID srcDest)
945 {
946 rshift64(trustedImm32ForShift(imm), srcDest);
947 }
948
949 void rshiftPtr(TrustedImm32 imm, RegisterID srcDest)
950 {
951 rshift64(imm, srcDest);
952 }
953
954 void urshiftPtr(Imm32 imm, RegisterID srcDest)
955 {
956 urshift64(trustedImm32ForShift(imm), srcDest);
957 }
958
    void urshiftPtr(RegisterID shiftAmount, RegisterID srcDest)
    {
        urshift64(shiftAmount, srcDest);
    }
963
964 void negPtr(RegisterID dest)
965 {
966 neg64(dest);
967 }
968
969 void negPtr(RegisterID src, RegisterID dest)
970 {
971 neg64(src, dest);
972 }
973
974 void orPtr(RegisterID src, RegisterID dest)
975 {
976 or64(src, dest);
977 }
978
979 void orPtr(TrustedImm32 imm, RegisterID dest)
980 {
981 or64(imm, dest);
982 }
983
984 void orPtr(TrustedImmPtr imm, RegisterID dest)
985 {
986 or64(TrustedImm64(imm), dest);
987 }
988
989 void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
990 {
991 or64(op1, op2, dest);
992 }
993
994 void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
995 {
996 or64(imm, src, dest);
997 }
998
999 void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
1000 {
1001 rotateRight64(imm, srcDst);
1002 }
1003
1004 void subPtr(RegisterID src, RegisterID dest)
1005 {
1006 sub64(src, dest);
1007 }
1008
1009 void subPtr(TrustedImm32 imm, RegisterID dest)
1010 {
1011 sub64(imm, dest);
1012 }
1013
1014 void subPtr(TrustedImmPtr imm, RegisterID dest)
1015 {
1016 sub64(TrustedImm64(imm), dest);
1017 }
1018
1019 void xorPtr(RegisterID src, RegisterID dest)
1020 {
1021 xor64(src, dest);
1022 }
1023
1024 void xorPtr(Address src, RegisterID dest)
1025 {
1026 xor64(src, dest);
1027 }
1028
1029 void xorPtr(RegisterID src, Address dest)
1030 {
1031 xor64(src, dest);
1032 }
1033
1034 void xorPtr(TrustedImm32 imm, RegisterID srcDest)
1035 {
1036 xor64(imm, srcDest);
1037 }
1038
1039 // FIXME: Look into making the need for a scratch register explicit, or providing the option to specify a scratch register.
1040 void xorPtr(TrustedImmPtr imm, RegisterID srcDest)
1041 {
1042 xor64(TrustedImm64(imm), srcDest);
1043 }
1044
1045 void loadPtr(ImplicitAddress address, RegisterID dest)
1046 {
1047 load64(address, dest);
1048 }
1049
1050 void loadPtr(BaseIndex address, RegisterID dest)
1051 {
1052 load64(address, dest);
1053 }
1054
1055 void loadPtr(const void* address, RegisterID dest)
1056 {
1057 load64(address, dest);
1058 }
1059
1060#if ENABLE(FAST_TLS_JIT)
1061 void loadFromTLSPtr(uint32_t offset, RegisterID dst)
1062 {
1063 loadFromTLS64(offset, dst);
1064 }
1065 void storeToTLSPtr(RegisterID src, uint32_t offset)
1066 {
1067 storeToTLS64(src, offset);
1068 }
1069#endif
1070
1071 DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
1072 {
1073 return load64WithAddressOffsetPatch(address, dest);
1074 }
1075
1076 DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
1077 {
1078 return load64WithCompactAddressOffsetPatch(address, dest);
1079 }
1080
1081 void storePtr(RegisterID src, ImplicitAddress address)
1082 {
1083 store64(src, address);
1084 }
1085
1086 void storePtr(RegisterID src, BaseIndex address)
1087 {
1088 store64(src, address);
1089 }
1090
1091 void storePtr(RegisterID src, void* address)
1092 {
1093 store64(src, address);
1094 }
1095
1096 void storePtr(TrustedImmPtr imm, ImplicitAddress address)
1097 {
1098 store64(TrustedImm64(imm), address);
1099 }
1100
1101 void storePtr(TrustedImm32 imm, ImplicitAddress address)
1102 {
1103 store64(imm, address);
1104 }
1105
1106 void storePtr(TrustedImmPtr imm, BaseIndex address)
1107 {
1108 store64(TrustedImm64(imm), address);
1109 }
1110
1111 DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
1112 {
1113 return store64WithAddressOffsetPatch(src, address);
1114 }
1115
1116 void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1117 {
1118 compare64(cond, left, right, dest);
1119 }
1120
1121 void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1122 {
1123 compare64(cond, left, right, dest);
1124 }
1125
1126 void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
1127 {
1128 test64(cond, reg, mask, dest);
1129 }
1130
1131 void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
1132 {
1133 test64(cond, reg, mask, dest);
1134 }
1135
1136 Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
1137 {
1138 return branch64(cond, left, right);
1139 }
1140
1141 Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
1142 {
1143 return branch64(cond, left, TrustedImm64(right));
1144 }
1145
1146 Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
1147 {
1148 return branch64(cond, left, right);
1149 }
1150
1151 Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
1152 {
1153 return branch64(cond, left, right);
1154 }
1155
1156 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1157 {
1158 return branch64(cond, left, right);
1159 }
1160
1161 Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
1162 {
1163 return branch64(cond, left, TrustedImm64(right));
1164 }
1165
1166 Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
1167 {
1168 return branchTest64(cond, reg, mask);
1169 }
1170
1171 Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1172 {
1173 return branchTest64(cond, reg, mask);
1174 }
1175
1176 Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1177 {
1178 return branchTest64(cond, address, mask);
1179 }
1180
1181 Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
1182 {
1183 return branchTest64(cond, address, reg);
1184 }
1185
1186 Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1187 {
1188 return branchTest64(cond, address, mask);
1189 }
1190
1191 Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1192 {
1193 return branchTest64(cond, address, mask);
1194 }
1195
1196 Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1197 {
1198 return branchAdd64(cond, imm, dest);
1199 }
1200
1201 Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
1202 {
1203 return branchAdd64(cond, src, dest);
1204 }
1205
1206 Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1207 {
1208 return branchSub64(cond, imm, dest);
1209 }
1210
1211 Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
1212 {
1213 return branchSub64(cond, src, dest);
1214 }
1215
1216 Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
1217 {
1218 return branchSub64(cond, src1, src2, dest);
1219 }
1220
1221 using MacroAssemblerBase::and64;
1222 using MacroAssemblerBase::convertInt32ToDouble;
1223 using MacroAssemblerBase::store64;
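    // Heuristic: doubles whose magnitude is at most 0xff and whose fractional part is a multiple
    // of 1/8 (e.g. 0.0, 0.5, 100.25) are considered safe; anything else is a candidate for blinding.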
1224 bool shouldBlindDouble(double value)
1225 {
1226 // Don't trust NaN or +/-Infinity
1227 if (!std::isfinite(value))
1228 return shouldConsiderBlinding();
1229
1230 // Try to force normalisation, and check that there's no change
1231 // in the bit pattern
1232 if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
1233 return shouldConsiderBlinding();
1234
1235 value = fabs(value);
1236 // Only allow a limited set of fractional components
1237 double scaledValue = value * 8;
1238 if (scaledValue / 8 != value)
1239 return shouldConsiderBlinding();
1240 double frac = scaledValue - floor(scaledValue);
1241 if (frac != 0.0)
1242 return shouldConsiderBlinding();
1243
1244 return value > 0xff;
1245 }
1246
1247 bool shouldBlindPointerForSpecificArch(uintptr_t value)
1248 {
1249 if (sizeof(void*) == 4)
1250 return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
1251 return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
1252 }
1253
1254 bool shouldBlind(ImmPtr imm)
1255 {
1256 if (!canBlind())
1257 return false;
1258
1259#if ENABLE(FORCED_JIT_BLINDING)
1260 UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // whether we've broken blinding during patch development.
1263 return true;
1264#endif
1265
1266 // First off we'll special case common, "safe" values to avoid hurting
1267 // performance too much
1268 uint64_t value = imm.asTrustedImmPtr().asIntptr();
1269 switch (value) {
1270 case 0xffff:
1271 case 0xffffff:
1272 case 0xffffffffL:
1273 case 0xffffffffffL:
1274 case 0xffffffffffffL:
1275 case 0xffffffffffffffL:
1276 case 0xffffffffffffffffL:
1277 return false;
1278 default: {
1279 if (value <= 0xff)
1280 return false;
1281 if (~value <= 0xff)
1282 return false;
1283 }
1284 }
1285
1286 if (!shouldConsiderBlinding())
1287 return false;
1288
1289 return shouldBlindPointerForSpecificArch(static_cast<uintptr_t>(value));
1290 }
1291
1292 uint8_t generateRotationSeed(size_t widthInBits)
1293 {
        // Generate the seed in [1, widthInBits - 1]. We should not generate widthInBits or 0,
        // since that leads to `<< widthInBits` or `>> widthInBits`, which is undefined behavior.
1296 return (random() % (widthInBits - 1)) + 1;
1297 }
1298
1299 struct RotatedImmPtr {
1300 RotatedImmPtr(uintptr_t v1, uint8_t v2)
1301 : value(v1)
1302 , rotation(v2)
1303 {
1304 }
1305 TrustedImmPtr value;
1306 TrustedImm32 rotation;
1307 };
1308
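    // rotationBlindConstant() stores the pointer rotated left by a random amount and
    // loadRotationBlindedConstant() rotates it back right by the same amount, so the raw pointer
    // bits never appear as an immediate. Illustrative 8-bit example: 0b10110001 rotated left by 3
    // is 0b10001101, and rotating that right by 3 restores 0b10110001.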
1309 RotatedImmPtr rotationBlindConstant(ImmPtr imm)
1310 {
1311 uint8_t rotation = generateRotationSeed(sizeof(void*) * 8);
1312 uintptr_t value = imm.asTrustedImmPtr().asIntptr();
1313 value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
1314 return RotatedImmPtr(value, rotation);
1315 }
1316
1317 void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
1318 {
1319 move(constant.value, dest);
1320 rotateRightPtr(constant.rotation, dest);
1321 }
1322
1323 bool shouldBlind(Imm64 imm)
1324 {
1325#if ENABLE(FORCED_JIT_BLINDING)
1326 UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // whether we've broken blinding during patch development.
1329 return true;
1330#endif
1331
1332 // First off we'll special case common, "safe" values to avoid hurting
1333 // performance too much
1334 uint64_t value = imm.asTrustedImm64().m_value;
1335 switch (value) {
1336 case 0xffff:
1337 case 0xffffff:
1338 case 0xffffffffL:
1339 case 0xffffffffffL:
1340 case 0xffffffffffffL:
1341 case 0xffffffffffffffL:
1342 case 0xffffffffffffffffL:
1343 return false;
1344 default: {
1345 if (value <= 0xff)
1346 return false;
1347 if (~value <= 0xff)
1348 return false;
1349
1350 JSValue jsValue = JSValue::decode(value);
1351 if (jsValue.isInt32())
1352 return shouldBlind(Imm32(jsValue.asInt32()));
1353 if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
1354 return false;
1355
1356 if (!shouldBlindDouble(bitwise_cast<double>(value)))
1357 return false;
1358 }
1359 }
1360
1361 if (!shouldConsiderBlinding())
1362 return false;
1363
1364 return shouldBlindForSpecificArch(value);
1365 }
1366
1367 struct RotatedImm64 {
1368 RotatedImm64(uint64_t v1, uint8_t v2)
1369 : value(v1)
1370 , rotation(v2)
1371 {
1372 }
1373 TrustedImm64 value;
1374 TrustedImm32 rotation;
1375 };
1376
1377 RotatedImm64 rotationBlindConstant(Imm64 imm)
1378 {
1379 uint8_t rotation = generateRotationSeed(sizeof(int64_t) * 8);
1380 uint64_t value = imm.asTrustedImm64().m_value;
1381 value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
1382 return RotatedImm64(value, rotation);
1383 }
1384
1385 void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
1386 {
1387 move(constant.value, dest);
1388 rotateRight64(constant.rotation, dest);
1389 }
1390
1391 void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
1392 {
1393 if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
1394 RegisterID scratchRegister = scratchRegisterForBlinding();
1395 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
1396 convertInt32ToDouble(scratchRegister, dest);
1397 } else
1398 convertInt32ToDouble(imm.asTrustedImm32(), dest);
1399 }
1400
1401 void move(ImmPtr imm, RegisterID dest)
1402 {
1403 if (shouldBlind(imm))
1404 loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
1405 else
1406 move(imm.asTrustedImmPtr(), dest);
1407 }
1408
1409 void move(Imm64 imm, RegisterID dest)
1410 {
1411 if (shouldBlind(imm))
1412 loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
1413 else
1414 move(imm.asTrustedImm64(), dest);
1415 }
1416
1417#if CPU(X86_64) || CPU(ARM64)
1418 void moveDouble(Imm64 imm, FPRegisterID dest)
1419 {
1420 move(imm, scratchRegister());
1421 move64ToDouble(scratchRegister(), dest);
1422 }
1423#endif
1424
1425 void and64(Imm32 imm, RegisterID dest)
1426 {
1427 if (shouldBlind(imm)) {
1428 BlindedImm32 key = andBlindedConstant(imm);
1429 and64(key.value1, dest);
1430 and64(key.value2, dest);
1431 } else
1432 and64(imm.asTrustedImm32(), dest);
1433 }
1434
1435 Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
1436 {
1437 if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
1438 RegisterID scratchRegister = scratchRegisterForBlinding();
1439 loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
1440 return branchPtr(cond, left, scratchRegister);
1441 }
1442 return branchPtr(cond, left, right.asTrustedImmPtr());
1443 }
1444
1445 void storePtr(ImmPtr imm, Address dest)
1446 {
1447 if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
1448 RegisterID scratchRegister = scratchRegisterForBlinding();
1449 loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
1450 storePtr(scratchRegister, dest);
1451 } else
1452 storePtr(imm.asTrustedImmPtr(), dest);
1453 }
1454
1455 void store64(Imm64 imm, Address dest)
1456 {
1457 if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
1458 RegisterID scratchRegister = scratchRegisterForBlinding();
1459 loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
1460 store64(scratchRegister, dest);
1461 } else
1462 store64(imm.asTrustedImm64(), dest);
1463 }
1464
#endif // !CPU(X86_64) && !CPU(ARM64)
1466
1467#if !CPU(X86) && !CPU(X86_64) && !CPU(ARM64)
1468 // We should implement this the right way eventually, but for now, it's fine because it arises so
1469 // infrequently.
1470 void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
1471 {
1472 move(TrustedImm32(0), dest);
1473 Jump falseCase = branchDouble(invert(cond), left, right);
1474 move(TrustedImm32(1), dest);
1475 falseCase.link(this);
1476 }
1477#endif
1478
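    // lea32/lea64 compute an effective address without loading from it; e.g.
    // lea32(Address(base, 16), dest) leaves base + 16 in dest.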
1479 void lea32(Address address, RegisterID dest)
1480 {
1481 add32(TrustedImm32(address.offset), address.base, dest);
1482 }
1483
1484#if CPU(X86_64) || CPU(ARM64)
1485 void lea64(Address address, RegisterID dest)
1486 {
1487 add64(TrustedImm32(address.offset), address.base, dest);
1488 }
1489#endif // CPU(X86_64) || CPU(ARM64)
1490
1491 bool shouldBlind(Imm32 imm)
1492 {
1493#if ENABLE(FORCED_JIT_BLINDING)
1494 UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // whether we've broken blinding during patch development.
1497 return true;
1498#else // ENABLE(FORCED_JIT_BLINDING)
1499
1500 // First off we'll special case common, "safe" values to avoid hurting
1501 // performance too much
1502 uint32_t value = imm.asTrustedImm32().m_value;
1503 switch (value) {
1504 case 0xffff:
1505 case 0xffffff:
1506 case 0xffffffff:
1507 return false;
1508 default:
1509 if (value <= 0xff)
1510 return false;
1511 if (~value <= 0xff)
1512 return false;
1513 }
1514
1515 if (!shouldConsiderBlinding())
1516 return false;
1517
1518 return shouldBlindForSpecificArch(value);
1519#endif // ENABLE(FORCED_JIT_BLINDING)
1520 }
1521
1522 struct BlindedImm32 {
1523 BlindedImm32(int32_t v1, int32_t v2)
1524 : value1(v1)
1525 , value2(v2)
1526 {
1527 }
1528 TrustedImm32 value1;
1529 TrustedImm32 value2;
1530 };
1531
1532 uint32_t keyForConstant(uint32_t value, uint32_t& mask)
1533 {
1534 uint32_t key = random();
1535 if (value <= 0xff)
1536 mask = 0xff;
1537 else if (value <= 0xffff)
1538 mask = 0xffff;
1539 else if (value <= 0xffffff)
1540 mask = 0xffffff;
1541 else
1542 mask = 0xffffffff;
1543 return key & mask;
1544 }
1545
1546 uint32_t keyForConstant(uint32_t value)
1547 {
1548 uint32_t mask = 0;
1549 return keyForConstant(value, mask);
1550 }
1551
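    // Each of the helpers below splits a constant into two parts that only recombine into the
    // original value under the matching operation:
    //     v == (v ^ k) ^ k                          (xorBlindConstant)
    //     v == (v - k) + k                          (additionBlindedConstant)
    //     v == ((v & k) | ~k) & ((v & ~k) | k)      (andBlindedConstant, masked to the value's width)
    //     v == (v & k) | (v & ~k)                   (orBlindedConstant)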
1552 BlindedImm32 xorBlindConstant(Imm32 imm)
1553 {
1554 uint32_t baseValue = imm.asTrustedImm32().m_value;
1555 uint32_t key = keyForConstant(baseValue);
1556 return BlindedImm32(baseValue ^ key, key);
1557 }
1558
1559 BlindedImm32 additionBlindedConstant(Imm32 imm)
1560 {
1561 // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
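        // For example, a 4-byte-aligned immediate is split into two 4-byte-aligned parts, and a
        // 2-byte-aligned one into two 2-byte-aligned parts.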
1562 static const uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
1563
1564 uint32_t baseValue = imm.asTrustedImm32().m_value;
1565 uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
1566 if (key > baseValue)
1567 key = key - baseValue;
1568 return BlindedImm32(baseValue - key, key);
1569 }
1570
1571 BlindedImm32 andBlindedConstant(Imm32 imm)
1572 {
1573 uint32_t baseValue = imm.asTrustedImm32().m_value;
1574 uint32_t mask = 0;
1575 uint32_t key = keyForConstant(baseValue, mask);
1576 ASSERT((baseValue & mask) == baseValue);
1577 return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
1578 }
1579
1580 BlindedImm32 orBlindedConstant(Imm32 imm)
1581 {
1582 uint32_t baseValue = imm.asTrustedImm32().m_value;
1583 uint32_t mask = 0;
1584 uint32_t key = keyForConstant(baseValue, mask);
1585 ASSERT((baseValue & mask) == baseValue);
1586 return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
1587 }
1588
1589 void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
1590 {
1591 move(constant.value1, dest);
1592 xor32(constant.value2, dest);
1593 }
1594
1595 void add32(Imm32 imm, RegisterID dest)
1596 {
1597 if (shouldBlind(imm)) {
1598 BlindedImm32 key = additionBlindedConstant(imm);
1599 add32(key.value1, dest);
1600 add32(key.value2, dest);
1601 } else
1602 add32(imm.asTrustedImm32(), dest);
1603 }
1604
1605 void add32(Imm32 imm, RegisterID src, RegisterID dest)
1606 {
1607 if (shouldBlind(imm)) {
1608 BlindedImm32 key = additionBlindedConstant(imm);
1609 add32(key.value1, src, dest);
1610 add32(key.value2, dest);
1611 } else
1612 add32(imm.asTrustedImm32(), src, dest);
1613 }
1614
1615 void addPtr(Imm32 imm, RegisterID dest)
1616 {
1617 if (shouldBlind(imm)) {
1618 BlindedImm32 key = additionBlindedConstant(imm);
1619 addPtr(key.value1, dest);
1620 addPtr(key.value2, dest);
1621 } else
1622 addPtr(imm.asTrustedImm32(), dest);
1623 }
1624
1625 void mul32(Imm32 imm, RegisterID src, RegisterID dest)
1626 {
1627 if (shouldBlind(imm)) {
1628 if (src != dest || haveScratchRegisterForBlinding()) {
1629 if (src == dest) {
1630 move(src, scratchRegisterForBlinding());
1631 src = scratchRegisterForBlinding();
1632 }
1633 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1634 mul32(src, dest);
1635 return;
1636 }
1637 // If we don't have a scratch register available for use, we'll just
1638 // place a random number of nops.
1639 uint32_t nopCount = random() & 3;
1640 while (nopCount--)
1641 nop();
1642 }
1643 mul32(imm.asTrustedImm32(), src, dest);
1644 }
1645
1646 void and32(Imm32 imm, RegisterID dest)
1647 {
1648 if (shouldBlind(imm)) {
1649 BlindedImm32 key = andBlindedConstant(imm);
1650 and32(key.value1, dest);
1651 and32(key.value2, dest);
1652 } else
1653 and32(imm.asTrustedImm32(), dest);
1654 }
1655
1656 void andPtr(Imm32 imm, RegisterID dest)
1657 {
1658 if (shouldBlind(imm)) {
1659 BlindedImm32 key = andBlindedConstant(imm);
1660 andPtr(key.value1, dest);
1661 andPtr(key.value2, dest);
1662 } else
1663 andPtr(imm.asTrustedImm32(), dest);
1664 }
1665
1666 void and32(Imm32 imm, RegisterID src, RegisterID dest)
1667 {
1668 if (shouldBlind(imm)) {
1669 if (src == dest)
1670 return and32(imm.asTrustedImm32(), dest);
1671 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1672 and32(src, dest);
1673 } else
1674 and32(imm.asTrustedImm32(), src, dest);
1675 }
1676
1677 void move(Imm32 imm, RegisterID dest)
1678 {
1679 if (shouldBlind(imm))
1680 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1681 else
1682 move(imm.asTrustedImm32(), dest);
1683 }
1684
1685 void or32(Imm32 imm, RegisterID src, RegisterID dest)
1686 {
1687 if (shouldBlind(imm)) {
1688 if (src == dest)
1689 return or32(imm, dest);
1690 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1691 or32(src, dest);
1692 } else
1693 or32(imm.asTrustedImm32(), src, dest);
1694 }
1695
1696 void or32(Imm32 imm, RegisterID dest)
1697 {
1698 if (shouldBlind(imm)) {
1699 BlindedImm32 key = orBlindedConstant(imm);
1700 or32(key.value1, dest);
1701 or32(key.value2, dest);
1702 } else
1703 or32(imm.asTrustedImm32(), dest);
1704 }
1705
1706 void poke(Imm32 value, int index = 0)
1707 {
1708 store32(value, addressForPoke(index));
1709 }
1710
1711 void poke(ImmPtr value, int index = 0)
1712 {
1713 storePtr(value, addressForPoke(index));
1714 }
1715
1716#if CPU(X86_64) || CPU(ARM64)
1717 void poke(Imm64 value, int index = 0)
1718 {
1719 store64(value, addressForPoke(index));
1720 }
#endif // CPU(X86_64) || CPU(ARM64)
1722
1723 void store32(Imm32 imm, Address dest)
1724 {
1725 if (shouldBlind(imm)) {
1726#if CPU(X86) || CPU(X86_64)
1727 BlindedImm32 blind = xorBlindConstant(imm);
1728 store32(blind.value1, dest);
1729 xor32(blind.value2, dest);
1730#else // CPU(X86) || CPU(X86_64)
1731 if (haveScratchRegisterForBlinding()) {
1732 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
1733 store32(scratchRegisterForBlinding(), dest);
1734 } else {
1735 // If we don't have a scratch register available for use, we'll just
1736 // place a random number of nops.
1737 uint32_t nopCount = random() & 3;
1738 while (nopCount--)
1739 nop();
1740 store32(imm.asTrustedImm32(), dest);
1741 }
1742#endif // CPU(X86) || CPU(X86_64)
1743 } else
1744 store32(imm.asTrustedImm32(), dest);
1745 }
1746
1747 void sub32(Imm32 imm, RegisterID dest)
1748 {
1749 if (shouldBlind(imm)) {
1750 BlindedImm32 key = additionBlindedConstant(imm);
1751 sub32(key.value1, dest);
1752 sub32(key.value2, dest);
1753 } else
1754 sub32(imm.asTrustedImm32(), dest);
1755 }
1756
1757 void subPtr(Imm32 imm, RegisterID dest)
1758 {
1759 if (shouldBlind(imm)) {
1760 BlindedImm32 key = additionBlindedConstant(imm);
1761 subPtr(key.value1, dest);
1762 subPtr(key.value2, dest);
1763 } else
1764 subPtr(imm.asTrustedImm32(), dest);
1765 }
1766
1767 void xor32(Imm32 imm, RegisterID src, RegisterID dest)
1768 {
1769 if (shouldBlind(imm)) {
1770 BlindedImm32 blind = xorBlindConstant(imm);
1771 xor32(blind.value1, src, dest);
1772 xor32(blind.value2, dest);
1773 } else
1774 xor32(imm.asTrustedImm32(), src, dest);
1775 }
1776
1777 void xor32(Imm32 imm, RegisterID dest)
1778 {
1779 if (shouldBlind(imm)) {
1780 BlindedImm32 blind = xorBlindConstant(imm);
1781 xor32(blind.value1, dest);
1782 xor32(blind.value2, dest);
1783 } else
1784 xor32(imm.asTrustedImm32(), dest);
1785 }
1786
1787 Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
1788 {
1789 if (shouldBlind(right)) {
1790 if (haveScratchRegisterForBlinding()) {
1791 loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
1792 return branch32(cond, left, scratchRegisterForBlinding());
1793 }
1794 // If we don't have a scratch register available for use, we'll just
1795 // place a random number of nops.
1796 uint32_t nopCount = random() & 3;
1797 while (nopCount--)
1798 nop();
1799 return branch32(cond, left, right.asTrustedImm32());
1800 }
1801
1802 return branch32(cond, left, right.asTrustedImm32());
1803 }
1804
1805 void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
1806 {
1807 if (shouldBlind(right)) {
1808 if (left != dest || haveScratchRegisterForBlinding()) {
1809 RegisterID blindedConstantReg = dest;
1810 if (left == dest)
1811 blindedConstantReg = scratchRegisterForBlinding();
1812 loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
1813 compare32(cond, left, blindedConstantReg, dest);
1814 return;
1815 }
1816 // If we don't have a scratch register available for use, we'll just
1817 // place a random number of nops.
1818 uint32_t nopCount = random() & 3;
1819 while (nopCount--)
1820 nop();
1821 compare32(cond, left, right.asTrustedImm32(), dest);
1822 return;
1823 }
1824
1825 compare32(cond, left, right.asTrustedImm32(), dest);
1826 }
1827
1828 Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
1829 {
1830 if (shouldBlind(imm)) {
1831 if (src != dest || haveScratchRegisterForBlinding()) {
1832 if (src == dest) {
1833 move(src, scratchRegisterForBlinding());
1834 src = scratchRegisterForBlinding();
1835 }
1836 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1837 return branchAdd32(cond, src, dest);
1838 }
1839 // If we don't have a scratch register available for use, we'll just
1840 // place a random number of nops.
1841 uint32_t nopCount = random() & 3;
1842 while (nopCount--)
1843 nop();
1844 }
1845 return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
1846 }
1847
1848 Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
1849 {
1850 if (src == dest)
1851 ASSERT(haveScratchRegisterForBlinding());
1852
1853 if (shouldBlind(imm)) {
1854 if (src == dest) {
1855 move(src, scratchRegisterForBlinding());
1856 src = scratchRegisterForBlinding();
1857 }
1858 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1859 return branchMul32(cond, src, dest);
1860 }
1861 return branchMul32(cond, src, imm.asTrustedImm32(), dest);
1862 }
1863
    // branchSub32 takes a scratch register because 32-bit platforms make use of this
    // with src == dest, and on x86-32 we don't have a platform scratch register.
1866 Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
1867 {
1868 if (shouldBlind(imm)) {
1869 ASSERT(scratch != dest);
1870 ASSERT(scratch != src);
1871 loadXorBlindedConstant(xorBlindConstant(imm), scratch);
1872 return branchSub32(cond, src, scratch, dest);
1873 }
1874 return branchSub32(cond, src, imm.asTrustedImm32(), dest);
1875 }
1876
1877 void lshift32(Imm32 imm, RegisterID dest)
1878 {
1879 lshift32(trustedImm32ForShift(imm), dest);
1880 }
1881
1882 void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
1883 {
1884 lshift32(src, trustedImm32ForShift(amount), dest);
1885 }
1886
1887 void rshift32(Imm32 imm, RegisterID dest)
1888 {
1889 rshift32(trustedImm32ForShift(imm), dest);
1890 }
1891
1892 void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
1893 {
1894 rshift32(src, trustedImm32ForShift(amount), dest);
1895 }
1896
1897 void urshift32(Imm32 imm, RegisterID dest)
1898 {
1899 urshift32(trustedImm32ForShift(imm), dest);
1900 }
1901
1902 void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
1903 {
1904 urshift32(src, trustedImm32ForShift(amount), dest);
1905 }
1906
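    // Strength-reduce multiplication by a power of two into a shift; e.g. multiplying by 8
    // becomes a left shift by 3.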
1907 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
1908 {
1909 if (hasOneBitSet(imm.m_value)) {
1910 lshift32(src, TrustedImm32(getLSBSet(imm.m_value)), dest);
1911 return;
1912 }
1913 MacroAssemblerBase::mul32(imm, src, dest);
1914 }
1915
    // If the resulting jump is taken, that means the assert passed.
1917 void jitAssert(const WTF::ScopedLambda<Jump(void)>&);
1918
1919#if ENABLE(MASM_PROBE)
1920 // This function emits code to preserve the CPUState (e.g. registers),
1921 // call a user supplied probe function, and restore the CPUState before
1922 // continuing with other JIT generated code.
1923 //
1924 // The user supplied probe function will be called with a single pointer to
1925 // a Probe::State struct (defined below) which contains, among other things,
1926 // the preserved CPUState. This allows the user probe function to inspect
1927 // the CPUState at that point in the JIT generated code.
1928 //
1929 // If the user probe function alters the register values in the Probe::State,
1930 // the altered values will be loaded into the CPU registers when the probe
1931 // returns.
1932 //
1933 // The Probe::State is stack allocated and is only valid for the duration
1934 // of the call to the user probe function.
1935 //
1936 // The probe function may choose to move the stack pointer (in any direction).
1937 // To do this, the probe function needs to set the new sp value in the CPUState.
1938 //
1939 // The probe function may also choose to fill stack space with some values.
1940 // To do this, the probe function must first:
1941 // 1. Set the new sp value in the Probe::State's CPUState.
1942 // 2. Set the Probe::State's initializeStackFunction to a Probe::Function callback
1943 // which will do the work of filling in the stack values after the probe
1944 // trampoline has adjusted the machine stack pointer.
1945 // 3. Set the Probe::State's initializeStackArgs to any value that the client wants
1946 // to pass to the initializeStackFunction callback.
1947 // 4. Return from the probe function.
1948 //
    // Upon returning from the probe function, the probe trampoline will adjust the
    // stack pointer based on the sp value in CPUState. If initializeStackFunction
    // is not set, the probe trampoline will restore registers and return to its caller.
1952 //
1953 // If initializeStackFunction is set, the trampoline will move the Probe::State
1954 // beyond the range of the stack pointer i.e. it will place the new Probe::State at
1955 // an address lower than where CPUState.sp() points. This ensures that the
1956 // Probe::State will not be trashed by the initializeStackFunction when it writes to
1957 // the stack. Then, the trampoline will call back to the initializeStackFunction
1958 // Probe::Function to let it fill in the stack values as desired. The
1959 // initializeStackFunction Probe::Function will be passed the moved Probe::State at
1960 // the new location.
1961 //
    // initializeStackFunction may now write to the stack at addresses greater than or
    // equal to CPUState.sp(), but not below that. initializeStackFunction is also
1964 // not allowed to change CPUState.sp(). If the initializeStackFunction does not
1965 // abide by these rules, then behavior is undefined, and bad things may happen.
1966 //
1967 // Note: this version of probe() should be implemented by the target specific
1968 // MacroAssembler.
1969 void probe(Probe::Function, void* arg);
1970
1971 JS_EXPORT_PRIVATE void probe(Function<void(Probe::Context&)>);
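
    // Illustrative sketch of how the overload above might be used (assuming a MacroAssembler
    // named `jit`); the lambda runs when the generated code reaches the probe:
    //     jit.probe([] (Probe::Context& context) {
    //         // ... inspect or modify the captured CPU state via `context` here ...
    //     });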
1972
    // Lets you print from your JIT-generated code.
    // See comments in MacroAssemblerPrinter.h for examples of how to use this.
1975 template<typename... Arguments>
1976 void print(Arguments&&... args);
1977
1978 void print(Printer::PrintRecordList*);
1979#endif // ENABLE(MASM_PROBE)
1980};
1981
1982} // namespace JSC
1983
1984namespace WTF {
1985
1986class PrintStream;
1987
1988void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
1989void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
1990void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);
1991
1992} // namespace WTF
1993
1994#else // ENABLE(ASSEMBLER)
1995
1996namespace JSC {
1997
1998// If there is no assembler for this platform, at least allow code to make references to
1999// some of the things it would otherwise define, albeit without giving that code any way
2000// of doing anything useful.
2001class MacroAssembler {
2002private:
2003 MacroAssembler() { }
2004
2005public:
2006
2007 enum RegisterID : int8_t { NoRegister, InvalidGPRReg = -1 };
2008 enum FPRegisterID : int8_t { NoFPRegister, InvalidFPRReg = -1 };
2009};
2010
2011} // namespace JSC
2012
2013#endif // ENABLE(ASSEMBLER)
2014