/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(ASSEMBLER)

#include "JSCJSValue.h"

#if CPU(ARM_THUMB2)
#define TARGET_ASSEMBLER ARMv7Assembler
#define TARGET_MACROASSEMBLER MacroAssemblerARMv7
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64E)
#define TARGET_ASSEMBLER ARM64EAssembler
#define TARGET_MACROASSEMBLER MacroAssemblerARM64E
#include "MacroAssemblerARM64E.h"

#elif CPU(ARM64)
#define TARGET_ASSEMBLER ARM64Assembler
#define TARGET_MACROASSEMBLER MacroAssemblerARM64
#include "MacroAssemblerARM64.h"

#elif CPU(MIPS)
#define TARGET_ASSEMBLER MIPSAssembler
#define TARGET_MACROASSEMBLER MacroAssemblerMIPS
#include "MacroAssemblerMIPS.h"

#elif CPU(X86)
#define TARGET_ASSEMBLER X86Assembler
#define TARGET_MACROASSEMBLER MacroAssemblerX86
#include "MacroAssemblerX86.h"

#elif CPU(X86_64)
#define TARGET_ASSEMBLER X86Assembler
#define TARGET_MACROASSEMBLER MacroAssemblerX86_64
#include "MacroAssemblerX86_64.h"

#else
#error "The MacroAssembler is not supported on this platform."
#endif

#include "MacroAssemblerHelpers.h"

namespace WTF {

template<typename FunctionType>
class ScopedLambda;

} // namespace WTF

namespace JSC {

#if ENABLE(MASM_PROBE)
namespace Probe {

class Context;
typedef void (*Function)(Context&);

} // namespace Probe
#endif // ENABLE(MASM_PROBE)

namespace Printer {

struct PrintRecord;
typedef Vector<PrintRecord> PrintRecordList;

} // namespace Printer

using MacroAssemblerBase = TARGET_MACROASSEMBLER;

class MacroAssembler : public MacroAssemblerBase {
public:

    static constexpr RegisterID nextRegister(RegisterID reg)
    {
        return static_cast<RegisterID>(reg + 1);
    }

    static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
    {
        return static_cast<FPRegisterID>(reg + 1);
    }

    static constexpr unsigned registerIndex(RegisterID reg)
    {
        return reg - firstRegister();
    }

    static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
    {
        return reg - firstFPRegister();
    }

    static constexpr unsigned registerIndex(FPRegisterID reg)
    {
        return fpRegisterIndex(reg) + numberOfRegisters();
    }

    static constexpr unsigned totalNumberOfRegisters()
    {
        return numberOfRegisters() + numberOfFPRegisters();
    }

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::farJump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::compare32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::moveDouble;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::mul32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64) || CPU(MIPS)
    using MacroAssemblerBase::branchPtr;
#endif
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;

    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == static_cast<int32_t>(value);
    }

    static const double twoToThe32; // This is super useful for some double code.

    // Utilities used by the DFG JIT.
    using AbstractMacroAssemblerBase::invert;
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return DoubleEqual; // make compiler happy
    }

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
        case Signed:
        case PositiveOrZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        case Signed:
            return PositiveOrZero;
        case PositiveOrZero:
            return Signed;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }

    static RelationalCondition flip(RelationalCondition cond)
    {
        switch (cond) {
        case Equal:
        case NotEqual:
            return cond;
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return Equal;
    }

    static bool isSigned(RelationalCondition cond)
    {
        return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
    }

    static bool isUnsigned(RelationalCondition cond)
    {
        return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
    }

    static bool isSigned(ResultCondition cond)
    {
        return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
    }

    static bool isUnsigned(ResultCondition cond)
    {
        return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
    }

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }

    void poke(FPRegisterID src, int index = 0)
    {
        storeDouble(src, addressForPoke(index));
    }
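
    // Illustrative usage sketch (not part of the original header; "jit" and
    // "someReg" are placeholder names): peek/poke address stack slots relative
    // to the stack pointer, so a caller might reserve space and fill it like this:
    //
    //     jit.subPtr(TrustedImm32(2 * sizeof(void*)), stackPointerRegister);
    //     jit.poke(someReg, 0);          // write to sp + 0 * sizeof(void*)
    //     jit.poke(TrustedImm32(42), 1); // write to sp + 1 * sizeof(void*)
    //     jit.peek(someReg, 1);          // read the second slot back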

#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        push(imm);
    }
    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }
    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)

#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
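
    // For example (illustrative only): a requested shift of 37 is masked to
    // 37 & 31 == 5, so lshift32(trustedImm32ForShift(Imm32(37)), reg) emits a
    // shift by 5 rather than an out-of-range shift amount.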

    // Backward branches; these are currently all implemented using existing forward branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right)
    {
        return branch32(flip(cond), right, left);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
    {
        compare32(commute(cond), right, left, dest);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }
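
    // Minimal backward-branch sketch (illustrative only; "jit" and "counter"
    // are placeholders): link a conditional branch back to an earlier label to
    // form a loop that counts a register down to zero.
    //
    //     Label loopTop = jit.label();
    //     jit.sub32(TrustedImm32(1), counter);
    //     jit.branch32(NotEqual, counter, TrustedImm32(0), loopTop);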

#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(nullptr))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }

    PatchableJump patchableBranch8(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch8(cond, address, imm));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, address, imm));
    }
#endif

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition: returns a new condition that will produce
    // the same results given the same inputs, but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
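
    // For instance (illustrative only): the branch32(RelationalCondition, TrustedImm32, RegisterID)
    // overload above rewrites "imm < reg" as "reg > imm" by calling
    // branch32(commute(LessThan), reg, imm), i.e. branch32(GreaterThan, reg, imm).
    // Equal and NotEqual are unaffected by swapping the operands.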

    void oops()
    {
        abortWithReason(B3Oops);
    }

    // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return
    // consumes some register in some way.
    void retVoid() { ret(); }
    void ret32(RegisterID) { ret(); }
    void ret64(RegisterID) { ret(); }
    void retFloat(FPRegisterID) { ret(); }
    void retDouble(FPRegisterID) { ret(); }

    static constexpr unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }

    void move(Address src, Address dest, RegisterID scratch)
    {
        loadPtr(src, scratch);
        storePtr(scratch, dest);
    }

    void move32(Address src, Address dest, RegisterID scratch)
    {
        load32(src, scratch);
        store32(scratch, dest);
    }

    void moveFloat(Address src, Address dest, FPRegisterID scratch)
    {
        loadFloat(src, scratch);
        storeFloat(scratch, dest);
    }

    // Overload mostly for use in templates.
    void move(FPRegisterID src, FPRegisterID dest)
    {
        moveDouble(src, dest);
    }

    void moveDouble(Address src, Address dest, FPRegisterID scratch)
    {
        loadDouble(src, scratch);
        storeDouble(scratch, dest);
    }

    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
    {
        add32(left, right, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and32(TrustedImm32(imm), srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift32(trustedImm32ForShift(imm), srcDest);
    }

    void lshiftPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        lshift32(imm, srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift32(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        rshift32(imm, srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift32(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(RegisterID shiftAmount, RegisterID srcDest)
    {
        urshift32(shiftAmount, srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void negPtr(RegisterID src, RegisterID dest)
    {
        neg32(src, dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    void xorPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        xor32(TrustedImm32(imm), srcDest);
    }

    void xorPtr(Address src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

#if ENABLE(FAST_TLS_JIT)
    void loadFromTLSPtr(uint32_t offset, RegisterID dst)
    {
        loadFromTLS32(offset, dst);
    }

    void storeToTLSPtr(RegisterID src, uint32_t offset)
    {
        storeToTLS32(src, offset);
    }
#endif

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store32(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }

#else // !CPU(X86_64) && !CPU(ARM64)

    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
    {
        add64(left, right, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift64(trustedImm32ForShift(imm), srcDest);
    }

    void lshiftPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        lshift64(imm, srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift64(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        rshift64(imm, srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift64(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(RegisterID shiftAmount, RegisterID srcDest)
    {
        urshift64(shiftAmount, srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void negPtr(RegisterID src, RegisterID dest)
    {
        neg64(src, dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(Address src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }

    // FIXME: Look into making the need for a scratch register explicit, or providing the option to specify a scratch register.
    void xorPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        xor64(TrustedImm64(imm), srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

#if ENABLE(FAST_TLS_JIT)
    void loadFromTLSPtr(uint32_t offset, RegisterID dst)
    {
        loadFromTLS64(offset, dst);
    }
    void storeToTLSPtr(RegisterID src, uint32_t offset)
    {
        storeToTLS64(src, offset);
    }
#endif

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store64(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }

    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }
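
    // Worked example (illustrative): 2.5 is finite, scales to 20.0 with a zero
    // fractional part, and 2.5 <= 0xff, so it is treated as safe and never
    // blinded; 3.141592 leaves a non-zero fractional part after scaling by 8,
    // so it is only left unblinded when shouldConsiderBlinding() declines.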

    bool shouldBlindPointerForSpecificArch(uintptr_t value)
    {
        if (sizeof(void*) == 4)
            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
    }

    bool shouldBlind(ImmPtr imm)
    {
        if (!canBlind())
            return false;

#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindPointerForSpecificArch(static_cast<uintptr_t>(value));
    }

    uint8_t generateRotationSeed(size_t widthInBits)
    {
        // Generate the seed in [1, widthInBits - 1]. We should not generate widthInBits or 0
        // since it leads to `<< widthInBits` or `>> widthInBits`, which cause undefined behavior.
        return (random() % (widthInBits - 1)) + 1;
    }

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = generateRotationSeed(sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
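
    // Illustrative sketch: for a pointer P and a rotation r in [1, 63], the
    // constant embedded in the instruction stream is P rotated left by r, and
    // the generated move + rotateRightPtr(r) recovers P. For example, with
    // r == 8, 0x0000000012345678 is embedded as 0x0000001234567800.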

    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = generateRotationSeed(sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

#if CPU(X86_64) || CPU(ARM64)
    void moveDouble(Imm64 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister());
        move64ToDouble(scratchRegister(), dest);
    }
#endif

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif // !CPU(X86_64) && !CPU(ARM64)

#if !CPU(X86) && !CPU(X86_64) && !CPU(ARM64)
    // We should implement this the right way eventually, but for now, it's fine because it arises so
    // infrequently.
    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        move(TrustedImm32(0), dest);
        Jump falseCase = branchDouble(invert(cond), left, right);
        move(TrustedImm32(1), dest);
        falseCase.link(this);
    }
#endif

    void lea32(Address address, RegisterID dest)
    {
        add32(TrustedImm32(address.offset), address.base, dest);
    }

#if CPU(X86_64) || CPU(ARM64)
    void lea64(Address address, RegisterID dest)
    {
        add64(TrustedImm32(address.offset), address.base, dest);
    }
#endif // CPU(X86_64) || CPU(ARM64)

    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }

    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }

    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static const uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
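
    // Worked example (illustrative): for imm == 0x1234 the low two bits are 00,
    // so the key is masked with 0xfffffffc to stay pointer-aligned. A key of
    // 0x0230 splits the constant into 0x1004 and 0x0230; the generated code
    // adds the two parts, and each part preserves the original alignment.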

    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }

    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
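
    // Illustrative sketch: to materialize a blinded 0x1234 with key 0x0f0f,
    // xorBlindConstant() yields the pair (0x1234 ^ 0x0f0f, 0x0f0f), i.e.
    // (0x1d3b, 0x0f0f), and the generated code is
    //
    //     move(TrustedImm32(0x1d3b), dest);
    //     xor32(TrustedImm32(0x0f0f), dest); // dest now holds 0x1234
    //
    // so the plain constant never appears directly in the instruction stream.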

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, src, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), src, dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src != dest || haveScratchRegisterForBlinding()) {
                if (src == dest) {
                    move(src, scratchRegisterForBlinding());
                    src = scratchRegisterForBlinding();
                }
                loadXorBlindedConstant(xorBlindConstant(imm), dest);
                mul32(src, dest);
                return;
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
        }
        mul32(imm.asTrustedImm32(), src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64) || CPU(ARM64)

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else // CPU(X86) || CPU(X86_64)
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
                store32(scratchRegisterForBlinding(), dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif // CPU(X86) || CPU(X86_64)
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
                return branch32(cond, left, scratchRegisterForBlinding());
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

    void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (shouldBlind(right)) {
            if (left != dest || haveScratchRegisterForBlinding()) {
                RegisterID blindedConstantReg = dest;
                if (left == dest)
                    blindedConstantReg = scratchRegisterForBlinding();
                loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
                compare32(cond, left, blindedConstantReg, dest);
                return;
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            compare32(cond, left, right.asTrustedImm32(), dest);
            return;
        }

        compare32(cond, left, right.asTrustedImm32(), dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src != dest || haveScratchRegisterForBlinding()) {
                if (src == dest) {
                    move(src, scratchRegisterForBlinding());
                    src = scratchRegisterForBlinding();
                }
                loadXorBlindedConstant(xorBlindConstant(imm), dest);
                return branchAdd32(cond, src, dest);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, src, imm.asTrustedImm32(), dest);
    }

    // branchSub32 takes a scratch register because 32-bit platforms make use of this
    // with src == dest, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (hasOneBitSet(imm.m_value)) {
            lshift32(src, TrustedImm32(getLSBSet(imm.m_value)), dest);
            return;
        }
        MacroAssemblerBase::mul32(imm, src, dest);
    }
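
    // For example (illustrative only): mul32(TrustedImm32(8), src, dest) above
    // reduces to lshift32(src, TrustedImm32(3), dest) because 8 has a single
    // bit set; a non-power-of-two multiplier such as 10 falls through to the
    // base implementation.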

    // If the result jump is taken, that means the assert passed.
    void jitAssert(const WTF::ScopedLambda<Jump(void)>&);

#if ENABLE(MASM_PROBE)
    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user supplied probe function, and restore the CPUState before
    // continuing with other JIT generated code.
    //
    // The user supplied probe function will be called with a single pointer to
    // a Probe::State struct (defined below) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT generated code.
    //
    // If the user probe function alters the register values in the Probe::State,
    // the altered values will be loaded into the CPU registers when the probe
    // returns.
    //
    // The Probe::State is stack allocated and is only valid for the duration
    // of the call to the user probe function.
    //
    // The probe function may choose to move the stack pointer (in any direction).
    // To do this, the probe function needs to set the new sp value in the CPUState.
    //
    // The probe function may also choose to fill stack space with some values.
    // To do this, the probe function must first:
    // 1. Set the new sp value in the Probe::State's CPUState.
    // 2. Set the Probe::State's initializeStackFunction to a Probe::Function callback
    //    which will do the work of filling in the stack values after the probe
    //    trampoline has adjusted the machine stack pointer.
    // 3. Set the Probe::State's initializeStackArgs to any value that the client wants
    //    to pass to the initializeStackFunction callback.
    // 4. Return from the probe function.
    //
    // Upon returning from the probe function, the probe trampoline will adjust the
    // stack pointer based on the sp value in CPUState. If initializeStackFunction
    // is not set, the probe trampoline will restore registers and return to its caller.
    //
    // If initializeStackFunction is set, the trampoline will move the Probe::State
    // beyond the range of the stack pointer i.e. it will place the new Probe::State at
    // an address lower than where CPUState.sp() points. This ensures that the
    // Probe::State will not be trashed by the initializeStackFunction when it writes to
    // the stack. Then, the trampoline will call back to the initializeStackFunction
    // Probe::Function to let it fill in the stack values as desired. The
    // initializeStackFunction Probe::Function will be passed the moved Probe::State at
    // the new location.
    //
    // initializeStackFunction may now write to the stack at addresses greater or
    // equal to CPUState.sp(), but not below that. initializeStackFunction is also
    // not allowed to change CPUState.sp(). If the initializeStackFunction does not
    // abide by these rules, then behavior is undefined, and bad things may happen.
    //
    // Note: this version of probe() should be implemented by the target specific
    // MacroAssembler.
    void probe(Probe::Function, void* arg);

    JS_EXPORT_PRIVATE void probe(Function<void(Probe::Context&)>);
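
    // Minimal usage sketch (illustrative only, not part of the original header;
    // it relies only on the overload declared above and on WTF's dataLogLn):
    //
    //     jit.probe([] (Probe::Context& context) {
    //         // Inspect or modify the preserved CPUState through "context" here.
    //         dataLogLn("probe hit");
    //     });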

    // Lets you print from your JIT generated code.
    // See comments in MacroAssemblerPrinter.h for examples of how to use this.
    template<typename... Arguments>
    void print(Arguments&&... args);

    void print(Printer::PrintRecordList*);
#endif // ENABLE(MASM_PROBE)
};

} // namespace JSC

namespace WTF {

class PrintStream;

void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);

} // namespace WTF

#else // ENABLE(ASSEMBLER)

namespace JSC {

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:

    enum RegisterID : int8_t { NoRegister, InvalidGPRReg = -1 };
    enum FPRegisterID : int8_t { NoFPRegister, InvalidFPRReg = -1 };
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)