/*
 * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"
#include <array>
#include <wtf/Optional.h>

namespace JSC {

using Assembler = TARGET_ASSEMBLER;

class MacroAssemblerX86Common : public AbstractMacroAssembler<Assembler> {
public:
#if CPU(X86_64)
    // Use this directly only if you're not generating code with it.
    static const X86Registers::RegisterID s_scratchRegister = X86Registers::r11;

    // Use this when generating code, so that any disallowed use of the scratch
    // register is caught by the RELEASE_ASSERT below.
    X86Registers::RegisterID scratchRegister()
    {
        RELEASE_ASSERT(m_allowScratchRegister);
        return s_scratchRegister;
    }
#endif
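
    // Illustrative note (not in the original header): code-generation paths should
    // prefer scratchRegister() over s_scratchRegister, so the RELEASE_ASSERT
    // catches any use of r11 from a context that has disallowed scratch usage, e.g.:
    //     move(imm, scratchRegister()); // asserts if m_allowScratchRegister is false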

protected:
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    // FIXME: it would be neat to rename this to FloatingPointCondition in every assembler.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
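
    // Informal note on how this encoding is consumed (see branchDouble() below):
    // the low bits hold a plain x86 condition code to test after a ucomisd/ucomiss,
    // DoubleConditionBitInvert asks for the comparison operands to be swapped first
    // (so "less than" can reuse the unsigned "above" conditions), and
    // DoubleConditionBitSpecial requests extra parity-flag handling, because an
    // unordered (NaN) comparison sets ZF, PF and CF all at once.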

    static const RegisterID stackPointerRegister = X86Registers::esp;
    static const RegisterID framePointerRegister = X86Registers::ebp;

    static bool canBlind() { return true; }
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).
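    //
    // For example (illustrative only), the same logical add can target a register,
    // a base-plus-offset address, or a base-plus-scaled-index address:
    //     add32(TrustedImm32(1), X86Registers::eax);              // eax += 1
    //     add32(TrustedImm32(1), Address(X86Registers::eax, 8));  // 32-bit add to [eax + 8]
    //     add32(TrustedImm32(1), BaseIndex(X86Registers::eax, X86Registers::edx, TimesFour));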

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add8(TrustedImm32 imm, Address address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        m_assembler.addb_im(imm8.m_value, address.offset, address.base);
    }

    void add8(TrustedImm32 imm, BaseIndex address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        m_assembler.addb_im(imm8.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add16(TrustedImm32 imm, Address address)
    {
        m_assembler.addw_im(imm.m_value, address.offset, address.base);
    }

    void add16(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.addw_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.inc_r(dest);
        else
            m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(BaseIndex src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(RegisterID src, BaseIndex dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add8(RegisterID src, Address dest)
    {
        m_assembler.addb_rm(src, dest.offset, dest.base);
    }

    void add8(RegisterID src, BaseIndex dest)
    {
        m_assembler.addb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add16(RegisterID src, Address dest)
    {
        m_assembler.addw_rm(src, dest.offset, dest.base);
    }

    void add16(RegisterID src, BaseIndex dest)
    {
        m_assembler.addw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (!imm.m_value) {
            zeroExtend32ToPtr(src, dest);
            return;
        }

        if (src == dest) {
            add32(imm, dest);
            return;
        }

        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        x86Lea32(BaseIndex(a, b, TimesOne), dest);
    }

    void x86Lea32(BaseIndex index, RegisterID dest)
    {
        if (!index.scale && !index.offset) {
            if (index.base == dest) {
                add32(index.index, dest);
                return;
            }
            if (index.index == dest) {
                add32(index.base, dest);
                return;
            }
        }
        m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest);
    }
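
    // Informal note: with no scale and no offset, x86Lea32() above degrades to a
    // plain add when either base or index already lives in dest; otherwise a single
    // LEA computes base + index * scale + offset without clobbering either source.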

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, BaseIndex dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void and16(RegisterID src, Address dest)
    {
        m_assembler.andw_rm(src, dest.offset, dest.base);
    }

    void and16(RegisterID src, BaseIndex dest)
    {
        m_assembler.andw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void and8(RegisterID src, Address dest)
    {
        m_assembler.andb_rm(src, dest.offset, dest.base);
    }

    void and8(RegisterID src, BaseIndex dest)
    {
        m_assembler.andb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(BaseIndex src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void and16(Address src, RegisterID dest)
    {
        m_assembler.andw_mr(src.offset, src.base, dest);
    }

    void and16(BaseIndex src, RegisterID dest)
    {
        m_assembler.andw_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void and16(TrustedImm32 imm, Address address)
    {
        m_assembler.andw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base);
    }

    void and16(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.andw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void and8(TrustedImm32 imm, Address address)
    {
        m_assembler.andb_im(static_cast<int8_t>(imm.m_value), address.offset, address.base);
    }

    void and8(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.andb_im(static_cast<int8_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move32IfNeeded(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(Address op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest)
            and32(op1, dest);
        else if (op1.base == dest) {
            load32(op1, dest);
            and32(op2, dest);
        } else {
            zeroExtend32ToPtr(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(RegisterID op1, Address op2, RegisterID dest)
    {
        and32(op2, op1, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        and32(imm, dest);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcnt_rr(src, dst);
            return;
        }
        m_assembler.bsr_rr(src, dst);
        clz32AfterBsr(dst);
    }
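
    // Informal note on the fallback path: BSR writes the index of the highest set
    // bit and leaves its destination undefined for a zero input, so clz32AfterBsr()
    // is expected to turn an index i into 31 - i and substitute 32 for zero inputs,
    // matching LZCNT's semantics.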

    void countLeadingZeros32(Address src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcnt_mr(src.offset, src.base, dst);
            return;
        }
        m_assembler.bsr_mr(src.offset, src.base, dst);
        clz32AfterBsr(dst);
    }

    void countTrailingZeros32(RegisterID src, RegisterID dst)
    {
        if (supportsBMI1()) {
            m_assembler.tzcnt_rr(src, dst);
            return;
        }
        m_assembler.bsf_rr(src, dst);
        ctzAfterBsf<32>(dst);
    }
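
    // Symmetrically, BSF yields the index of the lowest set bit, which already is
    // the trailing-zero count for non-zero inputs; ctzAfterBsf<32> presumably only
    // needs to produce 32 when the input was zero and BSF left the register undefined.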

    void countPopulation32(Address src, RegisterID dst)
    {
        ASSERT(supportsCountPopulation());
        m_assembler.popcnt_mr(src.offset, src.base, dst);
    }

    void countPopulation32(RegisterID src, RegisterID dst)
    {
        ASSERT(supportsCountPopulation());
        m_assembler.popcnt_rr(src, dst);
    }

    void byteSwap32(RegisterID dst)
    {
        m_assembler.bswapl_r(dst);
    }

    void byteSwap16(RegisterID dst)
    {
        m_assembler.rolw_i8r(8, dst);
        zeroExtend16To32(dst, dst);
    }

#if CPU(X86_64)
    void byteSwap64(RegisterID dst)
    {
        m_assembler.bswapq_r(dst);
    }
#endif

    // Only used for testing purposes.
    void illegalInstruction()
    {
        m_assembler.illegalInstruction();
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            ASSERT(shift_amount != dest);
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        move32IfNeeded(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src2 == dest) {
            m_assembler.imull_rr(src1, dest);
            return;
        }
        move32IfNeeded(src1, dest);
        m_assembler.imull_rr(src2, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Address op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest)
            mul32(op1, dest);
        else if (op1.base == dest) {
            load32(op1, dest);
            mul32(op2, dest);
        } else {
            zeroExtend32ToPtr(op2, dest);
            mul32(op1, dest);
        }
    }

    void mul32(RegisterID src1, Address src2, RegisterID dest)
    {
        mul32(src2, src1, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void x86ConvertToDoubleWord32()
    {
        m_assembler.cdq();
    }
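
    // Informal note: CDQ sign-extends eax into edx, forming the edx:eax pair that
    // idivl consumes; the divide then leaves the quotient in eax and the remainder
    // in edx, which is why the checked overloads below assert those exact registers.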

    void x86ConvertToDoubleWord32(RegisterID eax, RegisterID edx)
    {
        ASSERT_UNUSED(eax, eax == X86Registers::eax);
        ASSERT_UNUSED(edx, edx == X86Registers::edx);
        x86ConvertToDoubleWord32();
    }

    void x86Div32(RegisterID denominator)
    {
        m_assembler.idivl_r(denominator);
    }

    void x86Div32(RegisterID eax, RegisterID edx, RegisterID denominator)
    {
        ASSERT_UNUSED(eax, eax == X86Registers::eax);
        ASSERT_UNUSED(edx, edx == X86Registers::edx);
        x86Div32(denominator);
    }

    void x86UDiv32(RegisterID denominator)
    {
        m_assembler.divl_r(denominator);
    }

    void x86UDiv32(RegisterID eax, RegisterID edx, RegisterID denominator)
    {
        ASSERT_UNUSED(eax, eax == X86Registers::eax);
        ASSERT_UNUSED(edx, edx == X86Registers::edx);
        x86UDiv32(denominator);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        m_assembler.negl_r(dest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void neg32(BaseIndex srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base, srcDest.index, srcDest.scale);
    }

    void neg16(Address srcDest)
    {
        m_assembler.negw_m(srcDest.offset, srcDest.base);
    }

    void neg16(BaseIndex srcDest)
    {
        m_assembler.negw_m(srcDest.offset, srcDest.base, srcDest.index, srcDest.scale);
    }

    void neg8(Address srcDest)
    {
        m_assembler.negb_m(srcDest.offset, srcDest.base);
    }

    void neg8(BaseIndex srcDest)
    {
        m_assembler.negb_m(srcDest.offset, srcDest.base, srcDest.index, srcDest.scale);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(RegisterID src, BaseIndex dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void or16(RegisterID src, Address dest)
    {
        m_assembler.orw_rm(src, dest.offset, dest.base);
    }

    void or16(RegisterID src, BaseIndex dest)
    {
        m_assembler.orw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void or8(RegisterID src, Address dest)
    {
        m_assembler.orb_rm(src, dest.offset, dest.base);
    }

    void or8(RegisterID src, BaseIndex dest)
    {
        m_assembler.orb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(BaseIndex src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void or16(TrustedImm32 imm, Address address)
    {
        m_assembler.orw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base);
    }

    void or16(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.orw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void or8(TrustedImm32 imm, Address address)
    {
        m_assembler.orb_im(static_cast<int8_t>(imm.m_value), address.offset, address.base);
    }

    void or8(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.orb_im(static_cast<int8_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move32IfNeeded(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(Address op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest)
            or32(op1, dest);
        else if (op1.base == dest) {
            load32(op1, dest);
            or32(op2, dest);
        } else {
            zeroExtend32ToPtr(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(RegisterID op1, Address op2, RegisterID dest)
    {
        or32(op2, op1, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            ASSERT(shift_amount != dest);

            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        move32IfNeeded(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            ASSERT(shift_amount != dest);

            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        move32IfNeeded(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        urshift32(imm, dest);
    }

    void rotateRight32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.rorl_i8r(imm.m_value, dest);
    }

    void rotateRight32(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.rorl_CLr(dest);
        else {
            ASSERT(src != dest);

            // Can only rotate by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.rorl_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void rotateLeft32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.roll_i8r(imm.m_value, dest);
    }

    void rotateLeft32(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.roll_CLr(dest);
        else {
            ASSERT(src != dest);

            // Can only rotate by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.roll_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(RegisterID left, RegisterID right, RegisterID dest)
    {
        if (dest == right) {
            neg32(dest);
            add32(left, dest);
            return;
        }
        move(left, dest);
        sub32(right, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.dec_r(dest);
        else
            m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub16(TrustedImm32 imm, Address address)
    {
        m_assembler.subw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base);
    }

    void sub8(TrustedImm32 imm, Address address)
    {
        m_assembler.subb_im(static_cast<int8_t>(imm.m_value), address.offset, address.base);
    }

    void sub32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void sub16(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.subw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void sub8(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.subb_im(static_cast<int8_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(BaseIndex src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void sub16(RegisterID src, Address dest)
    {
        m_assembler.subw_rm(src, dest.offset, dest.base);
    }

    void sub8(RegisterID src, Address dest)
    {
        m_assembler.subb_rm(src, dest.offset, dest.base);
    }

    void sub32(RegisterID src, BaseIndex dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void sub16(RegisterID src, BaseIndex dest)
    {
        m_assembler.subw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void sub8(RegisterID src, BaseIndex dest)
    {
        m_assembler.subb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, BaseIndex dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base, dest.index, dest.scale);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor16(TrustedImm32 imm, Address dest)
    {
        imm.m_value = static_cast<int16_t>(imm.m_value);
        if (imm.m_value == -1)
            m_assembler.notw_m(dest.offset, dest.base);
        else
            m_assembler.xorw_im(imm.m_value, dest.offset, dest.base);
    }

    void xor16(TrustedImm32 imm, BaseIndex dest)
    {
        imm.m_value = static_cast<int16_t>(imm.m_value);
        if (imm.m_value == -1)
            m_assembler.notw_m(dest.offset, dest.base, dest.index, dest.scale);
        else
            m_assembler.xorw_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor8(TrustedImm32 imm, Address dest)
    {
        imm.m_value = static_cast<int8_t>(imm.m_value);
        if (imm.m_value == -1)
            m_assembler.notb_m(dest.offset, dest.base);
        else
            m_assembler.xorb_im(imm.m_value, dest.offset, dest.base);
    }

    void xor8(TrustedImm32 imm, BaseIndex dest)
    {
        imm.m_value = static_cast<int8_t>(imm.m_value);
        if (imm.m_value == -1)
            m_assembler.notb_m(dest.offset, dest.base, dest.index, dest.scale);
        else
            m_assembler.xorb_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, BaseIndex dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor16(RegisterID src, Address dest)
    {
        m_assembler.xorw_rm(src, dest.offset, dest.base);
    }

    void xor16(RegisterID src, BaseIndex dest)
    {
        m_assembler.xorw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor8(RegisterID src, Address dest)
    {
        m_assembler.xorb_rm(src, dest.offset, dest.base);
    }

    void xor8(RegisterID src, BaseIndex dest)
    {
        m_assembler.xorb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(BaseIndex src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move32IfNeeded(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(Address op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest)
            xor32(op1, dest);
        else if (op1.base == dest) {
            load32(op1, dest);
            xor32(op2, dest);
        } else {
            zeroExtend32ToPtr(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(RegisterID op1, Address op2, RegisterID dest)
    {
        xor32(op2, op1, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        xor32(imm, dest);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address dest)
    {
        m_assembler.notl_m(dest.offset, dest.base);
    }

    void not32(BaseIndex dest)
    {
        m_assembler.notl_m(dest.offset, dest.base, dest.index, dest.scale);
    }

    void not16(Address dest)
    {
        m_assembler.notw_m(dest.offset, dest.base);
    }

    void not16(BaseIndex dest)
    {
        m_assembler.notw_m(dest.offset, dest.base, dest.index, dest.scale);
    }

    void not8(Address dest)
    {
        m_assembler.notb_m(dest.offset, dest.base);
    }

    void not8(BaseIndex dest)
    {
        m_assembler.notb_m(dest.offset, dest.base, dest.index, dest.scale);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void sqrtDouble(Address src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_mr(src.offset, src.base, dst);
    }

    void sqrtFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtss_rr(src, dst);
    }

    void sqrtFloat(Address src, FPRegisterID dst)
    {
        m_assembler.sqrtss_mr(src.offset, src.base, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }
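
    // Informal note: ANDNPD computes dst = ~dst & src, so with dst preloaded with
    // -0.0 (a mask of just the sign bit) the instruction clears the sign bit of
    // src, which is exactly abs() for IEEE doubles.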

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }
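
    // Informal note: here the same -0.0 constant is used the other way around:
    // XORPD against the sign-bit mask flips the sign of src while leaving the
    // remaining bits of the double intact.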

    void ceilDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void ceilDouble(Address src, FPRegisterID dst)
    {
        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void ceilFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void ceilFloat(Address src, FPRegisterID dst)
    {
        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void floorDouble(Address src, FPRegisterID dst)
    {
        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void floorFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void floorFloat(Address src, FPRegisterID dst)
    {
        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven);
    }

    void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven);
    }

    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardZero);
    }

    void roundTowardZeroDouble(Address src, FPRegisterID dst)
    {
        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero);
    }

    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardZero);
    }

    void roundTowardZeroFloat(Address src, FPRegisterID dst)
    {
        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
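    //
    // For example (illustrative only):
    //     load32(Address(X86Registers::eax, 16), X86Registers::edx);   // edx = 32 bits at [eax + 16]
    //     store32(TrustedImm32(0), BaseIndex(X86Registers::eax, X86Registers::ecx, TimesEight));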

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(ImplicitAddress address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    template<PtrTag tag>
    static void repatchCompact(CodeLocationDataLabelCompact<tag> dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void zeroExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movzbl_rr(src, dest);
    }

    void signExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movsbl_rr(src, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16SignedExtendTo32(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    void zeroExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movzwl_rr(src, dest);
    }

    void signExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movswl_rr(src, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void storeZero32(ImplicitAddress address)
    {
        store32(TrustedImm32(0), address);
    }

    void storeZero32(BaseIndex address)
    {
        store32(TrustedImm32(0), address);
    }

    void storeZero16(ImplicitAddress address)
    {
        store16(TrustedImm32(0), address);
    }

    void storeZero16(BaseIndex address)
    {
        store16(TrustedImm32(0), address);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        m_assembler.movb_i8m(imm8.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        m_assembler.movb_i8m(imm8.m_value, address.offset, address.base, address.index, address.scale);
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
    {
        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
            return X86Registers::eax;

        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
            return X86Registers::ebx;

        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
        return X86Registers::ecx;
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
    {
        if (address.base != X86Registers::eax)
            return X86Registers::eax;

        ASSERT(address.base != X86Registers::edx);
        return X86Registers::edx;
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, Address address)
    {
        m_assembler.movw_rm(src, address.offset, address.base);
    }

    void store16(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    void store16(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movw_im(static_cast<int16_t>(imm.m_value), address.offset, address.base);
    }

    // Floating-point operations:
    //
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.movaps_rr(src, dest);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        m_assembler.movsd_mr(address.asPtr(), dest);
#else
        move(address, scratchRegister());
        loadDouble(scratchRegister(), dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        m_assembler.movss_mr(address.asPtr(), dest);
#else
        move(address, scratchRegister());
        loadFloat(scratchRegister(), dest);
#endif
    }

    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.movss_mr(address.offset, address.base, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.movss_rm(src, address.offset, address.base);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertDoubleToFloat(Address address, FPRegisterID dst)
    {
        m_assembler.cvtsd2ss_mr(address.offset, address.base, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void convertFloatToDouble(Address address, FPRegisterID dst)
    {
        m_assembler.cvtss2sd_mr(address.offset, address.base, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(src, dest, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vaddsd_rr(op1, op2, dest);
        else {
            if (op1 == dest)
                m_assembler.addsd_rr(op2, dest);
            else {
                moveDouble(op2, dest);
                m_assembler.addsd_rr(op1, dest);
            }
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        addDouble(src, dest, dest);
    }

    void addDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vaddsd_mr(op1.offset, op1.base, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.addsd_mr(op1.offset, op1.base, dest);
                return;
            }

            loadDouble(op1, dest);
            addDouble(op2, dest);
        }
    }

    void addDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        addDouble(op2, op1, dest);
    }

    void addDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vaddsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.addsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
                return;
            }
            loadDouble(op1, dest);
            addDouble(op2, dest);
        }
    }

    void addFloat(FPRegisterID src, FPRegisterID dest)
    {
        addFloat(src, dest, dest);
    }

    void addFloat(Address src, FPRegisterID dest)
    {
        addFloat(src, dest, dest);
    }

    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vaddss_rr(op1, op2, dest);
        else {
            if (op1 == dest)
                m_assembler.addss_rr(op2, dest);
            else {
                moveDouble(op2, dest);
                m_assembler.addss_rr(op1, dest);
            }
        }
    }

    void addFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vaddss_mr(op1.offset, op1.base, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.addss_mr(op1.offset, op1.base, dest);
                return;
            }

            loadFloat(op1, dest);
            addFloat(op2, dest);
        }
    }

    void addFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        addFloat(op2, op1, dest);
    }

    void addFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vaddss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.addss_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
                return;
            }
            loadFloat(op1, dest);
            addFloat(op2, dest);
        }
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void divFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.divss_rr(src, dest);
    }

    void divFloat(Address src, FPRegisterID dest)
    {
        m_assembler.divss_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vsubsd_rr(op1, op2, dest);
        else {
            // B := A - B is invalid.
            ASSERT(op1 == dest || op2 != dest);
            moveDouble(op1, dest);
            m_assembler.subsd_rr(op2, dest);
        }
    }

    void subDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vsubsd_mr(op1, op2.offset, op2.base, dest);
        else {
            moveDouble(op1, dest);
            m_assembler.subsd_mr(op2.offset, op2.base, dest);
        }
    }

    void subDouble(FPRegisterID op1, BaseIndex op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vsubsd_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest);
        else {
            moveDouble(op1, dest);
            m_assembler.subsd_mr(op2.offset, op2.base, op2.index, op2.scale, dest);
        }
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subFloat(FPRegisterID src, FPRegisterID dest)
    {
        subFloat(dest, src, dest);
    }

    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vsubss_rr(op1, op2, dest);
        else {
            // B := A - B is invalid.
            ASSERT(op1 == dest || op2 != dest);
            moveDouble(op1, dest);
            m_assembler.subss_rr(op2, dest);
        }
    }

    void subFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vsubss_mr(op1, op2.offset, op2.base, dest);
        else {
            moveDouble(op1, dest);
            m_assembler.subss_mr(op2.offset, op2.base, dest);
        }
    }

    void subFloat(FPRegisterID op1, BaseIndex op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vsubss_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest);
        else {
            moveDouble(op1, dest);
            m_assembler.subss_mr(op2.offset, op2.base, op2.index, op2.scale, dest);
        }
    }

    void subFloat(Address src, FPRegisterID dest)
    {
        subFloat(dest, src, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(src, dest, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vmulsd_rr(op1, op2, dest);
        else {
            if (op1 == dest)
                m_assembler.mulsd_rr(op2, dest);
            else {
                moveDouble(op2, dest);
                m_assembler.mulsd_rr(op1, dest);
            }
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        mulDouble(src, dest, dest);
    }

    void mulDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vmulsd_mr(op1.offset, op1.base, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.mulsd_mr(op1.offset, op1.base, dest);
                return;
            }
            loadDouble(op1, dest);
            mulDouble(op2, dest);
        }
    }

    void mulDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        return mulDouble(op2, op1, dest);
    }

    void mulDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vmulsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.mulsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
                return;
            }
            loadDouble(op1, dest);
            mulDouble(op2, dest);
        }
    }

    void mulFloat(FPRegisterID src, FPRegisterID dest)
    {
        mulFloat(src, dest, dest);
    }

    void mulFloat(Address src, FPRegisterID dest)
    {
        mulFloat(src, dest, dest);
    }

    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vmulss_rr(op1, op2, dest);
        else {
            if (op1 == dest)
                m_assembler.mulss_rr(op2, dest);
            else {
                moveDouble(op2, dest);
                m_assembler.mulss_rr(op1, dest);
            }
        }
    }

    void mulFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vmulss_mr(op1.offset, op1.base, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.mulss_mr(op1.offset, op1.base, dest);
                return;
            }
            loadFloat(op1, dest);
            mulFloat(op2, dest);
        }
    }

    void mulFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        mulFloat(op2, op1, dest);
    }

    void mulFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (supportsAVX())
            m_assembler.vmulss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
        else {
            if (op2 == dest) {
                m_assembler.mulss_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
                return;
            }
            loadFloat(op1, dest);
            mulFloat(op2, dest);
        }
    }

    void andDouble(FPRegisterID src, FPRegisterID dst)
    {
        // ANDPS is defined on 128 bits and is shorter than ANDPD.
        m_assembler.andps_rr(src, dst);
    }

    void andDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            andDouble(src2, dst);
        else {
            moveDouble(src2, dst);
            andDouble(src1, dst);
        }
    }

    void andFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.andps_rr(src, dst);
    }

    void andFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            andFloat(src2, dst);
        else {
            moveDouble(src2, dst);
            andFloat(src1, dst);
        }
    }

    void orDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.orps_rr(src, dst);
    }

    void orDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            orDouble(src2, dst);
        else {
            moveDouble(src2, dst);
            orDouble(src1, dst);
        }
    }

    void orFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.orps_rr(src, dst);
    }

    void orFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            orFloat(src2, dst);
        else {
            moveDouble(src2, dst);
            orFloat(src1, dst);
        }
    }

    void xorDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.xorps_rr(src, dst);
    }

    void xorDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            xorDouble(src2, dst);
        else {
            moveDouble(src2, dst);
            xorDouble(src1, dst);
        }
    }

    void xorFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.xorps_rr(src, dst);
    }

    void xorFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            xorFloat(src2, dst);
        else {
            moveDouble(src2, dst);
            xorFloat(src1, dst);
        }
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2ss_rr(src, dest);
    }

    void convertInt32ToFloat(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2ss_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);
        return jumpAfterFloatingPointCompare(cond, left, right);
    }

    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomiss_rr(left, right);
        else
            m_assembler.ucomiss_rr(right, left);
        return jumpAfterFloatingPointCompare(cond, left, right);
    }

    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        floatingPointCompare(cond, left, right, dest, [this] (FPRegisterID arg1, FPRegisterID arg2) {
            m_assembler.ucomisd_rr(arg1, arg2);
        });
    }

    void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        floatingPointCompare(cond, left, right, dest, [this] (FPRegisterID arg1, FPRegisterID arg2) {
            m_assembler.ucomiss_rr(arg1, arg2);
        });
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
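
    // Informal note: CVTTSD2SI signals failure in-band by writing the "integer
    // indefinite" value 0x80000000 (INT_MIN), which is what the branch above
    // compares against; a genuine INT_MIN input is indistinguishable from a
    // failed truncation, hence the caveat documented above.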

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttsd2si_rr(src, dest);
    }

    void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttss2si_rr(src, dest);
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32-bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
2051 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
2052 {
2053 m_assembler.cvttsd2si_rr(src, dest);
2054
2055 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
2056#if CPU(X86_64)
2057 if (negZeroCheck) {
2058 Jump valueIsNonZero = branchTest32(NonZero, dest);
2059 m_assembler.movmskpd_rr(src, scratchRegister());
2060 failureCases.append(branchTest32(NonZero, scratchRegister(), TrustedImm32(1)));
2061 valueIsNonZero.link(this);
2062 }
2063#else
2064 if (negZeroCheck)
2065 failureCases.append(branchTest32(Zero, dest));
2066#endif
2067
2068 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
2069 convertInt32ToDouble(dest, fpTemp);
2070 m_assembler.ucomisd_rr(fpTemp, src);
2071 failureCases.append(m_assembler.jp());
2072 failureCases.append(m_assembler.jne());
2073 }
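
    // Usage sketch (illustrative only; placeholder names). The JumpList
    // collects every way the conversion can fail (-0.0, fractional values,
    // out-of-range values, NaN), so callers typically link it to a slow path:
    //
    //     JumpList notInt32;
    //     jit.branchConvertDoubleToInt32(fpSrc, gpDest, notInt32, fpTemp);
    //     // ... fast path: gpDest holds an exact int32 ...
    //     notInt32.link(&jit); // fall back to the generic double path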
2074
2075 void moveZeroToDouble(FPRegisterID reg)
2076 {
2077 m_assembler.xorps_rr(reg, reg);
2078 }
2079
2080 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
2081 {
2082 m_assembler.xorpd_rr(scratch, scratch);
2083 return branchDouble(DoubleNotEqual, reg, scratch);
2084 }
2085
2086 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
2087 {
2088 m_assembler.xorpd_rr(scratch, scratch);
2089 return branchDouble(DoubleEqualOrUnordered, reg, scratch);
2090 }
2091
2092 void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
2093 {
2094 m_assembler.psllq_i8r(imm.m_value, reg);
2095 }
2096
2097 void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
2098 {
2099 m_assembler.psrlq_i8r(imm.m_value, reg);
2100 }
2101
2102 void orPacked(XMMRegisterID src, XMMRegisterID dst)
2103 {
2104 m_assembler.por_rr(src, dst);
2105 }
2106
2107 void move32ToFloat(RegisterID src, XMMRegisterID dst)
2108 {
2109 m_assembler.movd_rr(src, dst);
2110 }
2111
2112 void moveFloatTo32(XMMRegisterID src, RegisterID dst)
2113 {
2114 m_assembler.movd_rr(src, dst);
2115 }
2116
2117 // Stack manipulation operations:
2118 //
2119 // The ABI is assumed to provide a stack abstraction to memory,
2120 // containing machine word sized units of data. Push and pop
2121 // operations add and remove a single register sized unit of data
2122 // to or from the stack. Peek and poke operations read or write
2123 // values on the stack, without moving the current stack position.
2124
2125 void pop(RegisterID dest)
2126 {
2127 m_assembler.pop_r(dest);
2128 }
2129
2130 void push(RegisterID src)
2131 {
2132 m_assembler.push_r(src);
2133 }
2134
2135 void push(Address address)
2136 {
2137 m_assembler.push_m(address.offset, address.base);
2138 }
2139
2140 void push(TrustedImm32 imm)
2141 {
2142 m_assembler.push_i32(imm.m_value);
2143 }
2144
2145 void popPair(RegisterID dest1, RegisterID dest2)
2146 {
2147 pop(dest2);
2148 pop(dest1);
2149 }
2150
2151 void pushPair(RegisterID src1, RegisterID src2)
2152 {
2153 push(src1);
2154 push(src2);
2155 }
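
    // Since the stack grows downwards, pushPair and popPair take their
    // arguments in the same order to save and restore a register pair
    // (illustrative sketch, placeholder registers):
    //
    //     jit.pushPair(regA, regB); // pushes regA, then regB
    //     // ... code that clobbers regA and regB ...
    //     jit.popPair(regA, regB);  // pops regB first, then regA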
2156
2157 // Register move operations:
2158 //
2159 // Move values in registers.
2160
2161 void move(TrustedImm32 imm, RegisterID dest)
2162 {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value.
2165 if (!imm.m_value)
2166 m_assembler.xorl_rr(dest, dest);
2167 else
2168 m_assembler.movl_i32r(imm.m_value, dest);
2169 }
2170
2171#if CPU(X86_64)
2172 void move(RegisterID src, RegisterID dest)
2173 {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
2176 if (src != dest)
2177 m_assembler.movq_rr(src, dest);
2178 }
2179
2180 void move(TrustedImmPtr imm, RegisterID dest)
2181 {
2182 if (!imm.m_value)
2183 m_assembler.xorq_rr(dest, dest);
2184 else
2185 m_assembler.movq_i64r(imm.asIntptr(), dest);
2186 }
2187
2188 void move(TrustedImm64 imm, RegisterID dest)
2189 {
2190 if (!imm.m_value)
2191 m_assembler.xorq_rr(dest, dest);
2192 else
2193 m_assembler.movq_i64r(imm.m_value, dest);
2194 }
2195
2196 void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
2197 {
2198 if (cond & DoubleConditionBitInvert)
2199 m_assembler.ucomisd_rr(left, right);
2200 else
2201 m_assembler.ucomisd_rr(right, left);
2202 moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
2203 }
2204
2205 void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2206 {
2207 if (thenCase != dest && elseCase != dest) {
2208 move(elseCase, dest);
2209 elseCase = dest;
2210 }
2211
2212 RegisterID src;
2213 if (elseCase == dest)
2214 src = thenCase;
2215 else {
2216 cond = invert(cond);
2217 src = elseCase;
2218 }
2219
2220 if (cond & DoubleConditionBitInvert)
2221 m_assembler.ucomisd_rr(left, right);
2222 else
2223 m_assembler.ucomisd_rr(right, left);
2224 moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
2225 }
2226
2227 void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
2228 {
2229 if (cond & DoubleConditionBitInvert)
2230 m_assembler.ucomiss_rr(left, right);
2231 else
2232 m_assembler.ucomiss_rr(right, left);
2233 moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
2234 }
2235
2236 void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2237 {
2238 if (thenCase != dest && elseCase != dest) {
2239 move(elseCase, dest);
2240 elseCase = dest;
2241 }
2242
2243 RegisterID src;
2244 if (elseCase == dest)
2245 src = thenCase;
2246 else {
2247 cond = invert(cond);
2248 src = elseCase;
2249 }
2250
2251 if (cond & DoubleConditionBitInvert)
2252 m_assembler.ucomiss_rr(left, right);
2253 else
2254 m_assembler.ucomiss_rr(right, left);
2255 moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
2256 }
2257
2258 void swap(RegisterID reg1, RegisterID reg2)
2259 {
2260 if (reg1 != reg2)
2261 m_assembler.xchgq_rr(reg1, reg2);
2262 }
2263
2264 void swap(FPRegisterID reg1, FPRegisterID reg2)
2265 {
2266 if (reg1 == reg2)
2267 return;
2268
        // FIXME: This is kind of a hack, since we don't use xmm7 as a temp.
2270 ASSERT(reg1 != FPRegisterID::xmm7);
2271 ASSERT(reg2 != FPRegisterID::xmm7);
2272 moveDouble(reg1, FPRegisterID::xmm7);
2273 moveDouble(reg2, reg1);
2274 moveDouble(FPRegisterID::xmm7, reg2);
2275 }
2276
2277 void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2278 {
2279 if (!imm.m_value)
2280 m_assembler.xorq_rr(dest, dest);
2281 else
2282 m_assembler.mov_i32r(imm.m_value, dest);
2283 }
2284
2285 void signExtend32ToPtr(RegisterID src, RegisterID dest)
2286 {
2287 m_assembler.movsxd_rr(src, dest);
2288 }
2289
2290 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
2291 {
2292 m_assembler.movl_rr(src, dest);
2293 }
2294
2295 void zeroExtend32ToPtr(TrustedImm32 src, RegisterID dest)
2296 {
2297 m_assembler.movl_i32r(src.m_value, dest);
2298 }
2299#else
2300 void move(RegisterID src, RegisterID dest)
2301 {
2302 if (src != dest)
2303 m_assembler.movl_rr(src, dest);
2304 }
2305
2306 void move(TrustedImmPtr imm, RegisterID dest)
2307 {
2308 if (!imm.m_value)
2309 m_assembler.xorl_rr(dest, dest);
2310 else
2311 m_assembler.movl_i32r(imm.asIntptr(), dest);
2312 }
2313
2314 // Only here for templates!
2315 void move(TrustedImm64, RegisterID)
2316 {
2317 UNREACHABLE_FOR_PLATFORM();
2318 }
2319
2320 void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
2321 {
2322 if (cond & DoubleConditionBitInvert)
2323 m_assembler.ucomisd_rr(left, right);
2324 else
2325 m_assembler.ucomisd_rr(right, left);
2326
2327 if (cond == DoubleEqual) {
2328 if (left == right) {
2329 m_assembler.cmovnpl_rr(src, dest);
2330 return;
2331 }
2332
2333 Jump isUnordered(m_assembler.jp());
2334 m_assembler.cmovel_rr(src, dest);
2335 isUnordered.link(this);
2336 return;
2337 }
2338
2339 if (cond == DoubleNotEqualOrUnordered) {
2340 if (left == right) {
2341 m_assembler.cmovpl_rr(src, dest);
2342 return;
2343 }
2344
2345 m_assembler.cmovpl_rr(src, dest);
2346 m_assembler.cmovnel_rr(src, dest);
2347 return;
2348 }
2349
2350 ASSERT(!(cond & DoubleConditionBitSpecial));
2351 m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
2352 }
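
    // A note on the parity checks above: ucomisd reports an unordered result
    // (at least one operand is NaN) by setting PF, and an unordered compare
    // also sets ZF. ConditionE alone would therefore treat NaN as "equal", so
    // DoubleEqual must skip the move when PF is set (the jp guard / cmovnp),
    // while DoubleNotEqualOrUnordered needs the extra cmovp to include NaN.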
2353
2354 void swap(RegisterID reg1, RegisterID reg2)
2355 {
2356 if (reg1 != reg2)
2357 m_assembler.xchgl_rr(reg1, reg2);
2358 }
2359
2360 void swap(FPRegisterID reg1, FPRegisterID reg2)
2361 {
2362 if (reg1 == reg2)
2363 return;
2364
        // FIXME: This is kind of a hack, since we don't use xmm7 as a temp.
2366 ASSERT(reg1 != FPRegisterID::xmm7);
2367 ASSERT(reg2 != FPRegisterID::xmm7);
2368 moveDouble(reg1, FPRegisterID::xmm7);
2369 moveDouble(reg2, reg1);
2370 moveDouble(FPRegisterID::xmm7, reg2);
2371 }
2372
2373 void signExtend32ToPtr(RegisterID src, RegisterID dest)
2374 {
2375 move(src, dest);
2376 }
2377
2378 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
2379 {
2380 move(src, dest);
2381 }
2382#endif
2383
2384 void swap32(RegisterID src, RegisterID dest)
2385 {
2386 m_assembler.xchgl_rr(src, dest);
2387 }
2388
2389 void swap32(RegisterID src, Address dest)
2390 {
2391 m_assembler.xchgl_rm(src, dest.offset, dest.base);
2392 }
2393
2394 void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
2395 {
2396 m_assembler.cmpl_rr(right, left);
2397 cmov(x86Condition(cond), src, dest);
2398 }
2399
2400 void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2401 {
2402 m_assembler.cmpl_rr(right, left);
2403
2404 if (thenCase != dest && elseCase != dest) {
2405 move(elseCase, dest);
2406 elseCase = dest;
2407 }
2408
2409 if (elseCase == dest)
2410 cmov(x86Condition(cond), thenCase, dest);
2411 else
2412 cmov(x86Condition(invert(cond)), elseCase, dest);
2413 }
2414
2415 void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2416 {
2417 if (!right.m_value) {
2418 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2419 moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
2420 return;
2421 }
2422 }
2423
2424 m_assembler.cmpl_ir(right.m_value, left);
2425
2426 if (thenCase != dest && elseCase != dest) {
2427 move(elseCase, dest);
2428 elseCase = dest;
2429 }
2430
2431 if (elseCase == dest)
2432 cmov(x86Condition(cond), thenCase, dest);
2433 else
2434 cmov(x86Condition(invert(cond)), elseCase, dest);
2435 }
2436
2437 void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2438 {
2439 m_assembler.testl_rr(testReg, mask);
2440 cmov(x86Condition(cond), src, dest);
2441 }
2442
2443 void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2444 {
2445 ASSERT(isInvertible(cond));
2446 ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
2447
2448 m_assembler.testl_rr(right, left);
2449
2450 if (thenCase != dest && elseCase != dest) {
2451 move(elseCase, dest);
2452 elseCase = dest;
2453 }
2454
2455 if (elseCase == dest)
2456 cmov(x86Condition(cond), thenCase, dest);
2457 else
2458 cmov(x86Condition(invert(cond)), elseCase, dest);
2459 }
2460
2461 void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
2462 {
2463 test32(testReg, mask);
2464 cmov(x86Condition(cond), src, dest);
2465 }
2466
2467 void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2468 {
2469 ASSERT(isInvertible(cond));
2470 ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
2471
2472 test32(testReg, mask);
2473
2474 if (thenCase != dest && elseCase != dest) {
2475 move(elseCase, dest);
2476 elseCase = dest;
2477 }
2478
2479 if (elseCase == dest)
2480 cmov(x86Condition(cond), thenCase, dest);
2481 else
2482 cmov(x86Condition(invert(cond)), elseCase, dest);
2483 }
2484
2485 template<typename LeftType, typename RightType>
2486 void moveDoubleConditionally32(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2487 {
        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased to dest. Use moveDoubleConditionallyDouble().");
2489
2490 if (thenCase != dest && elseCase != dest) {
2491 moveDouble(elseCase, dest);
2492 elseCase = dest;
2493 }
2494
2495 if (elseCase == dest) {
2496 Jump falseCase = branch32(invert(cond), left, right);
2497 moveDouble(thenCase, dest);
2498 falseCase.link(this);
2499 } else {
2500 Jump trueCase = branch32(cond, left, right);
2501 moveDouble(elseCase, dest);
2502 trueCase.link(this);
2503 }
2504 }
2505
2506 template<typename TestType, typename MaskType>
2507 void moveDoubleConditionallyTest32(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2508 {
        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased to dest. Use moveDoubleConditionallyDouble().");
2510
2511 if (elseCase == dest && isInvertible(cond)) {
2512 Jump falseCase = branchTest32(invert(cond), test, mask);
2513 moveDouble(thenCase, dest);
2514 falseCase.link(this);
2515 } else if (thenCase == dest) {
2516 Jump trueCase = branchTest32(cond, test, mask);
2517 moveDouble(elseCase, dest);
2518 trueCase.link(this);
        } else {
            Jump trueCase = branchTest32(cond, test, mask);
            moveDouble(elseCase, dest);
            Jump falseCase = jump();
            trueCase.link(this);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        }
    }
2528
2529 void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2530 {
2531 if (elseCase == dest) {
2532 Jump falseCase = branchDouble(invert(cond), left, right);
2533 moveDouble(thenCase, dest);
2534 falseCase.link(this);
2535 } else if (thenCase == dest) {
2536 Jump trueCase = branchDouble(cond, left, right);
2537 moveDouble(elseCase, dest);
2538 trueCase.link(this);
2539 } else {
2540 Jump trueCase = branchDouble(cond, left, right);
2541 moveDouble(elseCase, dest);
2542 Jump falseCase = jump();
2543 trueCase.link(this);
2544 moveDouble(thenCase, dest);
2545 falseCase.link(this);
2546 }
2547 }
2548
2549 void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2550 {
2551 if (elseCase == dest) {
2552 Jump falseCase = branchFloat(invert(cond), left, right);
2553 moveDouble(thenCase, dest);
2554 falseCase.link(this);
2555 } else if (thenCase == dest) {
2556 Jump trueCase = branchFloat(cond, left, right);
2557 moveDouble(elseCase, dest);
2558 trueCase.link(this);
2559 } else {
2560 Jump trueCase = branchFloat(cond, left, right);
2561 moveDouble(elseCase, dest);
2562 Jump falseCase = jump();
2563 trueCase.link(this);
2564 moveDouble(thenCase, dest);
2565 falseCase.link(this);
2566 }
2567 }
2568
2569 // Forwards / external control flow operations:
2570 //
    // This set of jump and conditional branch operations returns a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
2583 //
2584 // jz and jnz test whether the first operand is equal to zero, and take
2585 // an optional second operand of a mask under which to perform the test.
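    //
    // Usage sketch (illustrative only; placeholder registers):
    //
    //     Jump slowPath = jit.branch32(GreaterThanOrEqual, sizeReg, TrustedImm32(100));
    //     // ... fast path for small values ...
    //     slowPath.link(&jit); // bind the forward branch once the target is known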
2586
2587public:
2588 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
2589 {
2590 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
2591 m_assembler.cmpb_im(right8.m_value, left.offset, left.base);
2592 return Jump(m_assembler.jCC(x86Condition(cond)));
2593 }
2594
2595 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
2596 {
2597 m_assembler.cmpl_rr(right, left);
2598 return Jump(m_assembler.jCC(x86Condition(cond)));
2599 }
2600
2601 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
2602 {
2603 if (!right.m_value) {
2604 if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2605 return branchTest32(*resultCondition, left, left);
2606 }
2607
2608 m_assembler.cmpl_ir(right.m_value, left);
2609 return Jump(m_assembler.jCC(x86Condition(cond)));
2610 }
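
    // When the right-hand side is zero and the condition commutes, branch32
    // above degrades to branchTest32, so the assembler emits TEST reg, reg
    // instead of CMP reg, $0: the flags are equivalent for those conditions,
    // and the encoding is shorter. Illustrative equivalence (placeholder register):
    //
    //     jit.branch32(Equal, reg, TrustedImm32(0)); // emits: test reg, reg; je ...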
2611
2612 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
2613 {
2614 m_assembler.cmpl_mr(right.offset, right.base, left);
2615 return Jump(m_assembler.jCC(x86Condition(cond)));
2616 }
2617
2618 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
2619 {
2620 m_assembler.cmpl_rm(right, left.offset, left.base);
2621 return Jump(m_assembler.jCC(x86Condition(cond)));
2622 }
2623
2624 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
2625 {
2626 m_assembler.cmpl_im(right.m_value, left.offset, left.base);
2627 return Jump(m_assembler.jCC(x86Condition(cond)));
2628 }
2629
2630 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2631 {
2632 m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
2633 return Jump(m_assembler.jCC(x86Condition(cond)));
2634 }
2635
2636 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2637 {
2638 return branch32(cond, left, right);
2639 }
2640
2641 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
2642 {
2643 m_assembler.testl_rr(reg, mask);
2644 return Jump(m_assembler.jCC(x86Condition(cond)));
2645 }
2646
2647 void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2648 {
2649 if (mask.m_value == -1)
2650 m_assembler.testl_rr(reg, reg);
2651 else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
2652 if (mask.m_value == 0xff)
2653 m_assembler.testb_rr(reg, reg);
2654 else
2655 m_assembler.testb_i8r(mask.m_value, reg);
2656 } else
2657 m_assembler.testl_i32r(mask.m_value, reg);
2658 }
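
    // The byte-sized forms above are an encoding optimization: when the mask
    // fits in 8 bits, testb takes a 1-byte immediate instead of 4. The guard
    // 'reg < X86Registers::esp' is needed because, without a REX prefix,
    // register encodings 4..7 in byte instructions select ah/ch/dh/bh rather
    // than the low byte of esp..edi.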
2659
2660 Jump branch(ResultCondition cond)
2661 {
2662 return Jump(m_assembler.jCC(x86Condition(cond)));
2663 }
2664
2665 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2666 {
2667 test32(reg, mask);
2668 return branch(cond);
2669 }
2670
2671 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2672 {
2673 generateTest32(address, mask);
2674 return Jump(m_assembler.jCC(x86Condition(cond)));
2675 }
2676
2677 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2678 {
2679 if (mask.m_value == -1)
2680 m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
2681 else
2682 m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
2683 return Jump(m_assembler.jCC(x86Condition(cond)));
2684 }
2685
2686 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2687 {
2688 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2689 if (mask8.m_value == -1)
2690 m_assembler.cmpb_im(0, address.offset, address.base);
2691 else
2692 m_assembler.testb_im(mask8.m_value, address.offset, address.base);
2693 return Jump(m_assembler.jCC(x86Condition(cond)));
2694 }
2695
2696 Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2697 {
2698 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2699 if (mask8.m_value == -1)
2700 m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
2701 else
2702 m_assembler.testb_im(mask8.m_value, address.offset, address.base, address.index, address.scale);
2703 return Jump(m_assembler.jCC(x86Condition(cond)));
2704 }
2705
2706 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2707 {
2708 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
2709 m_assembler.cmpb_im(right8.m_value, left.offset, left.base, left.index, left.scale);
2710 return Jump(m_assembler.jCC(x86Condition(cond)));
2711 }
2712
2713 Jump jump()
2714 {
2715 return Jump(m_assembler.jmp());
2716 }
2717
2718 void jump(RegisterID target, PtrTag)
2719 {
2720 m_assembler.jmp_r(target);
2721 }
2722
2723 // Address is a memory location containing the address to jump to
2724 void jump(Address address, PtrTag)
2725 {
2726 m_assembler.jmp_m(address.offset, address.base);
2727 }
2728
2729 // Address is a memory location containing the address to jump to
2730 void jump(BaseIndex address, PtrTag)
2731 {
2732 m_assembler.jmp_m(address.offset, address.base, address.index, address.scale);
2733 }
2734
2735 ALWAYS_INLINE void jump(RegisterID target, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), jump(target, NoPtrTag); }
2736 ALWAYS_INLINE void jump(Address address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), jump(address, NoPtrTag); }
2737 ALWAYS_INLINE void jump(BaseIndex address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), jump(address, NoPtrTag); }
2738
2739 // Arithmetic control flow operations:
2740 //
    // This set of conditional branch operations branches based
2742 // on the result of an arithmetic operation. The operation
2743 // is performed as normal, storing the result.
2744 //
2745 // * jz operations branch if the result is zero.
2746 // * jo operations branch if the (signed) arithmetic
2747 // operation caused an overflow to occur.
2748
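    // Usage sketch (illustrative only; placeholder registers): the kind of
    // overflow-checked addition a JIT emits for speculative int32 arithmetic:
    //
    //     Jump overflowed = jit.branchAdd32(Overflow, rhsReg, accumReg);
    //     // ... fast path: accumReg holds the 32-bit sum ...
    //     overflowed.link(&jit); // e.g. rematerialize the result as a double
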
2749 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
2750 {
2751 add32(src, dest);
2752 return Jump(m_assembler.jCC(x86Condition(cond)));
2753 }
2754
2755 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2756 {
2757 add32(imm, dest);
2758 return Jump(m_assembler.jCC(x86Condition(cond)));
2759 }
2760
2761 Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
2762 {
2763 add32(src, dest);
2764 return Jump(m_assembler.jCC(x86Condition(cond)));
2765 }
2766
2767 Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
2768 {
2769 add32(src, dest);
2770 return Jump(m_assembler.jCC(x86Condition(cond)));
2771 }
2772
2773 Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
2774 {
2775 add32(src, dest);
2776 return Jump(m_assembler.jCC(x86Condition(cond)));
2777 }
2778
2779 Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2780 {
2781 if (src1 == dest)
2782 return branchAdd32(cond, src2, dest);
2783 move32IfNeeded(src2, dest);
2784 return branchAdd32(cond, src1, dest);
2785 }
2786
2787 Jump branchAdd32(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest)
2788 {
2789 if (op2 == dest)
2790 return branchAdd32(cond, op1, dest);
2791 if (op1.base == dest) {
2792 load32(op1, dest);
2793 return branchAdd32(cond, op2, dest);
2794 }
2795 zeroExtend32ToPtr(op2, dest);
2796 return branchAdd32(cond, op1, dest);
2797 }
2798
2799 Jump branchAdd32(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
2800 {
2801 return branchAdd32(cond, src2, src1, dest);
2802 }
2803
2804 Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
2805 {
2806 move32IfNeeded(src, dest);
2807 return branchAdd32(cond, imm, dest);
2808 }
2809
2810 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
2811 {
2812 mul32(src, dest);
2813 if (cond != Overflow)
2814 m_assembler.testl_rr(dest, dest);
2815 return Jump(m_assembler.jCC(x86Condition(cond)));
2816 }
2817
2818 Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
2819 {
2820 mul32(src, dest);
2821 if (cond != Overflow)
2822 m_assembler.testl_rr(dest, dest);
2823 return Jump(m_assembler.jCC(x86Condition(cond)));
2824 }
2825
2826 Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
2827 {
2828 mul32(imm, src, dest);
2829 if (cond != Overflow)
2830 m_assembler.testl_rr(dest, dest);
2831 return Jump(m_assembler.jCC(x86Condition(cond)));
2832 }
2833
2834 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2835 {
2836 if (src1 == dest)
2837 return branchMul32(cond, src2, dest);
2838 move32IfNeeded(src2, dest);
2839 return branchMul32(cond, src1, dest);
2840 }
2841
2842 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
2843 {
2844 sub32(src, dest);
2845 return Jump(m_assembler.jCC(x86Condition(cond)));
2846 }
2847
2848 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2849 {
2850 sub32(imm, dest);
2851 return Jump(m_assembler.jCC(x86Condition(cond)));
2852 }
2853
2854 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
2855 {
2856 sub32(imm, dest);
2857 return Jump(m_assembler.jCC(x86Condition(cond)));
2858 }
2859
2860 Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
2861 {
2862 sub32(src, dest);
2863 return Jump(m_assembler.jCC(x86Condition(cond)));
2864 }
2865
2866 Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
2867 {
2868 sub32(src, dest);
2869 return Jump(m_assembler.jCC(x86Condition(cond)));
2870 }
2871
2872 Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2873 {
2874 // B := A - B is invalid.
2875 ASSERT(src1 == dest || src2 != dest);
2876
2877 move32IfNeeded(src1, dest);
2878 return branchSub32(cond, src2, dest);
2879 }
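
    // In other words, sub32 computes dest -= src, so the lowering can produce
    // dest = dest - src2 (when src1 == dest) or dest = src1 - src2 only when
    // dest is distinct from src2. A caller wanting B := A - B must first move
    // B out of the way (illustrative sketch, placeholder registers):
    //
    //     jit.move(regB, tempReg);
    //     jit.move(regA, regB);
    //     Jump overflowed = jit.branchSub32(Overflow, tempReg, regB); // regB = A - B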
2880
2881 Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
2882 {
2883 move32IfNeeded(src1, dest);
2884 return branchSub32(cond, src2, dest);
2885 }
2886
2887 Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
2888 {
2889 neg32(srcDest);
2890 return Jump(m_assembler.jCC(x86Condition(cond)));
2891 }
2892
2893 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
2894 {
2895 or32(src, dest);
2896 return Jump(m_assembler.jCC(x86Condition(cond)));
2897 }
2898
2900 // Miscellaneous operations:
2901
2902 void breakpoint()
2903 {
2904 m_assembler.int3();
2905 }
2906
2907 static bool isBreakpoint(void* address) { return X86Assembler::isInt3(address); }
2908
2909 Call nearTailCall()
2910 {
2911 return Call(m_assembler.jmp(), Call::LinkableNearTail);
2912 }
2913
2914 Call nearCall()
2915 {
2916 return Call(m_assembler.call(), Call::LinkableNear);
2917 }
2918
2919 Call call(RegisterID target, PtrTag)
2920 {
2921 return Call(m_assembler.call(target), Call::None);
2922 }
2923
2924 void call(Address address, PtrTag)
2925 {
2926 m_assembler.call_m(address.offset, address.base);
2927 }
2928
2929 ALWAYS_INLINE Call call(RegisterID target, RegisterID callTag) { return UNUSED_PARAM(callTag), call(target, NoPtrTag); }
2930 ALWAYS_INLINE void call(Address address, RegisterID callTag) { UNUSED_PARAM(callTag), call(address, NoPtrTag); }
2931
2932 void ret()
2933 {
2934 m_assembler.ret();
2935 }
2936
2937 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
2938 {
2939 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
2940 m_assembler.cmpb_im(right8.m_value, left.offset, left.base);
2941 set32(x86Condition(cond), dest);
2942 }
2943
2944 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2945 {
2946 m_assembler.cmpl_rr(right, left);
2947 set32(x86Condition(cond), dest);
2948 }
2949
2950 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2951 {
2952 if (!right.m_value) {
2953 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2954 test32(*resultCondition, left, left, dest);
2955 return;
2956 }
2957 }
2958
2959 m_assembler.cmpl_ir(right.m_value, left);
2960 set32(x86Condition(cond), dest);
2961 }
2962
2963 // FIXME:
2964 // The mask should be optional... perhaps the argument order should be
2965 // dest-src, operations always have a dest? ... possibly not true, considering
2966 // asm ops like test, or pseudo ops like pop().
2967
2968 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2969 {
2970 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2971 if (mask8.m_value == -1)
2972 m_assembler.cmpb_im(0, address.offset, address.base);
2973 else
2974 m_assembler.testb_im(mask8.m_value, address.offset, address.base);
2975 set32(x86Condition(cond), dest);
2976 }
2977
2978 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2979 {
2980 generateTest32(address, mask);
2981 set32(x86Condition(cond), dest);
2982 }
2983
2984 void test32(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
2985 {
2986 m_assembler.testl_rr(reg, mask);
2987 set32(x86Condition(cond), dest);
2988 }
2989
2990 void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
2991 {
2992 test32(reg, mask);
2993 set32(x86Condition(cond), dest);
2994 }
2995
2996 void setCarry(RegisterID dest)
2997 {
2998 set32(X86Assembler::ConditionC, dest);
2999 }
3000
3001 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
3002 static RelationalCondition invert(RelationalCondition cond)
3003 {
3004 return static_cast<RelationalCondition>(cond ^ 1);
3005 }
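
    // The XOR trick above works because x86 condition codes come in
    // complementary pairs that differ only in the low bit of their encoding,
    // e.g. ConditionE/ConditionNE and ConditionL/ConditionGE, so flipping
    // bit 0 always yields the inverse condition:
    //
    //     static_assert((X86Assembler::ConditionE ^ 1) == X86Assembler::ConditionNE, "");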
3006
3007 static DoubleCondition invert(DoubleCondition cond)
3008 {
3009 switch (cond) {
3010 case DoubleEqual:
3011 return DoubleNotEqualOrUnordered;
3012 case DoubleNotEqual:
3013 return DoubleEqualOrUnordered;
3014 case DoubleGreaterThan:
3015 return DoubleLessThanOrEqualOrUnordered;
3016 case DoubleGreaterThanOrEqual:
3017 return DoubleLessThanOrUnordered;
3018 case DoubleLessThan:
3019 return DoubleGreaterThanOrEqualOrUnordered;
3020 case DoubleLessThanOrEqual:
3021 return DoubleGreaterThanOrUnordered;
3022 case DoubleEqualOrUnordered:
3023 return DoubleNotEqual;
3024 case DoubleNotEqualOrUnordered:
3025 return DoubleEqual;
3026 case DoubleGreaterThanOrUnordered:
3027 return DoubleLessThanOrEqual;
3028 case DoubleGreaterThanOrEqualOrUnordered:
3029 return DoubleLessThan;
3030 case DoubleLessThanOrUnordered:
3031 return DoubleGreaterThanOrEqual;
3032 case DoubleLessThanOrEqualOrUnordered:
3033 return DoubleGreaterThan;
3034 }
3035 RELEASE_ASSERT_NOT_REACHED();
3036 return DoubleEqual; // make compiler happy
3037 }
3038
3039 static bool isInvertible(ResultCondition cond)
3040 {
3041 switch (cond) {
3042 case Zero:
3043 case NonZero:
3044 case Signed:
3045 case PositiveOrZero:
3046 return true;
3047 default:
3048 return false;
3049 }
3050 }
3051
3052 static ResultCondition invert(ResultCondition cond)
3053 {
3054 switch (cond) {
3055 case Zero:
3056 return NonZero;
3057 case NonZero:
3058 return Zero;
3059 case Signed:
3060 return PositiveOrZero;
3061 case PositiveOrZero:
3062 return Signed;
3063 default:
3064 RELEASE_ASSERT_NOT_REACHED();
3065 return Zero; // Make compiler happy for release builds.
3066 }
3067 }
3068
3069 static Optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
3070 {
3071 switch (cond) {
3072 case Equal:
3073 return Zero;
3074 case NotEqual:
3075 return NonZero;
3076 case LessThan:
3077 return Signed;
        case GreaterThanOrEqual:
            return PositiveOrZero;
3081 default:
3082 return WTF::nullopt;
3083 }
3084 }
3085
3086 void nop()
3087 {
3088 m_assembler.nop();
3089 }
3090
3091 void xchg8(RegisterID reg, Address address)
3092 {
3093 m_assembler.xchgb_rm(reg, address.offset, address.base);
3094 }
3095
3096 void xchg8(RegisterID reg, BaseIndex address)
3097 {
3098 m_assembler.xchgb_rm(reg, address.offset, address.base, address.index, address.scale);
3099 }
3100
3101 void xchg16(RegisterID reg, Address address)
3102 {
3103 m_assembler.xchgw_rm(reg, address.offset, address.base);
3104 }
3105
3106 void xchg16(RegisterID reg, BaseIndex address)
3107 {
3108 m_assembler.xchgw_rm(reg, address.offset, address.base, address.index, address.scale);
3109 }
3110
3111 void xchg32(RegisterID reg, Address address)
3112 {
3113 m_assembler.xchgl_rm(reg, address.offset, address.base);
3114 }
3115
3116 void xchg32(RegisterID reg, BaseIndex address)
3117 {
3118 m_assembler.xchgl_rm(reg, address.offset, address.base, address.index, address.scale);
3119 }
3120
3121 // We take memoryFence to mean acqrel. This has acqrel semantics on x86.
3122 void memoryFence()
3123 {
3124 // lock; orl $0, (%rsp)
3125 m_assembler.lock();
3126 m_assembler.orl_im(0, 0, X86Registers::esp);
3127 }
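
    // A locked RMW on a stack slot is preferred over mfence here: it orders
    // ordinary stores against subsequent loads just as a full fence does, and
    // tends to be cheaper since (%rsp) is almost always exclusively held in
    // this core's L1. Usage sketch (illustrative, placeholder operands) for a
    // Dekker-style publication pattern:
    //
    //     jit.store32(TrustedImm32(1), myFlagAddress);
    //     jit.memoryFence(); // make the store visible before the load below
    //     jit.load32(theirFlagAddress, resultReg);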
3128
3129 void atomicStrongCAS8(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address, RegisterID result)
3130 {
3131 atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgb_rm(newValue, address.offset, address.base); });
3132 }
3133
3134 void atomicStrongCAS8(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address, RegisterID result)
3135 {
3136 atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgb_rm(newValue, address.offset, address.base, address.index, address.scale); });
3137 }
3138
3139 void atomicStrongCAS16(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address, RegisterID result)
3140 {
3141 atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgw_rm(newValue, address.offset, address.base); });
3142 }
3143
3144 void atomicStrongCAS16(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address, RegisterID result)
3145 {
3146 atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgw_rm(newValue, address.offset, address.base, address.index, address.scale); });
3147 }
3148
3149 void atomicStrongCAS32(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address, RegisterID result)
3150 {
3151 atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base); });
3152 }
3153
3154 void atomicStrongCAS32(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address, RegisterID result)
3155 {
3156 atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base, address.index, address.scale); });
3157 }
3158
3159 void atomicStrongCAS8(RegisterID expectedAndResult, RegisterID newValue, Address address)
3160 {
3161 atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgb_rm(newValue, address.offset, address.base); });
3162 }
3163
3164 void atomicStrongCAS8(RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
3165 {
3166 atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgb_rm(newValue, address.offset, address.base, address.index, address.scale); });
3167 }
3168
3169 void atomicStrongCAS16(RegisterID expectedAndResult, RegisterID newValue, Address address)
3170 {
3171 atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgw_rm(newValue, address.offset, address.base); });
3172 }
3173
3174 void atomicStrongCAS16(RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
3175 {
3176 atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgw_rm(newValue, address.offset, address.base, address.index, address.scale); });
3177 }
3178
3179 void atomicStrongCAS32(RegisterID expectedAndResult, RegisterID newValue, Address address)
3180 {
3181 atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base); });
3182 }
3183
3184 void atomicStrongCAS32(RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
3185 {
3186 atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base, address.index, address.scale); });
3187 }
3188
3189 Jump branchAtomicStrongCAS8(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address)
3190 {
3191 return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgb_rm(newValue, address.offset, address.base); });
3192 }
3193
3194 Jump branchAtomicStrongCAS8(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
3195 {
3196 return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgb_rm(newValue, address.offset, address.base, address.index, address.scale); });
3197 }
3198
3199 Jump branchAtomicStrongCAS16(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address)
3200 {
3201 return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgw_rm(newValue, address.offset, address.base); });
3202 }
3203
3204 Jump branchAtomicStrongCAS16(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
3205 {
3206 return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgw_rm(newValue, address.offset, address.base, address.index, address.scale); });
3207 }
3208
3209 Jump branchAtomicStrongCAS32(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address)
3210 {
3211 return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base); });
3212 }
3213
3214 Jump branchAtomicStrongCAS32(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
3215 {
3216 return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base, address.index, address.scale); });
3217 }
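
    // Usage sketch (illustrative only; placeholder registers): a strong CAS
    // attempting to take a lock word. 'expectedReg' is both the expected value
    // on input and, on failure, receives the value actually found in memory:
    //
    //     jit.move(TrustedImm32(0), expectedReg); // expect unlocked
    //     jit.move(TrustedImm32(1), newValueReg); // install locked
    //     Jump won = jit.branchAtomicStrongCAS32(Success, expectedReg, newValueReg, Address(lockReg));
    //     // ... contended path: expectedReg now holds the current lock word ...
    //     won.link(&jit);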
3218
3219 // If you use weak CAS, you cannot rely on expectedAndClobbered to have any particular value after
3220 // this completes. On x86, it will contain the result of the strong CAS. On ARM, it will still have
3221 // the expected value.
3222 void atomicWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
3223 {
3224 atomicStrongCAS8(cond, expectedAndClobbered, newValue, address, result);
3225 }
3226
3227 void atomicWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
3228 {
3229 atomicStrongCAS8(cond, expectedAndClobbered, newValue, address, result);
3230 }
3231
3232 void atomicWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
3233 {
3234 atomicStrongCAS16(cond, expectedAndClobbered, newValue, address, result);
3235 }
3236
3237 void atomicWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
3238 {
3239 atomicStrongCAS16(cond, expectedAndClobbered, newValue, address, result);
3240 }
3241
3242 void atomicWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
3243 {
3244 atomicStrongCAS32(cond, expectedAndClobbered, newValue, address, result);
3245 }
3246
3247 void atomicWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
3248 {
3249 atomicStrongCAS32(cond, expectedAndClobbered, newValue, address, result);
3250 }
3251
3252 Jump branchAtomicWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
3253 {
3254 return branchAtomicStrongCAS8(cond, expectedAndClobbered, newValue, address);
3255 }
3256
3257 Jump branchAtomicWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
3258 {
3259 return branchAtomicStrongCAS8(cond, expectedAndClobbered, newValue, address);
3260 }
3261
3262 Jump branchAtomicWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
3263 {
3264 return branchAtomicStrongCAS16(cond, expectedAndClobbered, newValue, address);
3265 }
3266
3267 Jump branchAtomicWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
3268 {
3269 return branchAtomicStrongCAS16(cond, expectedAndClobbered, newValue, address);
3270 }
3271
3272 Jump branchAtomicWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
3273 {
3274 return branchAtomicStrongCAS32(cond, expectedAndClobbered, newValue, address);
3275 }
3276
3277 Jump branchAtomicWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
3278 {
3279 return branchAtomicStrongCAS32(cond, expectedAndClobbered, newValue, address);
3280 }
3281
3282 void atomicRelaxedWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
3283 {
3284 atomicStrongCAS8(cond, expectedAndClobbered, newValue, address, result);
3285 }
3286
3287 void atomicRelaxedWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
3288 {
3289 atomicStrongCAS8(cond, expectedAndClobbered, newValue, address, result);
3290 }
3291
3292 void atomicRelaxedWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
3293 {
3294 atomicStrongCAS16(cond, expectedAndClobbered, newValue, address, result);
3295 }
3296
3297 void atomicRelaxedWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
3298 {
3299 atomicStrongCAS16(cond, expectedAndClobbered, newValue, address, result);
3300 }
3301
3302 void atomicRelaxedWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
3303 {
3304 atomicStrongCAS32(cond, expectedAndClobbered, newValue, address, result);
3305 }
3306
3307 void atomicRelaxedWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
3308 {
3309 atomicStrongCAS32(cond, expectedAndClobbered, newValue, address, result);
3310 }
3311
3312 Jump branchAtomicRelaxedWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
3313 {
3314 return branchAtomicStrongCAS8(cond, expectedAndClobbered, newValue, address);
3315 }
3316
3317 Jump branchAtomicRelaxedWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
3318 {
3319 return branchAtomicStrongCAS8(cond, expectedAndClobbered, newValue, address);
3320 }
3321
3322 Jump branchAtomicRelaxedWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
3323 {
3324 return branchAtomicStrongCAS16(cond, expectedAndClobbered, newValue, address);
3325 }
3326
3327 Jump branchAtomicRelaxedWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
3328 {
3329 return branchAtomicStrongCAS16(cond, expectedAndClobbered, newValue, address);
3330 }
3331
3332 Jump branchAtomicRelaxedWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
3333 {
3334 return branchAtomicStrongCAS32(cond, expectedAndClobbered, newValue, address);
3335 }
3336
3337 Jump branchAtomicRelaxedWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
3338 {
3339 return branchAtomicStrongCAS32(cond, expectedAndClobbered, newValue, address);
3340 }
3341
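    // The atomic read-modify-write helpers below all follow the same shape:
    // emit the LOCK prefix, then reuse the ordinary memory-operand form of the
    // instruction, which the hardware performs as a single atomic RMW.
    // Usage sketch (illustrative, placeholder base register):
    //
    //     jit.atomicAdd32(TrustedImm32(1), Address(counterBase)); // lock addl $1, (counter)
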
3342 void atomicAdd8(TrustedImm32 imm, Address address)
3343 {
3344 m_assembler.lock();
3345 add8(imm, address);
3346 }
3347
3348 void atomicAdd8(TrustedImm32 imm, BaseIndex address)
3349 {
3350 m_assembler.lock();
3351 add8(imm, address);
3352 }
3353
3354 void atomicAdd8(RegisterID reg, Address address)
3355 {
3356 m_assembler.lock();
3357 add8(reg, address);
3358 }
3359
3360 void atomicAdd8(RegisterID reg, BaseIndex address)
3361 {
3362 m_assembler.lock();
3363 add8(reg, address);
3364 }
3365
3366 void atomicAdd16(TrustedImm32 imm, Address address)
3367 {
3368 m_assembler.lock();
3369 add16(imm, address);
3370 }
3371
3372 void atomicAdd16(TrustedImm32 imm, BaseIndex address)
3373 {
3374 m_assembler.lock();
3375 add16(imm, address);
3376 }
3377
3378 void atomicAdd16(RegisterID reg, Address address)
3379 {
3380 m_assembler.lock();
3381 add16(reg, address);
3382 }
3383
3384 void atomicAdd16(RegisterID reg, BaseIndex address)
3385 {
3386 m_assembler.lock();
3387 add16(reg, address);
3388 }
3389
3390 void atomicAdd32(TrustedImm32 imm, Address address)
3391 {
3392 m_assembler.lock();
3393 add32(imm, address);
3394 }
3395
3396 void atomicAdd32(TrustedImm32 imm, BaseIndex address)
3397 {
3398 m_assembler.lock();
3399 add32(imm, address);
3400 }
3401
3402 void atomicAdd32(RegisterID reg, Address address)
3403 {
3404 m_assembler.lock();
3405 add32(reg, address);
3406 }
3407
3408 void atomicAdd32(RegisterID reg, BaseIndex address)
3409 {
3410 m_assembler.lock();
3411 add32(reg, address);
3412 }
3413
3414 void atomicSub8(TrustedImm32 imm, Address address)
3415 {
3416 m_assembler.lock();
3417 sub8(imm, address);
3418 }
3419
3420 void atomicSub8(TrustedImm32 imm, BaseIndex address)
3421 {
3422 m_assembler.lock();
3423 sub8(imm, address);
3424 }
3425
3426 void atomicSub8(RegisterID reg, Address address)
3427 {
3428 m_assembler.lock();
3429 sub8(reg, address);
3430 }
3431
3432 void atomicSub8(RegisterID reg, BaseIndex address)
3433 {
3434 m_assembler.lock();
3435 sub8(reg, address);
3436 }
3437
3438 void atomicSub16(TrustedImm32 imm, Address address)
3439 {
3440 m_assembler.lock();
3441 sub16(imm, address);
3442 }
3443
3444 void atomicSub16(TrustedImm32 imm, BaseIndex address)
3445 {
3446 m_assembler.lock();
3447 sub16(imm, address);
3448 }
3449
3450 void atomicSub16(RegisterID reg, Address address)
3451 {
3452 m_assembler.lock();
3453 sub16(reg, address);
3454 }
3455
3456 void atomicSub16(RegisterID reg, BaseIndex address)
3457 {
3458 m_assembler.lock();
3459 sub16(reg, address);
3460 }
3461
3462 void atomicSub32(TrustedImm32 imm, Address address)
3463 {
3464 m_assembler.lock();
3465 sub32(imm, address);
3466 }
3467
3468 void atomicSub32(TrustedImm32 imm, BaseIndex address)
3469 {
3470 m_assembler.lock();
3471 sub32(imm, address);
3472 }
3473
3474 void atomicSub32(RegisterID reg, Address address)
3475 {
3476 m_assembler.lock();
3477 sub32(reg, address);
3478 }
3479
3480 void atomicSub32(RegisterID reg, BaseIndex address)
3481 {
3482 m_assembler.lock();
3483 sub32(reg, address);
3484 }
3485
3486 void atomicAnd8(TrustedImm32 imm, Address address)
3487 {
3488 m_assembler.lock();
3489 and8(imm, address);
3490 }
3491
3492 void atomicAnd8(TrustedImm32 imm, BaseIndex address)
3493 {
3494 m_assembler.lock();
3495 and8(imm, address);
3496 }
3497
3498 void atomicAnd8(RegisterID reg, Address address)
3499 {
3500 m_assembler.lock();
3501 and8(reg, address);
3502 }
3503
3504 void atomicAnd8(RegisterID reg, BaseIndex address)
3505 {
3506 m_assembler.lock();
3507 and8(reg, address);
3508 }
3509
3510 void atomicAnd16(TrustedImm32 imm, Address address)
3511 {
3512 m_assembler.lock();
3513 and16(imm, address);
3514 }
3515
3516 void atomicAnd16(TrustedImm32 imm, BaseIndex address)
3517 {
3518 m_assembler.lock();
3519 and16(imm, address);
3520 }
3521
3522 void atomicAnd16(RegisterID reg, Address address)
3523 {
3524 m_assembler.lock();
3525 and16(reg, address);
3526 }
3527
3528 void atomicAnd16(RegisterID reg, BaseIndex address)
3529 {
3530 m_assembler.lock();
3531 and16(reg, address);
3532 }
3533
3534 void atomicAnd32(TrustedImm32 imm, Address address)
3535 {
3536 m_assembler.lock();
3537 and32(imm, address);
3538 }
3539
3540 void atomicAnd32(TrustedImm32 imm, BaseIndex address)
3541 {
3542 m_assembler.lock();
3543 and32(imm, address);
3544 }
3545
3546 void atomicAnd32(RegisterID reg, Address address)
3547 {
3548 m_assembler.lock();
3549 and32(reg, address);
3550 }
3551
3552 void atomicAnd32(RegisterID reg, BaseIndex address)
3553 {
3554 m_assembler.lock();
3555 and32(reg, address);
3556 }
3557
3558 void atomicOr8(TrustedImm32 imm, Address address)
3559 {
3560 m_assembler.lock();
3561 or8(imm, address);
3562 }
3563
3564 void atomicOr8(TrustedImm32 imm, BaseIndex address)
3565 {
3566 m_assembler.lock();
3567 or8(imm, address);
3568 }
3569
3570 void atomicOr8(RegisterID reg, Address address)
3571 {
3572 m_assembler.lock();
3573 or8(reg, address);
3574 }
3575
3576 void atomicOr8(RegisterID reg, BaseIndex address)
3577 {
3578 m_assembler.lock();
3579 or8(reg, address);
3580 }
3581
3582 void atomicOr16(TrustedImm32 imm, Address address)
3583 {
3584 m_assembler.lock();
3585 or16(imm, address);
3586 }
3587
3588 void atomicOr16(TrustedImm32 imm, BaseIndex address)
3589 {
3590 m_assembler.lock();
3591 or16(imm, address);
3592 }
3593
3594 void atomicOr16(RegisterID reg, Address address)
3595 {
3596 m_assembler.lock();
3597 or16(reg, address);
3598 }
3599
3600 void atomicOr16(RegisterID reg, BaseIndex address)
3601 {
3602 m_assembler.lock();
3603 or16(reg, address);
3604 }
3605
3606 void atomicOr32(TrustedImm32 imm, Address address)
3607 {
3608 m_assembler.lock();
3609 or32(imm, address);
3610 }
3611
3612 void atomicOr32(TrustedImm32 imm, BaseIndex address)
3613 {
3614 m_assembler.lock();
3615 or32(imm, address);
3616 }
3617
3618 void atomicOr32(RegisterID reg, Address address)
3619 {
3620 m_assembler.lock();
3621 or32(reg, address);
3622 }
3623
3624 void atomicOr32(RegisterID reg, BaseIndex address)
3625 {
3626 m_assembler.lock();
3627 or32(reg, address);
3628 }
3629
3630 void atomicXor8(TrustedImm32 imm, Address address)
3631 {
3632 m_assembler.lock();
3633 xor8(imm, address);
3634 }
3635
3636 void atomicXor8(TrustedImm32 imm, BaseIndex address)
3637 {
3638 m_assembler.lock();
3639 xor8(imm, address);
3640 }
3641
3642 void atomicXor8(RegisterID reg, Address address)
3643 {
3644 m_assembler.lock();
3645 xor8(reg, address);
3646 }
3647
3648 void atomicXor8(RegisterID reg, BaseIndex address)
3649 {
3650 m_assembler.lock();
3651 xor8(reg, address);
3652 }
3653
3654 void atomicXor16(TrustedImm32 imm, Address address)
3655 {
3656 m_assembler.lock();
3657 xor16(imm, address);
3658 }
3659
3660 void atomicXor16(TrustedImm32 imm, BaseIndex address)
3661 {
3662 m_assembler.lock();
3663 xor16(imm, address);
3664 }
3665
3666 void atomicXor16(RegisterID reg, Address address)
3667 {
3668 m_assembler.lock();
3669 xor16(reg, address);
3670 }
3671
3672 void atomicXor16(RegisterID reg, BaseIndex address)
3673 {
3674 m_assembler.lock();
3675 xor16(reg, address);
3676 }
3677
3678 void atomicXor32(TrustedImm32 imm, Address address)
3679 {
3680 m_assembler.lock();
3681 xor32(imm, address);
3682 }
3683
3684 void atomicXor32(TrustedImm32 imm, BaseIndex address)
3685 {
3686 m_assembler.lock();
3687 xor32(imm, address);
3688 }
3689
3690 void atomicXor32(RegisterID reg, Address address)
3691 {
3692 m_assembler.lock();
3693 xor32(reg, address);
3694 }
3695
3696 void atomicXor32(RegisterID reg, BaseIndex address)
3697 {
3698 m_assembler.lock();
3699 xor32(reg, address);
3700 }
3701
3702 void atomicNeg8(Address address)
3703 {
3704 m_assembler.lock();
3705 neg8(address);
3706 }
3707
3708 void atomicNeg8(BaseIndex address)
3709 {
3710 m_assembler.lock();
3711 neg8(address);
3712 }
3713
3714 void atomicNeg16(Address address)
3715 {
3716 m_assembler.lock();
3717 neg16(address);
3718 }
3719
3720 void atomicNeg16(BaseIndex address)
3721 {
3722 m_assembler.lock();
3723 neg16(address);
3724 }
3725
3726 void atomicNeg32(Address address)
3727 {
3728 m_assembler.lock();
3729 neg32(address);
3730 }
3731
3732 void atomicNeg32(BaseIndex address)
3733 {
3734 m_assembler.lock();
3735 neg32(address);
3736 }
3737
3738 void atomicNot8(Address address)
3739 {
3740 m_assembler.lock();
3741 not8(address);
3742 }
3743
3744 void atomicNot8(BaseIndex address)
3745 {
3746 m_assembler.lock();
3747 not8(address);
3748 }
3749
3750 void atomicNot16(Address address)
3751 {
3752 m_assembler.lock();
3753 not16(address);
3754 }
3755
3756 void atomicNot16(BaseIndex address)
3757 {
3758 m_assembler.lock();
3759 not16(address);
3760 }
3761
3762 void atomicNot32(Address address)
3763 {
3764 m_assembler.lock();
3765 not32(address);
3766 }
3767
3768 void atomicNot32(BaseIndex address)
3769 {
3770 m_assembler.lock();
3771 not32(address);
3772 }
3773
3774 void atomicXchgAdd8(RegisterID reg, Address address)
3775 {
3776 m_assembler.lock();
3777 m_assembler.xaddb_rm(reg, address.offset, address.base);
3778 }
3779
3780 void atomicXchgAdd8(RegisterID reg, BaseIndex address)
3781 {
3782 m_assembler.lock();
3783 m_assembler.xaddb_rm(reg, address.offset, address.base, address.index, address.scale);
3784 }
3785
3786 void atomicXchgAdd16(RegisterID reg, Address address)
3787 {
3788 m_assembler.lock();
3789 m_assembler.xaddw_rm(reg, address.offset, address.base);
3790 }
3791
3792 void atomicXchgAdd16(RegisterID reg, BaseIndex address)
3793 {
3794 m_assembler.lock();
3795 m_assembler.xaddw_rm(reg, address.offset, address.base, address.index, address.scale);
3796 }
3797
3798 void atomicXchgAdd32(RegisterID reg, Address address)
3799 {
3800 m_assembler.lock();
3801 m_assembler.xaddl_rm(reg, address.offset, address.base);
3802 }
3803
3804 void atomicXchgAdd32(RegisterID reg, BaseIndex address)
3805 {
3806 m_assembler.lock();
3807 m_assembler.xaddl_rm(reg, address.offset, address.base, address.index, address.scale);
3808 }
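
    // Unlike the atomicAdd helpers, the xadd-based helpers above also hand back
    // the previous value: after 'lock xadd %reg, (mem)', reg holds the old
    // contents of the memory location. Usage sketch (illustrative names):
    //
    //     jit.move(TrustedImm32(1), indexReg);
    //     jit.atomicXchgAdd32(indexReg, Address(cursorBase)); // indexReg = old cursor value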
3809
3810 void atomicXchg8(RegisterID reg, Address address)
3811 {
3812 m_assembler.lock();
3813 m_assembler.xchgb_rm(reg, address.offset, address.base);
3814 }
3815
3816 void atomicXchg8(RegisterID reg, BaseIndex address)
3817 {
3818 m_assembler.lock();
3819 m_assembler.xchgb_rm(reg, address.offset, address.base, address.index, address.scale);
3820 }
3821
3822 void atomicXchg16(RegisterID reg, Address address)
3823 {
3824 m_assembler.lock();
3825 m_assembler.xchgw_rm(reg, address.offset, address.base);
3826 }
3827
3828 void atomicXchg16(RegisterID reg, BaseIndex address)
3829 {
3830 m_assembler.lock();
3831 m_assembler.xchgw_rm(reg, address.offset, address.base, address.index, address.scale);
3832 }
3833
3834 void atomicXchg32(RegisterID reg, Address address)
3835 {
3836 m_assembler.lock();
3837 m_assembler.xchgl_rm(reg, address.offset, address.base);
3838 }
3839
3840 void atomicXchg32(RegisterID reg, BaseIndex address)
3841 {
3842 m_assembler.lock();
3843 m_assembler.xchgl_rm(reg, address.offset, address.base, address.index, address.scale);
3844 }
3845
3846 // We take this to mean that it prevents motion of normal stores. So, it's a no-op on x86.
3847 void storeFence()
3848 {
3849 }
3850
3851 // We take this to mean that it prevents motion of normal loads. So, it's a no-op on x86.
3852 void loadFence()
3853 {
3854 }
3855
3856#if ENABLE(FAST_TLS_JIT)
3857 void loadFromTLS32(uint32_t offset, RegisterID dst)
3858 {
3859 m_assembler.gs();
3860 m_assembler.movl_mr(offset, dst);
3861 }
3862
3864 static bool loadFromTLSPtrNeedsMacroScratchRegister()
3865 {
3866 return false;
3867 }
3868
3869 void storeToTLS32(RegisterID src, uint32_t offset)
3870 {
3871 m_assembler.gs();
3872 m_assembler.movl_rm(src, offset);
3873 }
3874
3875 static bool storeToTLSPtrNeedsMacroScratchRegister()
3876 {
3877 return false;
3878 }
3879#endif

    template<PtrTag tag>
    static void replaceWithVMHalt(CodeLocationLabel<tag> instructionStart)
    {
        X86Assembler::replaceWithHlt(instructionStart.executableAddress());
    }

    template<PtrTag startTag, PtrTag destTag>
    static void replaceWithJump(CodeLocationLabel<startTag> instructionStart, CodeLocationLabel<destTag> destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

    static ptrdiff_t patchableJumpSize()
    {
        return X86Assembler::patchableJumpSize();
    }

    static bool supportsFloatingPointRounding()
    {
        if (s_sse4_1CheckState == CPUIDCheckState::NotChecked)
            collectCPUFeatures();
        return s_sse4_1CheckState == CPUIDCheckState::Set;
    }
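
    // Rounding is backed by SSE4.1's roundsd/roundss, hence the SSE4.1 probe
    // above. A sketch of the guard callers are expected to use (function names
    // are illustrative):
    //     if (MacroAssemblerX86Common::supportsFloatingPointRounding())
    //         jit.ceilDouble(src, dst); // roundsd with the ceil immediate
    //     else
    //         emitSlowPathRounding(jit); // hypothetical fallback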

    static bool supportsCountPopulation()
    {
        if (s_popcntCheckState == CPUIDCheckState::NotChecked)
            collectCPUFeatures();
        return s_popcntCheckState == CPUIDCheckState::Set;
    }

    static bool supportsAVX()
    {
        // AVX still causes mysterious regressions and those regressions can be massive.
        return false;
    }

    void lfence()
    {
        m_assembler.lfence();
    }

    void mfence()
    {
        m_assembler.mfence();
    }

    void sfence()
    {
        m_assembler.sfence();
    }

    void rdtsc()
    {
        m_assembler.rdtsc();
    }

    void pause()
    {
        m_assembler.pause();
    }

    void cpuid()
    {
        m_assembler.cpuid();
    }
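
    // Note: these emit the raw instructions with their architectural register
    // effects: rdtsc writes the timestamp into edx:eax, and cpuid clobbers
    // eax, ebx, ecx, and edx. Callers must keep those registers free.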

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(StatusCondition cond)
    {
        switch (cond) {
        case Success:
            return X86Assembler::ConditionE;
        case Failure:
            return X86Assembler::ConditionNE;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return X86Assembler::ConditionE;
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86, setCC can only address the low byte of the first 4
        // registers: encodings 4..7 select the 'h' registers (ah..bh), not
        // esp..edi. For the higher registers, shuffle through eax instead.
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void cmov(X86Assembler::Condition cond, RegisterID src, RegisterID dest)
    {
#if CPU(X86_64)
        m_assembler.cmovq_rr(cond, src, dest);
#else
        m_assembler.cmovl_rr(cond, src, dest);
#endif
    }

    static bool supportsLZCNT()
    {
        if (s_lzcntCheckState == CPUIDCheckState::NotChecked)
            collectCPUFeatures();
        return s_lzcntCheckState == CPUIDCheckState::Set;
    }

    static bool supportsBMI1()
    {
        if (s_bmi1CheckState == CPUIDCheckState::NotChecked)
            collectCPUFeatures();
        return s_bmi1CheckState == CPUIDCheckState::Set;
    }

    // If tzcnt (BMI1) is not available, use this immediately after a BSF: BSF
    // sets ZF and leaves the destination undefined when the source is zero, so
    // substitute the register width, the conventional ctz(0) result.
    template<int sizeOfRegister>
    void ctzAfterBsf(RegisterID dst)
    {
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
        move(TrustedImm32(sizeOfRegister), dst);
        srcIsNonZero.link(this);
    }

    // cmpxchg hard-codes eax as the expected-value/result register. These
    // helpers move expectedAndResult into eax around the caller-supplied
    // cmpxchg emitter (func), rewriting the address operand in case it used
    // eax or expectedAndResult.
    template<typename AddressType, typename Func>
    void atomicStrongCAS(StatusCondition cond, RegisterID expectedAndResult, RegisterID result, AddressType& address, const Func& func)
    {
        address = address.withSwappedRegister(X86Registers::eax, expectedAndResult);
        swap(expectedAndResult, X86Registers::eax);
        m_assembler.lock();
        func();
        swap(expectedAndResult, X86Registers::eax);
        set32(x86Condition(cond), result);
    }

    template<typename AddressType, typename Func>
    void atomicStrongCAS(RegisterID expectedAndResult, AddressType& address, const Func& func)
    {
        address = address.withSwappedRegister(X86Registers::eax, expectedAndResult);
        swap(expectedAndResult, X86Registers::eax);
        m_assembler.lock();
        func();
        swap(expectedAndResult, X86Registers::eax);
    }

    template<typename AddressType, typename Func>
    Jump branchAtomicStrongCAS(StatusCondition cond, RegisterID expectedAndResult, AddressType& address, const Func& func)
    {
        address = address.withSwappedRegister(X86Registers::eax, expectedAndResult);
        swap(expectedAndResult, X86Registers::eax);
        m_assembler.lock();
        func();
        swap(expectedAndResult, X86Registers::eax);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
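
    // A sketch of a typical caller (assuming a cmpxchgl_rm-style emitter on the
    // assembler; the lambda runs between the lock prefix and the flag check):
    //     void atomicStrongCAS32(StatusCondition cond, RegisterID expectedAndResult,
    //         RegisterID newValue, Address address, RegisterID result)
    //     {
    //         atomicStrongCAS(cond, expectedAndResult, result, address,
    //             [&] { m_assembler.cmpxchgl_rm(newValue, address.offset, address.base); });
    //     }
    // lock cmpxchg sets ZF on success, which is what StatusCondition::Success
    // maps to in x86Condition() above.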

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;
    // Tests against a constant mask that fits entirely in one byte can be
    // encoded as a byte-sized testb aimed at the byte holding the mask, which
    // is shorter; a mask of -1 degenerates to a compare against zero.
    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else if (!(mask.m_value & ~0xff))
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        else if (!(mask.m_value & ~0xff00))
            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
        else if (!(mask.m_value & ~0xff0000))
            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
        else if (!(mask.m_value & ~0xff000000))
            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    }
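
    // For example, a mask of 0x00ff0000 narrows to
    //     testb $0xff, 2+offset(base)
    // (the byte at offset + 2 holds bits 16..23 on little-endian x86), while
    // 0x00ffff00 spans two bytes and falls back to the full
    //     testl $0x00ffff00, offset(base)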

    // If lzcnt is not available, use this immediately after a BSR to compute
    // the leading-zero count: BSR sets ZF and leaves the destination undefined
    // when the source is zero, and otherwise yields the index of the highest
    // set bit, which xor 0x1f converts to 31 - index.
    void clz32AfterBsr(RegisterID dst)
    {
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
        move(TrustedImm32(32), dst);

        Jump skipNonZeroCase = jump();
        srcIsNonZero.link(this);
        xor32(TrustedImm32(0x1f), dst);
        skipNonZeroCase.link(this);
    }
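
    // Worked example: for an input of 0x00010000, BSR produces bit index 16 and
    // 16 ^ 0x1f = 15, the leading-zero count; a zero input takes the ZF branch
    // and yields 32.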

    template<typename Function>
    void floatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest, Function compare)
    {
        if (cond & DoubleConditionBitSpecial) {
            ASSERT(!(cond & DoubleConditionBitInvert));
            if (cond == DoubleEqual) {
                if (left == right) {
                    compare(right, left);
                    set32(X86Assembler::ConditionNP, dest);
                    return;
                }

                move(TrustedImm32(0), dest);
                compare(right, left);
                Jump isUnordered = m_assembler.jp();
                set32(X86Assembler::ConditionE, dest);
                isUnordered.link(this);
                return;
            }
            if (cond == DoubleNotEqualOrUnordered) {
                if (left == right) {
                    compare(right, left);
                    set32(X86Assembler::ConditionP, dest);
                    return;
                }

                move(TrustedImm32(1), dest);
                compare(right, left);
                Jump isUnordered = m_assembler.jp();
                set32(X86Assembler::ConditionNE, dest);
                isUnordered.link(this);
                return;
            }

            RELEASE_ASSERT_NOT_REACHED();
            return;
        }

        if (cond & DoubleConditionBitInvert)
            compare(left, right);
        else
            compare(right, left);
        set32(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest);
    }
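
    // The parity games above follow from how ucomisd/ucomiss-style compares
    // (what the compare callback is expected to emit) report an unordered
    // result: NaN sets ZF, PF, and CF all at once. PF is therefore the NaN
    // test: DoubleEqual is "ZF set and PF clear", and DoubleNotEqualOrUnordered
    // is "ZF clear or PF set". Comparing a register against itself is only
    // ever unordered when it holds NaN, hence the shortcut when left == right.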

    Jump jumpAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        }
        if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // A 32-bit move does not need a REX prefix for the low registers, making it
    // shorter, and on x86_64 it zero-extends into the upper 32 bits. Use this
    // when the top bits are irrelevant because the next instruction will reset
    // them anyway.
    void move32IfNeeded(RegisterID src, RegisterID dest)
    {
        if (src == dest)
            return;
        m_assembler.movl_rr(src, dest);
    }

#if CPU(X86_64)
    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        if (cond == DoubleEqual) {
            if (left == right) {
                m_assembler.cmovnpq_rr(src, dest);
                return;
            }

            Jump isUnordered(m_assembler.jp());
            m_assembler.cmoveq_rr(src, dest);
            isUnordered.link(this);
            return;
        }

        if (cond == DoubleNotEqualOrUnordered) {
            if (left == right) {
                m_assembler.cmovpq_rr(src, dest);
                return;
            }

            m_assembler.cmovpq_rr(src, dest);
            m_assembler.cmovneq_rr(src, dest);
            return;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        cmov(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
    }
#endif

    using CPUID = std::array<unsigned, 4>;
    static CPUID getCPUID(unsigned level);
    static CPUID getCPUIDEx(unsigned level, unsigned count);
    JS_EXPORT_PRIVATE static void collectCPUFeatures();

    JS_EXPORT_PRIVATE static CPUIDCheckState s_sse4_1CheckState;
    JS_EXPORT_PRIVATE static CPUIDCheckState s_sse4_2CheckState;
    JS_EXPORT_PRIVATE static CPUIDCheckState s_avxCheckState;
    JS_EXPORT_PRIVATE static CPUIDCheckState s_lzcntCheckState;
    JS_EXPORT_PRIVATE static CPUIDCheckState s_bmi1CheckState;
    JS_EXPORT_PRIVATE static CPUIDCheckState s_popcntCheckState;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)