1 | // Copyright 2012 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #if V8_TARGET_ARCH_X64 |
6 | |
7 | #include "src/base/bits.h" |
8 | #include "src/base/division-by-constant.h" |
9 | #include "src/base/utils/random-number-generator.h" |
10 | #include "src/bootstrapper.h" |
11 | #include "src/callable.h" |
12 | #include "src/code-factory.h" |
13 | #include "src/counters.h" |
14 | #include "src/debug/debug.h" |
15 | #include "src/external-reference-table.h" |
16 | #include "src/frames-inl.h" |
17 | #include "src/globals.h" |
18 | #include "src/heap/heap-inl.h" // For MemoryChunk. |
19 | #include "src/macro-assembler.h" |
20 | #include "src/objects-inl.h" |
21 | #include "src/objects/smi.h" |
22 | #include "src/register-configuration.h" |
23 | #include "src/snapshot/embedded-data.h" |
24 | #include "src/snapshot/snapshot.h" |
25 | #include "src/string-constants.h" |
26 | #include "src/x64/assembler-x64.h" |
27 | |
28 | // Satisfy cpplint check, but don't include platform-specific header. It is |
29 | // included recursively via macro-assembler.h. |
30 | #if 0 |
31 | #include "src/x64/macro-assembler-x64.h" |
32 | #endif |
33 | |
34 | namespace v8 { |
35 | namespace internal { |
36 | |
// Returns the operand addressing stack argument |index| (0 = first argument)
// relative to base_reg_, skipping the return address (and the saved frame
// pointer when base_reg_ is rbp) plus any extra displacement configured at
// construction time.
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK_GE(index, 0);
  // One extra slot when the receiver was pushed along with the arguments.
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  // rsp-relative access only skips the return address; rbp-relative access
  // also skips the saved frame pointer.
  int displacement_to_last_argument =
      base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_ == no_reg) {
    // The argument count is a compile-time immediate.
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kSystemPointerSize.
    DCHECK_GT(argument_count_immediate_ + receiver, 0);
    return Operand(base_reg_,
                   displacement_to_last_argument +
                       (argument_count_immediate_ + receiver - 1 - index) *
                           kSystemPointerSize);
  } else {
    // The argument count is dynamic, held in argument_count_reg_.
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_system_pointer_size + (receiver - 1) *
    // kSystemPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_system_pointer_size,
                   displacement_to_last_argument +
                       (receiver - 1 - index) * kSystemPointerSize);
  }
}
60 | |
61 | StackArgumentsAccessor::StackArgumentsAccessor( |
62 | Register base_reg, const ParameterCount& parameter_count, |
63 | StackArgumentsAccessorReceiverMode receiver_mode, |
64 | int ) |
65 | : base_reg_(base_reg), |
66 | argument_count_reg_(parameter_count.is_reg() ? parameter_count.reg() |
67 | : no_reg), |
68 | argument_count_immediate_( |
69 | parameter_count.is_immediate() ? parameter_count.immediate() : 0), |
70 | receiver_mode_(receiver_mode), |
71 | extra_displacement_to_last_argument_( |
72 | extra_displacement_to_last_argument) {} |
73 | |
// Loads the value stored at external reference |source| into |destination|,
// preferring a root-register-relative load when the delta fits in 32 bits,
// then the short rax-specific encoding, else a generic external-ref operand.
void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
    if (is_int32(delta)) {
      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination == rax && !options().isolate_independent_code) {
    // Short encoding that loads rax from an absolute address.
    load_rax(source);
  } else {
    movq(destination, ExternalReferenceAsOperand(source));
  }
}
89 | |
90 | |
// Stores |source| to the location named by external reference |destination|.
// Mirrors Load: root-register-relative store when possible, the short
// rax-specific encoding, else a generic external-ref operand.
void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    intptr_t delta =
        RootRegisterOffsetForExternalReference(isolate(), destination);
    if (is_int32(delta)) {
      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source == rax && !options().isolate_independent_code) {
    // Short encoding that stores rax to an absolute address.
    store_rax(destination);
  } else {
    movq(ExternalReferenceAsOperand(destination), source);
  }
}
107 | |
// Loads element |constant_index| of the builtins constants table into
// |destination|. The table is reached through the root list, so it must be
// an immortal, immovable root.
void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  LoadTaggedPointerField(
      destination,
      FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
116 | |
117 | void TurboAssembler::LoadRootRegisterOffset(Register destination, |
118 | intptr_t offset) { |
119 | DCHECK(is_int32(offset)); |
120 | if (offset == 0) { |
121 | Move(destination, kRootRegister); |
122 | } else { |
123 | leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset))); |
124 | } |
125 | } |
126 | |
// Loads the value at kRootRegister + |offset| into |destination|.
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  movq(destination, Operand(kRootRegister, offset));
}
130 | |
131 | void TurboAssembler::LoadAddress(Register destination, |
132 | ExternalReference source) { |
133 | if (root_array_available_ && options().enable_root_array_delta_access) { |
134 | intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source); |
135 | if (is_int32(delta)) { |
136 | leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); |
137 | return; |
138 | } |
139 | } |
140 | // Safe code. |
141 | if (FLAG_embedded_builtins) { |
142 | if (root_array_available_ && options().isolate_independent_code) { |
143 | IndirectLoadExternalReference(destination, source); |
144 | return; |
145 | } |
146 | } |
147 | Move(destination, source); |
148 | } |
149 | |
// Returns an operand through which |reference| can be accessed. May emit
// code and may clobber |scratch| when the reference cannot be expressed as a
// root-register-relative offset.
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
                                                   Register scratch) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    int64_t delta =
        RootRegisterOffsetForExternalReference(isolate(), reference);
    if (is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  if (root_array_available_ && options().isolate_independent_code) {
    if (IsAddressableThroughRootRegister(isolate(), reference)) {
      // Some external references can be efficiently loaded as an offset from
      // kRootRegister.
      intptr_t offset =
          RootRegisterOffsetForExternalReference(isolate(), reference);
      CHECK(is_int32(offset));
      return Operand(kRootRegister, static_cast<int32_t>(offset));
    } else {
      // Otherwise, do a memory load from the external reference table.
      movq(scratch, Operand(kRootRegister,
                            RootRegisterOffsetForExternalReferenceTableEntry(
                                isolate(), reference)));
      return Operand(scratch, 0);
    }
  }
  // Fall back: materialize the absolute address in |scratch|.
  Move(scratch, reference);
  return Operand(scratch, 0);
}
178 | |
// Pushes the address of |source| onto the stack; clobbers kScratchRegister.
void MacroAssembler::PushAddress(ExternalReference source) {
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}
183 | |
// Loads the root-list entry |index| into |destination|.
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  DCHECK(root_array_available_);
  movq(destination,
       Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
189 | |
// Pushes the root-list entry |index| onto the stack.
void MacroAssembler::PushRoot(RootIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
194 | |
// Compares |with| against the root-list entry |index|, choosing tagged-width
// or full-width comparison based on the kind of root.
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
  DCHECK(root_array_available_);
  if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
                RootIndex::kLastStrongOrReadOnlyRoot)) {
    cmp_tagged(with,
               Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
  } else {
    // Some smi roots contain system pointer size values like stack limits.
    cmpq(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
  }
}

// Memory-operand flavor; loads the root into kScratchRegister first, so the
// operand must not itself use kScratchRegister.
void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
                RootIndex::kLastStrongOrReadOnlyRoot)) {
    cmp_tagged(with, kScratchRegister);
  } else {
    // Some smi roots contain system pointer size values like stack limits.
    cmpq(with, kScratchRegister);
  }
}
219 | |
// Loads a tagged heap-object pointer from |field_operand| into
// |destination|, decompressing it when pointer compression is enabled.
void TurboAssembler::LoadTaggedPointerField(Register destination,
                                            Operand field_operand) {
#ifdef V8_COMPRESS_POINTERS
  DecompressTaggedPointer(destination, field_operand);
#else
  mov_tagged(destination, field_operand);
#endif
}
228 | |
// Loads a tagged value (Smi or heap-object pointer) from |field_operand|
// into |destination|, decompressing it when pointer compression is enabled.
// |scratch| is only used in the compressed-pointer configuration.
void TurboAssembler::LoadAnyTaggedField(Register destination,
                                        Operand field_operand,
                                        Register scratch) {
#ifdef V8_COMPRESS_POINTERS
  DecompressAnyTagged(destination, field_operand, scratch);
#else
  mov_tagged(destination, field_operand);
#endif
}
238 | |
// Pushes the tagged heap-object pointer held in |field_operand|. With
// pointer compression it must first be decompressed via |scratch|.
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
                                            Register scratch) {
#ifdef V8_COMPRESS_POINTERS
  DCHECK(!field_operand.AddressUsesRegister(scratch));
  DecompressTaggedPointer(scratch, field_operand);
  Push(scratch);
#else
  Push(field_operand);
#endif
}
249 | |
// Pushes the tagged value (Smi or heap-object pointer) held in
// |field_operand|. With pointer compression the value is decompressed into
// |scratch1| (using |scratch2| as a helper) before being pushed.
void TurboAssembler::PushTaggedAnyField(Operand field_operand,
                                        Register scratch1, Register scratch2) {
#ifdef V8_COMPRESS_POINTERS
  DCHECK(!AreAliased(scratch1, scratch2));
  DCHECK(!field_operand.AddressUsesRegister(scratch1));
  DCHECK(!field_operand.AddressUsesRegister(scratch2));
  DecompressAnyTagged(scratch1, field_operand, scratch2);
  Push(scratch1);
#else
  Push(field_operand);
#endif
}
262 | |
// Loads the Smi field at |src| and untags it into |dst|.
void TurboAssembler::SmiUntagField(Register dst, Operand src) {
  SmiUntag(dst, src);
}
266 | |
// Stores immediate |value| into the tagged field |dst_field_operand|; a
// 32-bit store under pointer compression, a full 64-bit store otherwise.
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
                                      Immediate value) {
#ifdef V8_COMPRESS_POINTERS
  RecordComment("[ StoreTagged");
  movl(dst_field_operand, value);
  RecordComment("]");
#else
  movq(dst_field_operand, value);
#endif
}

// Register flavor of the above.
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
                                      Register value) {
#ifdef V8_COMPRESS_POINTERS
  RecordComment("[ StoreTagged");
  movl(dst_field_operand, value);
  RecordComment("]");
#else
  movq(dst_field_operand, value);
#endif
}
288 | |
// Decompresses a known-Smi field: sign-extends the 32-bit compressed value
// to 64 bits.
void TurboAssembler::DecompressTaggedSigned(Register destination,
                                            Operand field_operand) {
  RecordComment("[ DecompressTaggedSigned");
  movsxlq(destination, field_operand);
  RecordComment("]");
}
295 | |
// Decompresses a known-heap-object field: sign-extends the 32-bit compressed
// value and adds the isolate root (kRootRegister).
void TurboAssembler::DecompressTaggedPointer(Register destination,
                                             Operand field_operand) {
  RecordComment("[ DecompressTaggedPointer");
  movsxlq(destination, field_operand);
  addq(destination, kRootRegister);
  RecordComment("]");
}
303 | |
// Decompresses a field that may hold either a Smi or a heap-object pointer:
// sign-extends the compressed value, then adds the isolate root only when
// the value is a heap object (Smis stay as-is).
void TurboAssembler::DecompressAnyTagged(Register destination,
                                         Operand field_operand,
                                         Register scratch) {
  DCHECK(!AreAliased(destination, scratch));
  RecordComment("[ DecompressAnyTagged");
  movsxlq(destination, field_operand);
  if (kUseBranchlessPtrDecompression) {
    // Branchlessly compute |masked_root|:
    // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
    STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
    Register masked_root = scratch;
    movl(masked_root, destination);
    andl(masked_root, Immediate(kSmiTagMask));
    // negq turns tag bit 1 into all-ones, 0 stays 0, so the following andq
    // yields either kRootRegister or 0.
    negq(masked_root);
    andq(masked_root, kRootRegister);
    // Now this add operation will either leave the value unchanged if it is
    // a smi or add the isolate root if it is a heap object.
    addq(destination, masked_root);
  } else {
    // Branching variant: skip the root addition for Smis.
    Label done;
    JumpIfSmi(destination, &done);
    addq(destination, kRootRegister);
    bind(&done);
  }
  RecordComment("]");
}
330 | |
// Emits a write barrier for a store of |value| into the field at |offset|
// within |object|. |dst| is clobbered: it is used to hold the slot address.
// With debug code enabled, |value| and |dst| are zapped afterwards to flush
// out accidental reuse.
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kTaggedSize.
  DCHECK(IsAligned(offset, kTaggedSize));

  leaq(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    // Verify the computed slot address is tagged-size aligned.
    Label ok;
    testb(dst, Immediate(kTaggedSize - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // The smi check was already done above (if requested), so omit it here.
  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, RelocInfo::NONE);
    Move(dst, kZapValue, RelocInfo::NONE);
  }
}
370 | |
371 | void TurboAssembler::SaveRegisters(RegList registers) { |
372 | DCHECK_GT(NumRegs(registers), 0); |
373 | for (int i = 0; i < Register::kNumRegisters; ++i) { |
374 | if ((registers >> i) & 1u) { |
375 | pushq(Register::from_code(i)); |
376 | } |
377 | } |
378 | } |
379 | |
380 | void TurboAssembler::RestoreRegisters(RegList registers) { |
381 | DCHECK_GT(NumRegs(registers), 0); |
382 | for (int i = Register::kNumRegisters - 1; i >= 0; --i) { |
383 | if ((registers >> i) & 1u) { |
384 | popq(Register::from_code(i)); |
385 | } |
386 | } |
387 | } |
388 | |
// Calls the EphemeronKeyBarrier builtin for a store of an ephemeron key.
// All of the builtin's allocatable registers are saved/restored around the
// call, so this clobbers nothing from the caller's point of view.
void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
                                             SaveFPRegsMode fp_mode) {
  EphemeronKeyBarrierDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
  Register slot_parameter(descriptor.GetRegisterParameter(
      EphemeronKeyBarrierDescriptor::kSlotAddress));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));

  // Move both values at once so overlapping source/destination registers
  // are handled correctly.
  MovePair(slot_parameter, address, object_parameter, object);
  Smi smi_fm = Smi::FromEnum(fp_mode);
  Move(fp_mode_parameter, smi_fm);
  Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
       RelocInfo::CODE_TARGET);

  RestoreRegisters(registers);
}
411 | |
// Calls the RecordWrite builtin (the out-of-line part of the write barrier).
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

// Wasm flavor: calls a record-write stub within the module via a near call
// instead of a code-object call.
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

// Shared implementation: exactly one of |code_target| / |wasm_target| must
// be provided. Saves and restores the stub's allocatable registers around
// the call.
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  // Prepare argument registers for calling RecordWrite
  // slot_parameter <= address
  // object_parameter <= object
  MovePair(slot_parameter, address, object_parameter, object);

  Smi smi_rsa = Smi::FromEnum(remembered_set_action);
  Smi smi_fm = Smi::FromEnum(fp_mode);
  Move(remembered_set_parameter, smi_rsa);
  if (smi_rsa != smi_fm) {
    Move(fp_mode_parameter, smi_fm);
  } else {
    // Both Smi immediates are equal; reuse the register just set.
    movq(fp_mode_parameter, remembered_set_parameter);
  }
  if (code_target.is_null()) {
    // Use {near_call} for direct Wasm call within a module.
    near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}
471 | |
// Emits the full write barrier for a store of |value| into the slot at
// |address| inside |object|. Skips the out-of-line stub call when the page
// flags show no barrier is needed. With debug code enabled, |address| and
// |value| are zapped afterwards.
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  DCHECK(object != address);
  DCHECK(value != address);
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    // Verify the slot really contains |value|.
    Label ok;
    cmp_tagged(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
                Label::kNear);

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, RelocInfo::NONE);
    Move(value, kZapValue, RelocInfo::NONE);
  }
}
526 | |
// In debug-code builds, aborts with |reason| unless condition |cc| holds.
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}
530 | |
// In debug-code builds, unconditionally aborts with |reason|.
void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}
534 | |
// Aborts with |reason| unless condition |cc| holds (emitted in all builds).
void TurboAssembler::Check(Condition cc, AbortReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}
542 | |
// Emits a runtime check that rsp meets the platform's activation frame
// alignment, trapping with int3 if it does not. No code is emitted when the
// required alignment is not stricter than pointer size.
void TurboAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kSystemPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    Label alignment_as_expected;
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}
556 | |
// Emits code that aborts execution with |reason|: either a plain int3 trap,
// a direct C call to abort_with_reason, or a call to the Abort builtin,
// depending on the assembler options. Control never returns.
void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
  const char* msg = GetAbortReason(reason);
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    int3();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    movl(arg_reg_1, Immediate(static_cast<int>(reason)));
    PrepareCallCFunction(1);
    LoadAddress(rax, ExternalReference::abort_with_reason());
    call(rax);
    return;
  }

  // Pass the reason to the Abort builtin as a Smi in rdx.
  Move(rdx, Smi::FromInt(static_cast<int>(reason)));

  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // Control will not return here.
  int3();
}
593 | |
// Calls runtime function |fid| through the given CEntry code object.
// Sets up rax (argument count) and rbx (runtime entry address); |centry|
// must be rcx and must not alias either.
void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
                                           Register centry) {
  const Runtime::Function* f = Runtime::FunctionForId(fid);
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, f->nargs);
  LoadAddress(rbx, ExternalReference::Create(f));
  DCHECK(!AreAliased(centry, rax, rbx));
  DCHECK(centry == rcx);
  CallCodeObject(centry);
}
607 | |
// Calls runtime function |f| with |num_arguments| stack arguments via a
// freshly selected CEntry stub. rax and rbx are clobbered for the calling
// convention.
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference::Create(f));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}
626 | |
// Tail-calls runtime function |fid|: sets rax for fixed-arity functions and
// jumps (rather than calls) to the runtime entry through CEntry.
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  //
  //  For runtime functions with variable arguments:
  //  -- rax                    : number of  arguments
  // -----------------------------------

  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // Fixed arity: pass the argument count in rax. Variable-arity functions
    // expect the caller to have set rax already.
    Set(rax, function->nargs);
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}
645 | |
// Jumps to the C entry runtime stub with |ext| as the runtime entry point
// (passed in rbx). |builtin_exit_frame| selects the CEntry variant that
// builds a builtin exit frame.
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             bool builtin_exit_frame) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}
654 | |
// The set of general-purpose registers that Push/PopCallerSaved spill and
// restore (all GP registers except rsp and r12-r15; note rbp is included).
static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
                                          rdi, r8, r9, r10, r11};

static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
659 | |
660 | int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, |
661 | Register exclusion1, |
662 | Register exclusion2, |
663 | Register exclusion3) const { |
664 | int bytes = 0; |
665 | for (int i = 0; i < kNumberOfSavedRegs; i++) { |
666 | Register reg = saved_regs[i]; |
667 | if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) { |
668 | bytes += kSystemPointerSize; |
669 | } |
670 | } |
671 | |
672 | // R12 to r15 are callee save on all platforms. |
673 | if (fp_mode == kSaveFPRegs) { |
674 | bytes += kDoubleSize * XMMRegister::kNumRegisters; |
675 | } |
676 | |
677 | return bytes; |
678 | } |
679 | |
// Pushes all caller-saved registers in saved_regs (skipping up to three
// exclusions) and, when |fp_mode| is kSaveFPRegs, spills every XMM register
// into a freshly reserved stack block. Returns the number of bytes pushed.
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      pushq(reg);
      bytes += kSystemPointerSize;
    }
  }

  // R12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    // Reserve one block for all XMM registers, then store each at its slot.
    int delta = kDoubleSize * XMMRegister::kNumRegisters;
    subq(rsp, Immediate(delta));
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(Operand(rsp, i * kDoubleSize), reg);
    }
    bytes += delta;
  }

  return bytes;
}
707 | |
708 | int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, |
709 | Register exclusion2, Register exclusion3) { |
710 | int bytes = 0; |
711 | if (fp_mode == kSaveFPRegs) { |
712 | for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
713 | XMMRegister reg = XMMRegister::from_code(i); |
714 | Movsd(reg, Operand(rsp, i * kDoubleSize)); |
715 | } |
716 | int delta = kDoubleSize * XMMRegister::kNumRegisters; |
717 | addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
718 | bytes += delta; |
719 | } |
720 | |
721 | for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { |
722 | Register reg = saved_regs[i]; |
723 | if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) { |
724 | popq(reg); |
725 | bytes += kSystemPointerSize; |
726 | } |
727 | } |
728 | |
729 | return bytes; |
730 | } |
731 | |
732 | void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { |
733 | if (CpuFeatures::IsSupported(AVX)) { |
734 | CpuFeatureScope scope(this, AVX); |
735 | vcvtss2sd(dst, src, src); |
736 | } else { |
737 | cvtss2sd(dst, src); |
738 | } |
739 | } |
740 | |
741 | void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) { |
742 | if (CpuFeatures::IsSupported(AVX)) { |
743 | CpuFeatureScope scope(this, AVX); |
744 | vcvtss2sd(dst, dst, src); |
745 | } else { |
746 | cvtss2sd(dst, src); |
747 | } |
748 | } |
749 | |
750 | void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { |
751 | if (CpuFeatures::IsSupported(AVX)) { |
752 | CpuFeatureScope scope(this, AVX); |
753 | vcvtsd2ss(dst, src, src); |
754 | } else { |
755 | cvtsd2ss(dst, src); |
756 | } |
757 | } |
758 | |
759 | void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { |
760 | if (CpuFeatures::IsSupported(AVX)) { |
761 | CpuFeatureScope scope(this, AVX); |
762 | vcvtsd2ss(dst, dst, src); |
763 | } else { |
764 | cvtsd2ss(dst, src); |
765 | } |
766 | } |
767 | |
// Converts a signed 32-bit integer to scalar double. dst is zeroed first so
// the result does not depend on its previous upper bits.
void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtlsi2sd(dst, src);
  }
}

// Memory-operand flavor of the above.
void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtlsi2sd(dst, src);
  }
}
789 | |
// Converts a signed 32-bit integer to scalar single. dst is zeroed first so
// the result does not depend on its previous upper bits.
void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}

// Memory-operand flavor of the above.
void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}
811 | |
// Converts a signed 64-bit integer to scalar single. dst is zeroed first so
// the result does not depend on its previous upper bits.
void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}

// Memory-operand flavor of the above.
void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}
833 | |
// Converts a signed 64-bit integer to scalar double. dst is zeroed first so
// the result does not depend on its previous upper bits.
void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}

// Memory-operand flavor of the above.
void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}
855 | |
// Converts an unsigned 32-bit integer to scalar single by zero-extending it
// to 64 bits (movl clears the upper half) and doing a signed 64-bit
// conversion, which is exact for all uint32 values. Clobbers
// kScratchRegister.
void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2ss(dst, kScratchRegister);
}

// Memory-operand flavor of the above.
void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2ss(dst, kScratchRegister);
}
867 | |
// Converts an unsigned 32-bit integer to scalar double by zero-extending it
// to 64 bits (movl clears the upper half) and doing a signed 64-bit
// conversion, which is exact for all uint32 values. Clobbers
// kScratchRegister.
void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2sd(dst, kScratchRegister);
}

// Memory-operand flavor of the above.
void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2sd(dst, kScratchRegister);
}
879 | |
// Unsigned 64-bit integer to float32. Values with the MSB clear convert
// directly via the signed path; otherwise convert {src/2 | (src&1)} and
// double the result. Keeping the shifted-out LSB preserves correct rounding.
// Clobbers kScratchRegister; preserves {src}.
void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
  Label done;
  Cvtqsi2ss(dst, src);
  testq(src, src);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src != kScratchRegister) movq(kScratchRegister, src);
  shrq(kScratchRegister, Immediate(1));
  // The LSB is shifted into CF. If it is set, set the LSB in
  // {kScratchRegister}.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  orq(kScratchRegister, Immediate(1));
  bind(&msb_not_set);
  Cvtqsi2ss(dst, kScratchRegister);
  // Undo the halving: the float result of the halved value times two.
  addss(dst, dst);
  bind(&done);
}
898 | |
// Unsigned 64-bit integer (in memory) to float32; loads the value and
// delegates to the register variant. Clobbers kScratchRegister.
void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
  movq(kScratchRegister, src);
  Cvtqui2ss(dst, kScratchRegister);
}
903 | |
// Unsigned 64-bit integer to float64; same halving trick as Cvtqui2ss for
// inputs with the MSB set. Clobbers kScratchRegister; preserves {src}.
void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
  Label done;
  Cvtqsi2sd(dst, src);
  testq(src, src);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src != kScratchRegister) movq(kScratchRegister, src);
  shrq(kScratchRegister, Immediate(1));
  // The LSB is shifted into CF. If it is set, set the LSB in
  // {kScratchRegister}.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  orq(kScratchRegister, Immediate(1));
  bind(&msb_not_set);
  Cvtqsi2sd(dst, kScratchRegister);
  // Undo the halving: double the converted value.
  addsd(dst, dst);
  bind(&done);
}
922 | |
// Unsigned 64-bit integer (in memory) to float64; loads the value and
// delegates to the register variant. Clobbers kScratchRegister.
void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
  movq(kScratchRegister, src);
  Cvtqui2sd(dst, kScratchRegister);
}
927 | |
928 | void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) { |
929 | if (CpuFeatures::IsSupported(AVX)) { |
930 | CpuFeatureScope scope(this, AVX); |
931 | vcvttss2si(dst, src); |
932 | } else { |
933 | cvttss2si(dst, src); |
934 | } |
935 | } |
936 | |
937 | void TurboAssembler::Cvttss2si(Register dst, Operand src) { |
938 | if (CpuFeatures::IsSupported(AVX)) { |
939 | CpuFeatureScope scope(this, AVX); |
940 | vcvttss2si(dst, src); |
941 | } else { |
942 | cvttss2si(dst, src); |
943 | } |
944 | } |
945 | |
946 | void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) { |
947 | if (CpuFeatures::IsSupported(AVX)) { |
948 | CpuFeatureScope scope(this, AVX); |
949 | vcvttsd2si(dst, src); |
950 | } else { |
951 | cvttsd2si(dst, src); |
952 | } |
953 | } |
954 | |
955 | void TurboAssembler::Cvttsd2si(Register dst, Operand src) { |
956 | if (CpuFeatures::IsSupported(AVX)) { |
957 | CpuFeatureScope scope(this, AVX); |
958 | vcvttsd2si(dst, src); |
959 | } else { |
960 | cvttsd2si(dst, src); |
961 | } |
962 | } |
963 | |
964 | void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) { |
965 | if (CpuFeatures::IsSupported(AVX)) { |
966 | CpuFeatureScope scope(this, AVX); |
967 | vcvttss2siq(dst, src); |
968 | } else { |
969 | cvttss2siq(dst, src); |
970 | } |
971 | } |
972 | |
973 | void TurboAssembler::Cvttss2siq(Register dst, Operand src) { |
974 | if (CpuFeatures::IsSupported(AVX)) { |
975 | CpuFeatureScope scope(this, AVX); |
976 | vcvttss2siq(dst, src); |
977 | } else { |
978 | cvttss2siq(dst, src); |
979 | } |
980 | } |
981 | |
982 | void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) { |
983 | if (CpuFeatures::IsSupported(AVX)) { |
984 | CpuFeatureScope scope(this, AVX); |
985 | vcvttsd2siq(dst, src); |
986 | } else { |
987 | cvttsd2siq(dst, src); |
988 | } |
989 | } |
990 | |
991 | void TurboAssembler::Cvttsd2siq(Register dst, Operand src) { |
992 | if (CpuFeatures::IsSupported(AVX)) { |
993 | CpuFeatureScope scope(this, AVX); |
994 | vcvttsd2siq(dst, src); |
995 | } else { |
996 | cvttsd2siq(dst, src); |
997 | } |
998 | } |
999 | |
namespace {
// Converts the float32/float64 in {src} to an unsigned 64-bit integer in
// {dst}. If the input is outside the uint64 range, jumps to {fail} when one
// is provided; with a null {fail} the overflow falls through with the
// sentinel 0x8000000000000000 left in {dst}. Clobbers kScratchRegister and
// kScratchDoubleReg.
template <typename OperandOrXMMRegister, bool is_double>
void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
                          OperandOrXMMRegister src, Label* fail) {
  Label success;
  // There does not exist a native float-to-uint instruction, so we have to use
  // a float-to-int, and postprocess the result.
  if (is_double) {
    tasm->Cvttsd2siq(dst, src);
  } else {
    tasm->Cvttss2siq(dst, src);
  }
  // If the result of the conversion is positive, we are already done.
  tasm->testq(dst, dst);
  tasm->j(positive, &success);
  // The result of the first conversion was negative, which means that the
  // input value was not within the positive int64 range. We subtract 2^63
  // and convert it again to see if it is within the uint64 range.
  if (is_double) {
    tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
    tasm->addsd(kScratchDoubleReg, src);
    tasm->Cvttsd2siq(dst, kScratchDoubleReg);
  } else {
    tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
    tasm->addss(kScratchDoubleReg, src);
    tasm->Cvttss2siq(dst, kScratchDoubleReg);
  }
  tasm->testq(dst, dst);
  // The only possible negative value here is 0x8000000000000000, which is
  // used on x64 to indicate an integer overflow.
  tasm->j(negative, fail ? fail : &success);
  // The input value is within uint64 range and the second conversion worked
  // successfully, but we still have to undo the subtraction we did
  // earlier.
  tasm->Set(kScratchRegister, 0x8000000000000000);
  tasm->orq(dst, kScratchRegister);
  tasm->bind(&success);
}
}  // namespace
1039 | |
1040 | void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* success) { |
1041 | ConvertFloatToUint64<Operand, true>(this, dst, src, success); |
1042 | } |
1043 | |
1044 | void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* success) { |
1045 | ConvertFloatToUint64<XMMRegister, true>(this, dst, src, success); |
1046 | } |
1047 | |
1048 | void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* success) { |
1049 | ConvertFloatToUint64<Operand, false>(this, dst, src, success); |
1050 | } |
1051 | |
1052 | void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* success) { |
1053 | ConvertFloatToUint64<XMMRegister, false>(this, dst, src, success); |
1054 | } |
1055 | |
1056 | void TurboAssembler::Set(Register dst, int64_t x) { |
1057 | if (x == 0) { |
1058 | xorl(dst, dst); |
1059 | } else if (is_uint32(x)) { |
1060 | movl(dst, Immediate(static_cast<uint32_t>(x))); |
1061 | } else if (is_int32(x)) { |
1062 | movq(dst, Immediate(static_cast<int32_t>(x))); |
1063 | } else { |
1064 | movq(dst, x); |
1065 | } |
1066 | } |
1067 | |
1068 | void TurboAssembler::Set(Operand dst, intptr_t x) { |
1069 | if (is_int32(x)) { |
1070 | movq(dst, Immediate(static_cast<int32_t>(x))); |
1071 | } else { |
1072 | Set(kScratchRegister, x); |
1073 | movq(dst, kScratchRegister); |
1074 | } |
1075 | } |
1076 | |
1077 | |
1078 | // ---------------------------------------------------------------------------- |
1079 | // Smi tagging, untagging and tag detection. |
1080 | |
1081 | Register TurboAssembler::GetSmiConstant(Smi source) { |
1082 | STATIC_ASSERT(kSmiTag == 0); |
1083 | int value = source->value(); |
1084 | if (value == 0) { |
1085 | xorl(kScratchRegister, kScratchRegister); |
1086 | return kScratchRegister; |
1087 | } |
1088 | Move(kScratchRegister, source); |
1089 | return kScratchRegister; |
1090 | } |
1091 | |
1092 | void TurboAssembler::Move(Register dst, Smi source) { |
1093 | STATIC_ASSERT(kSmiTag == 0); |
1094 | int value = source->value(); |
1095 | if (value == 0) { |
1096 | xorl(dst, dst); |
1097 | } else { |
1098 | Move(dst, source.ptr(), RelocInfo::NONE); |
1099 | } |
1100 | } |
1101 | |
// Loads the external reference {ext} into {dst}. For isolate-independent
// (embedded builtin) code the address must not be baked in as an immediate,
// so it is loaded indirectly through the root-register-relative tables.
void TurboAssembler::Move(Register dst, ExternalReference ext) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, ext);
      return;
    }
  }
  movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
1111 | |
// Tags the integer in {src} as a Smi and puts the result in {dst}
// (dst may equal src). Tagging is a left shift by kSmiShift.
void MacroAssembler::SmiTag(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (dst != src) {
    movq(dst, src);
  }
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
  shlq(dst, Immediate(kSmiShift));
}
1120 | |
// Untags the Smi in {src} into {dst} (dst may equal src). The arithmetic
// right shift preserves the sign of negative Smi values.
void TurboAssembler::SmiUntag(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (dst != src) {
    movq(dst, src);
  }
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
  sarq(dst, Immediate(kSmiShift));
}
1129 | |
// Untags the Smi stored at {src} into {dst}.
void TurboAssembler::SmiUntag(Register dst, Operand src) {
  if (SmiValuesAre32Bits()) {
    // The 32-bit payload lives in the upper half of the word, so load just
    // that half instead of loading 64 bits and shifting.
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  } else {
    DCHECK(SmiValuesAre31Bits());
#ifdef V8_COMPRESS_POINTERS
    // Compressed tagged values are 32 bits wide; sign-extend while loading.
    movsxlq(dst, src);
#else
    movq(dst, src);
#endif
    sarq(dst, Immediate(kSmiShift));
  }
}
1145 | |
// Compares two Smi-tagged registers; both operands must actually be Smis
// (checked in debug builds only).
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmp_tagged(smi1, smi2);
}
1151 | |
// Compares the Smi-tagged register {dst} against the Smi constant {src}.
void MacroAssembler::SmiCompare(Register dst, Smi src) {
  AssertSmi(dst);
  Cmp(dst, src);
}
1156 | |
1157 | void MacroAssembler::Cmp(Register dst, Smi src) { |
1158 | DCHECK_NE(dst, kScratchRegister); |
1159 | if (src->value() == 0) { |
1160 | test_tagged(dst, dst); |
1161 | } else { |
1162 | Register constant_reg = GetSmiConstant(src); |
1163 | cmp_tagged(dst, constant_reg); |
1164 | } |
1165 | } |
1166 | |
// Compares a Smi-tagged register against a Smi-tagged memory operand.
void MacroAssembler::SmiCompare(Register dst, Operand src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmp_tagged(dst, src);
}
1172 | |
// Compares a Smi-tagged memory operand against a Smi-tagged register.
void MacroAssembler::SmiCompare(Operand dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmp_tagged(dst, src);
}
1178 | |
// Compares the Smi at {dst} against the Smi constant {src}.
void MacroAssembler::SmiCompare(Operand dst, Smi src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    // The payload occupies the upper 32 bits, so compare only that half
    // against the raw integer value.
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}
1188 | |
// Compares the tagged value at {dst} against the Smi constant {src}.
// Clobbers kScratchRegister (holds the materialized constant).
void MacroAssembler::Cmp(Operand dst, Smi src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmp_tagged(dst, smi_reg);
}
1195 | |
1196 | |
// Tests the Smi tag bit of {src} and returns the condition that is
// satisfied when {src} is a Smi (tag bit clear => zero flag set).
Condition TurboAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}
1202 | |
// Memory-operand variant of CheckSmi; returns the "is a Smi" condition.
Condition TurboAssembler::CheckSmi(Operand src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}
1208 | |
// Jumps to {on_smi} when {src} holds a Smi.
void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}
1214 | |
// Jumps to {on_not_smi} when {src} does not hold a Smi.
void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}
1221 | |
// Memory-operand variant: jumps to {on_not_smi} when the value at {src}
// is not a Smi.
void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}
1227 | |
// Adds the Smi {constant} to the Smi stored at {dst}, in place. No overflow
// check is performed. May clobber kScratchRegister on the 31-bit-Smi,
// 64-bit-tagged path.
void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      // Payload is in the upper 32 bits; add the raw value to that half.
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      DCHECK(SmiValuesAre31Bits());
      if (kTaggedSize == kInt64Size) {
        // Sign-extend value after addition
        movl(kScratchRegister, dst);
        addl(kScratchRegister, Immediate(constant));
        movsxlq(kScratchRegister, kScratchRegister);
        movq(dst, kScratchRegister);
      } else {
        DCHECK_EQ(kTaggedSize, kInt32Size);
        addl(dst, Immediate(constant));
      }
    }
  }
}
1248 | |
// Converts the Smi in {src} into an index scaled by 2^{shift}, returning a
// SmiIndex (register + scale factor) usable in addressing modes. The
// conversion folds the untag shift and the scaling shift into one shift, or
// into the operand's scale factor when possible.
SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    DCHECK(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
    if (dst != src) {
      movq(dst, src);
    }
    // Untag (>> kSmiShift) and scale (<< shift) combined into one shift;
    // direction depends on which is larger.
    if (shift < kSmiShift) {
      sarq(dst, Immediate(kSmiShift - shift));
    } else {
      shlq(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    if (dst != src) {
      mov_tagged(dst, src);
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
    if (shift < kSmiShift) {
      sarq(dst, Immediate(kSmiShift - shift));
    } else if (shift != kSmiShift) {
      // Small leftover shifts can be expressed as an addressing-mode scale
      // factor instead of an extra instruction.
      if (shift - kSmiShift <= static_cast<int>(times_8)) {
        return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
      }
      shlq(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  }
}
1284 | |
// Pushes the Smi constant {source} on the stack, choosing the shortest
// instruction sequence for its bit pattern. May clobber kScratchRegister
// in the general case.
void TurboAssembler::Push(Smi source) {
  intptr_t smi = static_cast<intptr_t>(source.ptr());
  if (is_int32(smi)) {
    // Fits in push's sign-extended 32-bit immediate.
    Push(Immediate(static_cast<int32_t>(smi)));
    return;
  }
  int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
  int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
  if (first_byte_set == last_byte_set) {
    // All set bits fall into one byte: push zero, then patch that byte.
    // This sequence has only 7 bytes, compared to the 12 bytes below.
    Push(Immediate(0));
    movb(Operand(rsp, first_byte_set),
         Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
    return;
  }
  // General case: materialize in the scratch register and push it.
  Register constant = GetSmiConstant(source);
  Push(constant);
}
1303 | |
1304 | // ---------------------------------------------------------------------------- |
1305 | |
// Register-to-register move; a no-op when source and destination coincide.
void TurboAssembler::Move(Register dst, Register src) {
  if (dst != src) {
    movq(dst, src);
  }
}
1311 | |
// Performs the parallel moves dst0 <- src0 and dst1 <- src1, ordering (or
// swapping) the moves so that neither destination clobbers a still-needed
// source.
void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
                              Register src1) {
  if (dst0 != src1) {
    // Normal case: Writing to dst0 does not destroy src1.
    Move(dst0, src0);
    Move(dst1, src1);
  } else if (dst1 != src0) {
    // Only dst0 and src1 are the same register,
    // but writing to dst1 does not destroy src0.
    Move(dst1, src1);
    Move(dst0, src0);
  } else {
    // dst0 == src1, and dst1 == src0, a swap is required:
    // dst0 \/ src0
    // dst1 /\ src1
    xchgq(dst0, dst1);
  }
}
1330 | |
1331 | void TurboAssembler::MoveNumber(Register dst, double value) { |
1332 | int32_t smi; |
1333 | if (DoubleToSmiInteger(value, &smi)) { |
1334 | Move(dst, Smi::FromInt(smi)); |
1335 | } else { |
1336 | movq_heap_number(dst, value); |
1337 | } |
1338 | } |
1339 | |
// Loads the 32-bit constant {src} into the low lane of {dst}. Constants
// whose set bits form one contiguous run are built from all-ones plus two
// shifts, avoiding a general-purpose register; otherwise the value goes
// through kScratchRegister.
void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    Xorps(dst, dst);
  } else {
    unsigned nlz = base::bits::CountLeadingZeros(src);
    unsigned ntz = base::bits::CountTrailingZeros(src);
    unsigned pop = base::bits::CountPopulation(src);
    DCHECK_NE(0u, pop);
    // pop + ntz + nlz == 32 iff the set bits are contiguous.
    if (pop + ntz + nlz == 32) {
      Pcmpeqd(dst, dst);
      if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
      if (nlz) Psrld(dst, static_cast<byte>(nlz));
    } else {
      movl(kScratchRegister, Immediate(src));
      Movd(dst, kScratchRegister);
    }
  }
}
1358 | |
// Loads the 64-bit constant {src} into the low lane of {dst}; same
// contiguous-bit-run trick as the 32-bit variant, falling back to the
// 32-bit path when the upper half is zero, and to kScratchRegister
// otherwise.
void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
  if (src == 0) {
    Xorpd(dst, dst);
  } else {
    unsigned nlz = base::bits::CountLeadingZeros(src);
    unsigned ntz = base::bits::CountTrailingZeros(src);
    unsigned pop = base::bits::CountPopulation(src);
    DCHECK_NE(0u, pop);
    // pop + ntz + nlz == 64 iff the set bits are contiguous.
    if (pop + ntz + nlz == 64) {
      Pcmpeqd(dst, dst);
      if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
      if (nlz) Psrlq(dst, static_cast<byte>(nlz));
    } else {
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (upper == 0) {
        Move(dst, lower);
      } else {
        movq(kScratchRegister, src);
        Movq(dst, kScratchRegister);
      }
    }
  }
}
1383 | |
1384 | // ---------------------------------------------------------------------------- |
1385 | |
// Float32 absolute value: clears the sign bits by ANDing with the
// float-abs mask constant.
void MacroAssembler::Absps(XMMRegister dst) {
  Andps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_float_abs_constant()));
}
1390 | |
// Float32 negation: flips the sign bits by XORing with the float-neg mask.
void MacroAssembler::Negps(XMMRegister dst) {
  Xorps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_float_neg_constant()));
}
1395 | |
// Float64 absolute value via the double-abs mask. Andps (rather than Andpd)
// is fine here: bitwise AND is lane-width agnostic.
void MacroAssembler::Abspd(XMMRegister dst) {
  Andps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_double_abs_constant()));
}
1400 | |
// Float64 negation via the double-neg mask; Xorps works bitwise, so the
// single-precision form is equivalent.
void MacroAssembler::Negpd(XMMRegister dst) {
  Xorps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_double_neg_constant()));
}
1405 | |
// Compares {dst} against the object referenced by {source}: by value for
// Smis, by (tagged) pointer identity for heap objects. Clobbers
// kScratchRegister on the heap-object path.
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    Move(kScratchRegister, Handle<HeapObject>::cast(source));
    cmp_tagged(dst, kScratchRegister);
  }
}
1415 | |
// Memory-operand variant of Cmp against a handle; see the register variant.
// Clobbers kScratchRegister on the heap-object path.
void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    Move(kScratchRegister, Handle<HeapObject>::cast(source));
    cmp_tagged(dst, kScratchRegister);
  }
}
1425 | |
// Jumps to {on_in_range} when lower_limit <= value <= higher_limit
// (all treated as unsigned). Subtracting the lower limit first (via lea)
// reduces the range check to a single unsigned compare. May clobber
// kScratchRegister.
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                     unsigned higher_limit, Label* on_in_range,
                                     Label::Distance near_jump) {
  if (lower_limit != 0) {
    // kScratchRegister = value - lower_limit (mod 2^32); values below the
    // limit wrap around and fail the unsigned compare below.
    leal(kScratchRegister, Operand(value, 0u - lower_limit));
    cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
  } else {
    cmpl(value, Immediate(higher_limit));
  }
  j(below_equal, on_in_range, near_jump);
}
1437 | |
// Pushes the heap object referenced by {source}; materializes it through
// kScratchRegister since there is no push-with-64-bit-immediate.
void TurboAssembler::Push(Handle<HeapObject> source) {
  Move(kScratchRegister, source);
  Push(kScratchRegister);
}
1442 | |
// Loads the heap object {object} into {result} with relocation mode {rmode}.
// Isolate-independent (embedded builtin) code must not embed heap addresses,
// so it loads the constant indirectly via the builtins constants table.
void TurboAssembler::Move(Register result, Handle<HeapObject> object,
                          RelocInfo::Mode rmode) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(result, object);
      return;
    }
  }
  movq(result, Immediate64(object.address(), rmode));
}
1453 | |
// Stores the heap object {object} to memory at {dst}; staged through
// kScratchRegister (no 64-bit-immediate store on x64).
void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
                          RelocInfo::Mode rmode) {
  Move(kScratchRegister, object, rmode);
  movq(dst, kScratchRegister);
}
1459 | |
// Loads a (deferred) string constant into {result}. {rmode} is currently
// unused here; movq_string records the necessary relocation itself.
void TurboAssembler::MoveStringConstant(Register result,
                                        const StringConstantBase* string,
                                        RelocInfo::Mode rmode) {
  movq_string(result, string);
}
1465 | |
// Removes {stack_elements} pointer-sized slots from the top of the stack
// without reading them.
void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addq(rsp, Immediate(stack_elements * kSystemPointerSize));
  }
}
1471 | |
1472 | |
// Removes {stack_elements} slots that sit below the return address, keeping
// the return address on top of the stack. Uses {scratch} to hold the return
// address while the slots are dropped.
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
  DCHECK_GT(stack_elements, 0);
  if (stack_elements == 1) {
    // Pop the return address directly into the slot being dropped.
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}
1485 | |
// Pushes {src} on the stack.
void TurboAssembler::Push(Register src) { pushq(src); }
1487 | |
// Pushes the value at {src} on the stack.
void TurboAssembler::Push(Operand src) { pushq(src); }
1489 | |
// Pushes a full 64-bit value at {src}, regardless of tagged-value width.
void MacroAssembler::PushQuad(Operand src) { pushq(src); }
1491 | |
// Pushes a (sign-extended 32-bit) immediate on the stack.
void TurboAssembler::Push(Immediate value) { pushq(value); }
1493 | |
// Pushes {imm32} with the fixed 32-bit-immediate push encoding.
void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
1495 | |
// Pops the top of stack into {dst}.
void MacroAssembler::Pop(Register dst) { popq(dst); }
1497 | |
// Pops the top of stack into memory at {dst}.
void MacroAssembler::Pop(Operand dst) { popq(dst); }
1499 | |
// Pops a full 64-bit value into {dst}, regardless of tagged-value width.
void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
1501 | |
// Jumps to the external reference {ext}; the target address is loaded into
// kScratchRegister first (indirect jump).
void TurboAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}
1506 | |
// Indirect jump through the memory operand {op}.
void TurboAssembler::Jump(Operand op) { jmp(op); }
1508 | |
// Jumps to the absolute address {destination}, recording relocation info
// {rmode} for the loaded immediate. Clobbers kScratchRegister.
void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}
1513 | |
// Conditionally jumps to {code_object}. For isolate-independent builtins
// with off-heap trampoline inlining enabled, jumps straight to the embedded
// builtin's instruction start instead of going through the on-heap
// trampoline; the condition is then realized by branching around the
// unconditional jump. Clobbers kScratchRegister on that path.
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
                          Condition cc) {
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    int builtin_index = Builtins::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index)) {
      Label skip;
      if (cc != always) {
        // Emit no code at all for a never-taken jump; otherwise skip the
        // inlined jump when the negated condition holds.
        if (cc == never) return;
        j(NegateCondition(cc), &skip, Label::kNear);
      }
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
      jmp(kScratchRegister);
      bind(&skip);
      return;
    }
  }
  j(cc, code_object, rmode);
}
1540 | |
// Tail-calls into the embedded (off-heap) instruction stream at {entry},
// using the register reserved for off-heap trampolines.
void MacroAssembler::JumpToInstructionStream(Address entry) {
  Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
  jmp(kOffHeapTrampolineRegister);
}
1545 | |
// Calls the external reference {ext}; the target address is loaded into
// kScratchRegister first (indirect call).
void TurboAssembler::Call(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  call(kScratchRegister);
}
1550 | |
// Indirect call through the memory operand {op}. On Atom the target is
// loaded into a register first — presumably memory-indirect calls are slow
// on that microarchitecture (NOTE(review): confirm against Atom tuning
// guidance).
void TurboAssembler::Call(Operand op) {
  if (!CpuFeatures::IsSupported(ATOM)) {
    call(op);
  } else {
    movq(kScratchRegister, op);
    call(kScratchRegister);
  }
}
1559 | |
// Calls the absolute address {destination}, recording relocation info
// {rmode} for the loaded immediate. Clobbers kScratchRegister.
void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  call(kScratchRegister);
}
1564 | |
// Calls {code_object}. For isolate-independent builtins with off-heap
// trampoline inlining enabled, calls the embedded builtin's instruction
// start directly instead of going through the on-heap trampoline.
// Clobbers kScratchRegister on that path.
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    int builtin_index = Builtins::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index)) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
      call(kScratchRegister);
      return;
    }
  }
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  call(code_object, rmode);
}
1585 | |
// Calls the builtin whose index (as a Smi) is in {builtin_pointer}, by
// indexing into the isolate's builtin entry table off the root register.
// Clobbers {builtin_pointer} on the 32-bit-Smi path.
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  STATIC_ASSERT(kSmiShiftSize == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_pointer register contains the builtin index as a Smi.
  // Untagging is folded into the indexing operand below (we use times_4 instead
  // of times_8 since smis are already shifted by one).
  Call(Operand(kRootRegister, builtin_pointer, times_4,
               IsolateData::builtin_entry_table_offset()));
#else  // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  STATIC_ASSERT(kSmiShiftSize == 31);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_pointer register contains the builtin index as a Smi.
  // With 32-bit Smi payloads the shift cannot be folded into the addressing
  // mode, so untag explicitly first.
  SmiUntag(builtin_pointer, builtin_pointer);
  Call(Operand(kRootRegister, builtin_pointer, times_8,
               IsolateData::builtin_entry_table_offset()));
#endif  // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
}
1608 | |
// Computes the entry address of {code_object} into {destination}
// (the two registers may alias).
void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline.  Otherwise, just call the Code object as always.
    testl(FieldOperand(code_object, Code::kFlagsOffset),
          Immediate(Code::IsOffHeapTrampoline::kMask));
    j(not_equal, &if_code_is_off_heap);

    // Not an off-heap trampoline, the entry point is at
    // Code::raw_instruction_start().
    Move(destination, code_object);
    addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
    jmp(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
    bind(&if_code_is_off_heap);
    movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
    movq(destination,
         Operand(kRootRegister, destination, times_system_pointer_size,
                 IsolateData::builtin_entry_table_offset()));

    bind(&out);
  } else {
    // Entry point is directly after the Code object header.
    Move(destination, code_object);
    addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
  }
}
1651 | |
// Calls the entry point of the given Code object. Clobbers {code_object}
// (it is overwritten with the entry address).
void TurboAssembler::CallCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  call(code_object);
}
1656 | |
// Tail-calls the entry point of the given Code object. Clobbers
// {code_object} (it is overwritten with the entry address).
void TurboAssembler::JumpCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  jmp(code_object);
}
1661 | |
// Calls the address in {reg} through a retpoline sequence: the indirect
// branch is replaced by a call/ret pair whose return address is overwritten
// with the real target, while the speculative return path is trapped in a
// pause/jmp loop. Mitigates branch-target-injection (Spectre v2).
void TurboAssembler::RetpolineCall(Register reg) {
  Label setup_return, setup_target, inner_indirect_branch, capture_spec;

  jmp(&setup_return);  // Jump past the entire retpoline below.

  bind(&inner_indirect_branch);
  call(&setup_target);

  // Speculative execution of the ret below is captured in this loop.
  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  // Replace the pushed return address with the actual target, then "return"
  // to it.
  bind(&setup_target);
  movq(Operand(rsp, 0), reg);
  ret(0);

  bind(&setup_return);
  call(&inner_indirect_branch);  // Callee will return after this instruction.
}
1681 | |
// Retpoline call to an absolute address; materializes the target in
// kScratchRegister and delegates to the register variant.
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  RetpolineCall(kScratchRegister);
}
1686 | |
// Tail-calls the address in {reg} through a retpoline (see RetpolineCall):
// the return address pushed by the call is overwritten with the real target
// and "returned" to, while speculation is trapped in the pause/jmp loop.
void TurboAssembler::RetpolineJump(Register reg) {
  Label setup_target, capture_spec;

  call(&setup_target);

  // Speculative execution of the ret below is captured in this loop.
  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  bind(&setup_target);
  movq(Operand(rsp, 0), reg);
  ret(0);
}
1700 | |
// Extracts the 32-bit lane {imm8} of {src} into {dst}. Without SSE4.1 only
// lanes 0 and 1 are supported (movd, or movq plus a shift).
void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    Movd(dst, src);
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  // SSE2 fallback: move the low 64 bits and shift lane 1 down.
  DCHECK_EQ(1, imm8);
  movq(dst, src);
  shrq(dst, Immediate(32));
}
1715 | |
// Inserts the 32-bit value in {src} into lane {imm8} of {dst}. Without
// SSE4.1 only lanes 0 and 1 are supported, built from unpack/move
// operations. Clobbers kScratchDoubleReg on the fallback path.
void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    // Interleave low dwords: dst[1] <- scratch[0], dst[0] unchanged.
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    // Register-form movss replaces only the low lane, preserving the rest.
    Movss(dst, kScratchDoubleReg);
  }
}
1730 | |
// Memory-operand variant of Pinsrd; see the register variant. Clobbers
// kScratchDoubleReg on the non-SSE4.1 path.
void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    Movss(dst, kScratchDoubleReg);
  }
}
1745 | |
// Counts leading zero bits of the 32-bit value in {src}. Without LZCNT,
// emulates it with bsr: bsr yields the index of the highest set bit (and is
// undefined for zero input, hence the pre-set 63), and xor with 31 converts
// that index into the leading-zero count.
void TurboAssembler::Lzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32, the lzcnt result for a zero input.
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}
1759 | |
// Memory-operand variant of the 32-bit leading-zero count; see the register
// variant for the bsr-based emulation.
void TurboAssembler::Lzcntl(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32, the lzcnt result for a zero input.
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}
1773 | |
// Count leading zeros of the 64-bit value in |src| into |dst|.
// Uses lzcnt when available; otherwise emulates it with bsr plus an xor
// fix-up (bsr leaves dst undefined when src == 0, so lzcnt(0) == 64 is
// produced explicitly).
void TurboAssembler::Lzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}
1787 | |
// Memory-operand variant: count leading zeros of the 64-bit value at |src|
// into |dst|, emulating lzcnt with bsr + xor when LZCNT is unavailable.
void TurboAssembler::Lzcntq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // bsr leaves dst undefined for a zero input; force the lzcnt(0) result.
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}
1801 | |
// Count trailing zeros of the 64-bit value in |src| into |dst|.
// Uses tzcnt when BMI1 is available; otherwise emulates it with bsf,
// handling the zero input explicitly.
void TurboAssembler::Tzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}
1815 | |
// Memory-operand variant: count trailing zeros of the 64-bit value at
// |src| into |dst|, emulating tzcnt with bsf when BMI1 is unavailable.
void TurboAssembler::Tzcntq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}
1829 | |
// Count trailing zeros of the 32-bit value in |src| into |dst|.
// Uses tzcnt when BMI1 is available; otherwise emulates it with bsf,
// handling the zero input explicitly (bsf(0) is undefined).
void TurboAssembler::Tzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}
1842 | |
// Memory-operand variant: count trailing zeros of the 32-bit value at
// |src| into |dst|, emulating tzcnt with bsf when BMI1 is unavailable.
void TurboAssembler::Tzcntl(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}
1855 | |
1856 | void TurboAssembler::Popcntl(Register dst, Register src) { |
1857 | if (CpuFeatures::IsSupported(POPCNT)) { |
1858 | CpuFeatureScope scope(this, POPCNT); |
1859 | popcntl(dst, src); |
1860 | return; |
1861 | } |
1862 | UNREACHABLE(); |
1863 | } |
1864 | |
1865 | void TurboAssembler::Popcntl(Register dst, Operand src) { |
1866 | if (CpuFeatures::IsSupported(POPCNT)) { |
1867 | CpuFeatureScope scope(this, POPCNT); |
1868 | popcntl(dst, src); |
1869 | return; |
1870 | } |
1871 | UNREACHABLE(); |
1872 | } |
1873 | |
1874 | void TurboAssembler::Popcntq(Register dst, Register src) { |
1875 | if (CpuFeatures::IsSupported(POPCNT)) { |
1876 | CpuFeatureScope scope(this, POPCNT); |
1877 | popcntq(dst, src); |
1878 | return; |
1879 | } |
1880 | UNREACHABLE(); |
1881 | } |
1882 | |
1883 | void TurboAssembler::Popcntq(Register dst, Operand src) { |
1884 | if (CpuFeatures::IsSupported(POPCNT)) { |
1885 | CpuFeatureScope scope(this, POPCNT); |
1886 | popcntq(dst, src); |
1887 | return; |
1888 | } |
1889 | UNREACHABLE(); |
1890 | } |
1891 | |
1892 | |
// Push all general-purpose registers that are saved for safepoints
// (12 registers; see kSafepointPushRegisterIndices), then pad the stack
// so the reserved area covers kNumSafepointRegisters slots.
void MacroAssembler::Pushad() {
  Push(rax);
  Push(rcx);
  Push(rdx);
  Push(rbx);
  // Not pushing rsp or rbp.
  Push(rsi);
  Push(rdi);
  Push(r8);
  Push(r9);
  // r10 is kScratchRegister.
  Push(r11);
  Push(r12);
  // r13 is kRootRegister.
  Push(r14);
  Push(r15);
  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
                 kSystemPointerSize;
  leaq(rsp, Operand(rsp, -sp_delta));
}
1915 | |
1916 | |
// Undo Pushad: drop the padding slots, then pop the saved registers in
// reverse order.
void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
                 kSystemPointerSize;
  leaq(rsp, Operand(rsp, sp_delta));
  Pop(r15);
  Pop(r14);
  Pop(r12);
  Pop(r11);
  Pop(r9);
  Pop(r8);
  Pop(rdi);
  Pop(rsi);
  Pop(rbx);
  Pop(rdx);
  Pop(rcx);
  Pop(rax);
}
1935 | |
1936 | |
// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
// Maps each general register code to its index in the block pushed by
// Pushad, or -1 for registers Pushad does not save (rsp, rbp,
// r10 == kScratchRegister, r13 == kRootRegister).
const int
MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
    0,   // rax
    1,   // rcx
    2,   // rdx
    3,   // rbx
    -1,  // rsp (not saved)
    -1,  // rbp (not saved)
    4,   // rsi
    5,   // rdi
    6,   // r8
    7,   // r9
    -1,  // r10 == kScratchRegister (not saved)
    8,   // r11
    9,   // r12
    -1,  // r13 == kRootRegister (not saved)
    10,  // r14
    11   // r15
};
1958 | |
// Push a new stack handler (padding + link to the previous handler) and
// make it the current handler in the isolate's handler slot.
void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  Push(Immediate(0));  // Padding.

  // Link the current handler as the next handler.
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  Push(ExternalReferenceAsOperand(handler_address));

  // Set this new handler as the current one.
  movq(ExternalReferenceAsOperand(handler_address), rsp);
}
1974 | |
1975 | |
// Undo PushStackHandler: restore the previous handler as the current one
// and drop the remainder of the handler (the padding slot).
void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  Pop(ExternalReferenceAsOperand(handler_address));
  addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
1983 | |
1984 | void TurboAssembler::Ret() { ret(0); } |
1985 | |
1986 | void TurboAssembler::Ret(int bytes_dropped, Register scratch) { |
1987 | if (is_uint16(bytes_dropped)) { |
1988 | ret(bytes_dropped); |
1989 | } else { |
1990 | PopReturnAddressTo(scratch); |
1991 | addq(rsp, Immediate(bytes_dropped)); |
1992 | PushReturnAddressFrom(scratch); |
1993 | ret(0); |
1994 | } |
1995 | } |
1996 | |
// Compare the instance type of |heap_object| against |type|, leaving the
// object's map in |map| and the comparison result in the flags.
void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  LoadTaggedPointerField(map,
                         FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}
2004 | |
2005 | |
// Compare the instance type stored in |map| against |type|, setting the
// flags for a subsequent conditional jump.
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
2009 | |
// Convert the double in |input_reg| to an int32 in |result_reg|.
// Jumps to |lost_precision| if the round-trip back to double does not
// compare equal (truncation occurred or the value overflowed), and to
// |is_nan| on a NaN input. Clobbers kScratchDoubleReg.
// NOTE(review): the |scratch| parameter is unused here -- confirm whether
// other ports require it.
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch, Label* lost_precision,
                               Label* is_nan, Label::Distance dst) {
  Cvttsd2si(result_reg, input_reg);
  Cvtlsi2sd(kScratchDoubleReg, result_reg);
  Ucomisd(kScratchDoubleReg, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);  // NaN.
}
2019 | |
2020 | |
2021 | void MacroAssembler::AssertNotSmi(Register object) { |
2022 | if (emit_debug_code()) { |
2023 | Condition is_smi = CheckSmi(object); |
2024 | Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi); |
2025 | } |
2026 | } |
2027 | |
2028 | |
2029 | void MacroAssembler::AssertSmi(Register object) { |
2030 | if (emit_debug_code()) { |
2031 | Condition is_smi = CheckSmi(object); |
2032 | Check(is_smi, AbortReason::kOperandIsNotASmi); |
2033 | } |
2034 | } |
2035 | |
2036 | void MacroAssembler::AssertSmi(Operand object) { |
2037 | if (emit_debug_code()) { |
2038 | Condition is_smi = CheckSmi(object); |
2039 | Check(is_smi, AbortReason::kOperandIsNotASmi); |
2040 | } |
2041 | } |
2042 | |
// Debug-only check that the upper 32 bits of |int32_register| are zero,
// i.e. the register holds a properly zero-extended 32-bit value.
// Clobbers kScratchRegister.
void TurboAssembler::AssertZeroExtended(Register int32_register) {
  if (emit_debug_code()) {
    DCHECK_NE(int32_register, kScratchRegister);
    // 2^32: any zero-extended 32-bit value is strictly below this.
    movq(kScratchRegister, int64_t{0x0000000100000000});
    cmpq(kScratchRegister, int32_register);
    Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
  }
}
2051 | |
// Debug-only check that |object| is a constructor: not a Smi, and its
// map has the IsConstructor bit set. |object| is preserved via push/pop.
void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
    // Temporarily reuse |object| to hold its map; restored below.
    Push(object);
    LoadTaggedPointerField(object,
                           FieldOperand(object, HeapObject::kMapOffset));
    testb(FieldOperand(object, Map::kBitFieldOffset),
          Immediate(Map::IsConstructorBit::kMask));
    Pop(object);
    Check(not_zero, AbortReason::kOperandIsNotAConstructor);
  }
}
2065 | |
// Debug-only check that |object| is a JSFunction: not a Smi and of
// instance type JS_FUNCTION_TYPE. |object| is preserved via push/pop.
void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
    // CmpObjectType clobbers its map register; reuse |object| and restore.
    Push(object);
    CmpObjectType(object, JS_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, AbortReason::kOperandIsNotAFunction);
  }
}
2076 | |
2077 | |
// Debug-only check that |object| is a JSBoundFunction: not a Smi and of
// instance type JS_BOUND_FUNCTION_TYPE. |object| is preserved.
void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
    // CmpObjectType clobbers its map register; reuse |object| and restore.
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, AbortReason::kOperandIsNotABoundFunction);
  }
}
2088 | |
// Debug-only check that |object| is one of the generator object types:
// JSGeneratorObject, JSAsyncFunctionObject, or JSAsyncGeneratorObject.
// |object| is preserved via push/pop.
void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!emit_debug_code()) return;
  testb(object, Immediate(kSmiTagMask));
  Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

  // Load map
  Register map = object;
  Push(object);
  LoadTaggedPointerField(map, FieldOperand(object, HeapObject::kMapOffset));

  Label do_check;
  // Check if JSGeneratorObject
  CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
  j(equal, &do_check);

  // Check if JSAsyncFunctionObject
  CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
  j(equal, &do_check);

  // Check if JSAsyncGeneratorObject. The flags from this last comparison
  // feed the Check below when neither earlier type matched.
  CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);

  bind(&do_check);
  // Restore generator object to register and perform assertion
  Pop(object);
  Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
2116 | |
// Debug-only check that |object| is either the undefined value or an
// AllocationSite (identified by its map).
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    Cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    // Compare the object's map (at offset 0 within the object) against
    // the allocation site map.
    Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
    Assert(equal, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}
2128 | |
// Unwrap the weak reference in |in_out|: jump to |target_if_cleared| if
// the reference has been cleared, otherwise strip the weak-heap-object
// tag bit so |in_out| holds a strong pointer.
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
  cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
  j(equal, target_if_cleared);

  andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
}
2135 | |
// Add |value| (> 0) to |counter| if native code counters are enabled and
// the counter is active; otherwise emit nothing.
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand =
        ExternalReferenceAsOperand(ExternalReference::Create(counter));
    // This operation has to be exactly 32-bit wide in case the external
    // reference table redirects the counter to a uint32_t dummy_stats_counter_
    // field.
    if (value == 1) {
      incl(counter_operand);
    } else {
      addl(counter_operand, Immediate(value));
    }
  }
}
2151 | |
2152 | |
// Subtract |value| (> 0) from |counter| if native code counters are
// enabled and the counter is active; otherwise emit nothing.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand =
        ExternalReferenceAsOperand(ExternalReference::Create(counter));
    // This operation has to be exactly 32-bit wide in case the external
    // reference table redirects the counter to a uint32_t dummy_stats_counter_
    // field.
    if (value == 1) {
      decl(counter_operand);
    } else {
      subl(counter_operand, Immediate(value));
    }
  }
}
2168 | |
// If the debugger has requested a function restart (non-zero restart
// frame pointer), tail-call the FrameDropperTrampoline to drop frames;
// otherwise fall through. Clobbers rbx.
void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  Load(rbx, restart_fp);
  testq(rbx, rbx);

  Label dont_drop;
  j(zero, &dont_drop, Label::kNear);
  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);

  bind(&dont_drop);
}
2182 | |
// Remove the current frame together with its arguments in preparation for
// a tail call: the return address and the callee's arguments are copied
// down over the caller's argument area, the caller's frame pointer is
// restored, and rsp is moved to the new top of stack. |callee_args_count|
// is the number of arguments the callee expects (register or immediate);
// |caller_args_count_reg| holds the current frame's argument count and is
// clobbered, as are |scratch0| and |scratch1|.
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the destination address where we will put the return address
  // after we drop current frame.
  Register new_sp_reg = scratch0;
  if (callee_args_count.is_reg()) {
    subq(caller_args_count_reg, callee_args_count.reg());
    leaq(new_sp_reg,
         Operand(rbp, caller_args_count_reg, times_system_pointer_size,
                 StandardFrameConstants::kCallerPCOffset));
  } else {
    leaq(new_sp_reg,
         Operand(rbp, caller_args_count_reg, times_system_pointer_size,
                 StandardFrameConstants::kCallerPCOffset -
                     callee_args_count.immediate() * kSystemPointerSize));
  }

  if (FLAG_debug_code) {
    cmpq(rsp, new_sp_reg);
    Check(below, AbortReason::kStackAccessBelowStackPointer);
  }

  // Copy return address from caller's frame to current frame's return address
  // to avoid its trashing and let the following loop copy it to the right
  // place.
  Register tmp_reg = scratch1;
  movq(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
  movq(Operand(rsp, 0), tmp_reg);

  // Restore caller's frame pointer now as it could be overwritten by
  // the copying loop.
  movq(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // +2 here is to copy both receiver and return address.
  Register count_reg = caller_args_count_reg;
  if (callee_args_count.is_reg()) {
    leaq(count_reg, Operand(callee_args_count.reg(), 2));
  } else {
    movq(count_reg, Immediate(callee_args_count.immediate() + 2));
    // TODO(ishell): Unroll copying loop for small immediate values.
  }

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
  decq(count_reg);
  movq(tmp_reg, Operand(rsp, count_reg, times_system_pointer_size, 0));
  movq(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
  bind(&entry);
  cmpq(count_reg, Immediate(0));
  j(not_equal, &loop, Label::kNear);

  // Leave current frame.
  movq(rsp, new_sp_reg);
}
2250 | |
// Invoke |function| with |actual| arguments, reading the expected
// parameter count from the function's SharedFunctionInfo. Clobbers rbx.
void MacroAssembler::InvokeFunction(Register function, Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  LoadTaggedPointerField(
      rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  movzxwq(rbx,
          FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(rbx);
  InvokeFunction(function, new_target, expected, actual, flag);
}
2262 | |
// Invoke |function| (which must be in rdi) after loading its context
// into rsi, dispatching to InvokeFunctionCode.
void MacroAssembler::InvokeFunction(Register function, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  DCHECK(function == rdi);
  LoadTaggedPointerField(rsi,
                         FieldOperand(function, JSFunction::kContextOffset));
  InvokeFunctionCode(rdi, new_target, expected, actual, flag);
}
2272 | |
// Invoke the code object of |function| (must be rdi) as either a call or
// a jump depending on |flag|. Handles the debug hook, clears new.target
// (rdx) if not supplied, and runs the arguments-adaptor prologue when the
// expected and actual argument counts may differ. Clobbers rcx.
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function == rdi);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);

  // On function call, call into the debugger if necessary.
  CheckDebugHook(function, new_target, expected, actual);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(rdx, RootIndex::kUndefinedValue);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
                 Label::kNear);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch" );
    LoadTaggedPointerField(rcx,
                           FieldOperand(function, JSFunction::kCodeOffset));
    if (flag == CALL_FUNCTION) {
      CallCodeObject(rcx);
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      JumpCodeObject(rcx);
    }
    bind(&done);
  }
}
2310 | |
// Compare the expected and actual argument counts, setting up rax (actual)
// and rbx (expected) per the calling convention. When a mismatch is
// possible, emits a call/jump to the ArgumentsAdaptorTrampoline; when the
// mismatch is certain at compile time, *definitely_mismatches is set and
// execution never reaches |done|.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance near_jump) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    Set(rax, actual.immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      Set(rax, actual.immediate());
      cmpq(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      DCHECK(expected.reg() == rbx);
    } else if (expected.reg() != actual.reg()) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpq(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      DCHECK(actual.reg() == rax);
      DCHECK(expected.reg() == rbx);
    } else {
      // Same register for both counts: trivially a match.
      definitely_matches = true;
      Move(rax, actual.reg());
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      if (!*definitely_mismatches) {
        // Counts may still match at runtime; rejoin the normal path.
        jmp(done, near_jump);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
2372 | |
// If the isolate's debug-hook-on-function-call flag is set, call
// Runtime::kDebugOnFunctionCall, saving and restoring |fun|, |new_target|
// and any register-held argument counts (as Smis) around the runtime call.
// Emits nothing observable when the hook is inactive.
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Operand debug_hook_active_operand =
      ExternalReferenceAsOperand(debug_hook_active);
  cmpb(debug_hook_active_operand, Immediate(0));
  j(equal, &skip_hook);

  {
    // Open an INTERNAL frame only if we are not already inside one.
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      // Preserve the expected count as a Smi across the runtime call.
      SmiTag(expected.reg(), expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      // Preserve the actual count, then untag so GetReceiverOperand
      // below can index with the raw count.
      SmiTag(actual.reg(), actual.reg());
      Push(actual.reg());
      SmiUntag(actual.reg(), actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    // |fun| is pushed twice: once to preserve it, once as the runtime
    // call's argument.
    Push(fun);
    Push(fun);
    Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg(), actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg(), expected.reg());
    }
  }
  bind(&skip_hook);
}
2418 | |
// Set up a stub frame: save the caller's frame pointer, establish the new
// one, and push the frame-type marker.
void TurboAssembler::StubPrologue(StackFrame::Type type) {
  pushq(rbp);  // Caller's frame pointer.
  movq(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
}
2424 | |
// Set up a standard JavaScript frame: saved frame pointer, context (rsi)
// and JS function (rdi).
void TurboAssembler::Prologue() {
  pushq(rbp);  // Caller's frame pointer.
  movq(rbp, rsp);
  Push(rsi);  // Callee's context.
  Push(rdi);  // Callee's JS function.
}
2431 | |
// Enter a frame of the given type: save the caller's frame pointer,
// establish the new one, and push the frame-type marker.
void TurboAssembler::EnterFrame(StackFrame::Type type) {
  pushq(rbp);
  movq(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
}
2437 | |
// Leave a frame previously entered with EnterFrame. In debug code,
// verifies that the frame marker on the stack matches |type|.
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(StackFrame::TypeToMarker(type)));
    Check(equal, AbortReason::kStackFrameTypesMustMatch);
  }
  movq(rsp, rbp);
  popq(rbp);
}
2447 | |
// First half of exit-frame setup: build the fixed part of the exit frame
// (saved rbp, frame-type marker, slot for the entry sp) and record the
// frame pointer, context and C function in the isolate's top slots.
// When |save_rax| is set, rax is preserved in callee-saved r14.
void MacroAssembler::EnterExitFramePrologue(bool save_rax,
                                            StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
            ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
  pushq(rbp);
  movq(rbp, rsp);

  // Reserve room for entry stack pointer.
  Push(Immediate(StackFrame::TypeToMarker(frame_type)));
  DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
  Push(Immediate(0));  // Saved entry sp, patched before call.

  // Save the frame pointer and the context in top.
  if (save_rax) {
    movq(r14, rax);  // Backup rax in callee-save register.
  }

  Store(
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
      rbp);
  Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
        rsi);
  Store(
      ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
      rbx);
}
2481 | |
2482 | |
// Second half of exit-frame setup: reserve stack space for outgoing C
// call arguments (plus Windows shadow space), optionally spill all
// allocatable XMM registers, align rsp to the OS frame alignment, and
// patch the saved entry sp slot with the final rsp.
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kNumRegisters * kDoubleSize +
                arg_stack_space * kSystemPointerSize;
    subq(rsp, Immediate(space));
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Default();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subq(rsp, Immediate(arg_stack_space * kSystemPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
    DCHECK(is_int8(kFrameAlignment));
    andq(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
2516 | |
// Enter an exit frame for calling into C code. rax (the argument count,
// preserved in r14 by the prologue) is used to compute argv, which is
// kept in callee-saved r15 for LeaveExitFrame.
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
                                    StackFrame::Type frame_type) {
  EnterExitFramePrologue(true, frame_type);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C-call.
  int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
  leaq(r15, Operand(rbp, r14, times_system_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
2528 | |
2529 | |
// Enter an exit frame for an API call: no rax preservation and no
// double-register spilling.
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false, StackFrame::EXIT);
  EnterExitFrameEpilogue(arg_stack_space, false);
}
2534 | |
2535 | |
// Leave an exit frame entered with EnterExitFrame. Restores spilled XMM
// registers if |save_doubles|, and if |pop_arguments| also drops the
// caller's arguments (using the argv pointer kept in r15).
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  // Registers:
  // r15 : argv
  if (save_doubles) {
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Default();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    movq(rcx, Operand(rbp, kFPOnStackSize));
    movq(rbp, Operand(rbp, 0 * kSystemPointerSize));

    // Drop everything up to and including the arguments and the receiver
    // from the caller stack.
    leaq(rsp, Operand(r15, 1 * kSystemPointerSize));

    PushReturnAddressFrom(rcx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue();
}
2566 | |
// Leave an API exit frame entered with EnterApiExitFrame: tear down the
// frame and restore the isolate's top slots.
void MacroAssembler::LeaveApiExitFrame() {
  movq(rsp, rbp);
  popq(rbp);

  LeaveExitFrameEpilogue();
}
2573 | |
// Shared exit-frame teardown: restore the context (rsi) from the
// isolate's top slot (invalidating it in debug builds) and clear the
// C-entry frame pointer slot.
void MacroAssembler::LeaveExitFrameEpilogue() {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
  Operand context_operand = ExternalReferenceAsOperand(context_address);
  movq(rsi, context_operand);
#ifdef DEBUG
  movq(context_operand, Immediate(Context::kInvalidContext));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
  movq(c_entry_fp_operand, Immediate(0));
}
2590 | |
2591 | |
#ifdef _WIN64
// The Windows x64 ABI passes the first four arguments in registers.
static const int kRegisterPassedArguments = 4;
#else
// The System V AMD64 ABI passes the first six arguments in registers.
static const int kRegisterPassedArguments = 6;
#endif
2597 | |
2598 | |
// Load slot |index| of the native context into |dst| (dst is used as an
// intermediate for the native context itself).
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  LoadTaggedPointerField(dst, NativeContextOperand());
  LoadTaggedPointerField(dst, ContextOperand(dst, index));
}
2603 | |
2604 | |
2605 | int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { |
2606 | // On Windows 64 stack slots are reserved by the caller for all arguments |
2607 | // including the ones passed in registers, and space is always allocated for |
2608 | // the four register arguments even if the function takes fewer than four |
2609 | // arguments. |
2610 | // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers |
2611 | // and the caller does not reserve stack slots for them. |
2612 | DCHECK_GE(num_arguments, 0); |
2613 | #ifdef _WIN64 |
2614 | const int kMinimumStackSlots = kRegisterPassedArguments; |
2615 | if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots; |
2616 | return num_arguments; |
2617 | #else |
2618 | if (num_arguments < kRegisterPassedArguments) return 0; |
2619 | return num_arguments - kRegisterPassedArguments; |
2620 | #endif |
2621 | } |
2622 | |
// Align the stack and reserve space for a C call with |num_arguments|
// arguments. The pre-call rsp is stashed just above the argument area so
// CallCFunction can restore it afterwards. Clobbers kScratchRegister.
void TurboAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK_NE(frame_alignment, 0);
  DCHECK_GE(num_arguments, 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movq(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subq(rsp, Immediate((argument_slots_on_stack + 1) * kSystemPointerSize));
  andq(rsp, Immediate(-frame_alignment));
  // Save the old rsp in the slot directly above the argument area.
  movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
       kScratchRegister);
}
2638 | |
// Calls the C function named by |function|: materializes its address in rax
// and delegates to the register overload below.
void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}
2644 | |
// Calls the C function whose address is in |function|. Expects
// PrepareCallCFunction to have been called first; after the call returns,
// restores rsp from the slot PrepareCallCFunction saved it in.
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK_LE(num_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  // Save the frame pointer and PC so that the stack layout remains iterable,
  // even without an ExitFrame which normally exists between JS and C frames.
  if (isolate() != nullptr) {
    Label get_pc;
    DCHECK(!AreAliased(kScratchRegister, function));
    // leaq of a label bound at the very next instruction yields the current
    // program counter.
    leaq(kScratchRegister, Operand(&get_pc, 0));
    bind(&get_pc);
    movq(ExternalReferenceAsOperand(
             ExternalReference::fast_c_call_caller_pc_address(isolate())),
         kScratchRegister);
    movq(ExternalReferenceAsOperand(
             ExternalReference::fast_c_call_caller_fp_address(isolate())),
         rbp);
  }

  call(function);

  if (isolate() != nullptr) {
    // We don't unset the PC; the FP is the source of truth.
    movq(ExternalReferenceAsOperand(
             ExternalReference::fast_c_call_caller_fp_address(isolate())),
         Immediate(0));
  }

  DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
  DCHECK_GE(num_arguments, 0);
  // Pop the argument slots and reload the pre-call rsp that
  // PrepareCallCFunction stored just above them.
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
}
2683 | |
// Tests the flags word in the MemoryChunk header of the page containing
// |object| against |mask| and jumps to |condition_met| when the masked bits
// satisfy |cc| (zero / not_zero only). |scratch| may alias |object|, in
// which case |object| is clobbered.
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met,
                                   Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  // Compute the page start in |scratch| by masking off the low
  // (page-alignment) bits of the object's address.
  if (scratch == object) {
    andq(scratch, Immediate(~kPageAlignmentMask));
  } else {
    movq(scratch, Immediate(~kPageAlignmentMask));
    andq(scratch, object);
  }
  // Use a one-byte test when the mask fits in a byte; otherwise a 32-bit
  // test against the flags field.
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
2702 | |
2703 | void TurboAssembler::ComputeCodeStartAddress(Register dst) { |
2704 | Label current; |
2705 | bind(¤t); |
2706 | int pc = pc_offset(); |
2707 | // Load effective address to get the address of the current instruction. |
2708 | leaq(dst, Operand(¤t, -pc)); |
2709 | } |
2710 | |
void TurboAssembler::ResetSpeculationPoisonRegister() {
  // TODO(tebbi): Perhaps, we want to put an lfence here.
  // -1 (all ones) appears to be the neutral value for the poison register —
  // presumably it is applied as an AND mask elsewhere; verify against users
  // of kSpeculationPoisonRegister.
  Set(kSpeculationPoisonRegister, -1);
}
2715 | |
// Emits the call into the deoptimizer: records |deopt_id| in r13 and calls
// the runtime-entry |target|.
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
  // r13 can be repurposed here because the roots array is no longer needed
  // on this path.
  NoRootArrayScope no_root_array(this);
  // Save the deopt id in r13 (we don't need the roots array from now on).
  movq(r13, Immediate(deopt_id));
  call(target, RelocInfo::RUNTIME_ENTRY);
}
2722 | |
2723 | } // namespace internal |
2724 | } // namespace v8 |
2725 | |
2726 | #endif // V8_TARGET_ARCH_X64 |
2727 | |