1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/backend/instruction-selector.h"
6
7#include <limits>
8
9#include "src/assembler-inl.h"
10#include "src/base/adapters.h"
11#include "src/compiler/backend/instruction-selector-impl.h"
12#include "src/compiler/compiler-source-position-table.h"
13#include "src/compiler/node-matchers.h"
14#include "src/compiler/pipeline.h"
15#include "src/compiler/schedule.h"
16#include "src/compiler/state-values-utils.h"
17#include "src/deoptimizer.h"
18
19namespace v8 {
20namespace internal {
21namespace compiler {
22
23InstructionSelector::InstructionSelector(
24 Zone* zone, size_t node_count, Linkage* linkage,
25 InstructionSequence* sequence, Schedule* schedule,
26 SourcePositionTable* source_positions, Frame* frame,
27 EnableSwitchJumpTable enable_switch_jump_table,
28 SourcePositionMode source_position_mode, Features features,
29 EnableScheduling enable_scheduling,
30 EnableRootsRelativeAddressing enable_roots_relative_addressing,
31 PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
32 : zone_(zone),
33 linkage_(linkage),
34 sequence_(sequence),
35 source_positions_(source_positions),
36 source_position_mode_(source_position_mode),
37 features_(features),
38 schedule_(schedule),
39 current_block_(nullptr),
40 instructions_(zone),
41 continuation_inputs_(sequence->zone()),
42 continuation_outputs_(sequence->zone()),
43 defined_(node_count, false, zone),
44 used_(node_count, false, zone),
45 effect_level_(node_count, 0, zone),
46 virtual_registers_(node_count,
47 InstructionOperand::kInvalidVirtualRegister, zone),
48 virtual_register_rename_(zone),
49 scheduler_(nullptr),
50 enable_scheduling_(enable_scheduling),
51 enable_roots_relative_addressing_(enable_roots_relative_addressing),
52 enable_switch_jump_table_(enable_switch_jump_table),
53 poisoning_level_(poisoning_level),
54 frame_(frame),
55 instruction_selection_failed_(false),
56 instr_origins_(sequence->zone()),
57 trace_turbo_(trace_turbo) {
58 instructions_.reserve(node_count);
59 continuation_inputs_.reserve(5);
60 continuation_outputs_.reserve(2);
61
62 if (trace_turbo_ == kEnableTraceTurboJson) {
63 instr_origins_.assign(node_count, {-1, 0});
64 }
65}
66
67bool InstructionSelector::SelectInstructions() {
68 // Mark the inputs of all phis in loop headers as used.
69 BasicBlockVector* blocks = schedule()->rpo_order();
70 for (auto const block : *blocks) {
71 if (!block->IsLoopHeader()) continue;
72 DCHECK_LE(2u, block->PredecessorCount());
73 for (Node* const phi : *block) {
74 if (phi->opcode() != IrOpcode::kPhi) continue;
75
76 // Mark all inputs as used.
77 for (Node* const input : phi->inputs()) {
78 MarkAsUsed(input);
79 }
80 }
81 }
82
83 // Visit each basic block in post order.
84 for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
85 VisitBlock(*i);
86 if (instruction_selection_failed()) return false;
87 }
88
89 // Schedule the selected instructions.
90 if (UseInstructionScheduling()) {
91 scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
92 }
93
94 for (auto const block : *blocks) {
95 InstructionBlock* instruction_block =
96 sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
97 for (size_t i = 0; i < instruction_block->phis().size(); i++) {
98 UpdateRenamesInPhi(instruction_block->PhiAt(i));
99 }
100 size_t end = instruction_block->code_end();
101 size_t start = instruction_block->code_start();
102 DCHECK_LE(end, start);
103 StartBlock(RpoNumber::FromInt(block->rpo_number()));
104 if (end != start) {
105 while (start-- > end + 1) {
106 UpdateRenames(instructions_[start]);
107 AddInstruction(instructions_[start]);
108 }
109 UpdateRenames(instructions_[end]);
110 AddTerminator(instructions_[end]);
111 }
112 EndBlock(RpoNumber::FromInt(block->rpo_number()));
113 }
114#if DEBUG
115 sequence()->ValidateSSA();
116#endif
117 return true;
118}
119
120void InstructionSelector::StartBlock(RpoNumber rpo) {
121 if (UseInstructionScheduling()) {
122 DCHECK_NOT_NULL(scheduler_);
123 scheduler_->StartBlock(rpo);
124 } else {
125 sequence()->StartBlock(rpo);
126 }
127}
128
129void InstructionSelector::EndBlock(RpoNumber rpo) {
130 if (UseInstructionScheduling()) {
131 DCHECK_NOT_NULL(scheduler_);
132 scheduler_->EndBlock(rpo);
133 } else {
134 sequence()->EndBlock(rpo);
135 }
136}
137
138void InstructionSelector::AddTerminator(Instruction* instr) {
139 if (UseInstructionScheduling()) {
140 DCHECK_NOT_NULL(scheduler_);
141 scheduler_->AddTerminator(instr);
142 } else {
143 sequence()->AddInstruction(instr);
144 }
145}
146
147void InstructionSelector::AddInstruction(Instruction* instr) {
148 if (UseInstructionScheduling()) {
149 DCHECK_NOT_NULL(scheduler_);
150 scheduler_->AddInstruction(instr);
151 } else {
152 sequence()->AddInstruction(instr);
153 }
154}
155
156Instruction* InstructionSelector::Emit(InstructionCode opcode,
157 InstructionOperand output,
158 size_t temp_count,
159 InstructionOperand* temps) {
160 size_t output_count = output.IsInvalid() ? 0 : 1;
161 return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
162}
163
164Instruction* InstructionSelector::Emit(InstructionCode opcode,
165 InstructionOperand output,
166 InstructionOperand a, size_t temp_count,
167 InstructionOperand* temps) {
168 size_t output_count = output.IsInvalid() ? 0 : 1;
169 return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
170}
171
172Instruction* InstructionSelector::Emit(InstructionCode opcode,
173 InstructionOperand output,
174 InstructionOperand a,
175 InstructionOperand b, size_t temp_count,
176 InstructionOperand* temps) {
177 size_t output_count = output.IsInvalid() ? 0 : 1;
178 InstructionOperand inputs[] = {a, b};
179 size_t input_count = arraysize(inputs);
180 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
181 temps);
182}
183
184Instruction* InstructionSelector::Emit(InstructionCode opcode,
185 InstructionOperand output,
186 InstructionOperand a,
187 InstructionOperand b,
188 InstructionOperand c, size_t temp_count,
189 InstructionOperand* temps) {
190 size_t output_count = output.IsInvalid() ? 0 : 1;
191 InstructionOperand inputs[] = {a, b, c};
192 size_t input_count = arraysize(inputs);
193 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
194 temps);
195}
196
197Instruction* InstructionSelector::Emit(
198 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
199 InstructionOperand b, InstructionOperand c, InstructionOperand d,
200 size_t temp_count, InstructionOperand* temps) {
201 size_t output_count = output.IsInvalid() ? 0 : 1;
202 InstructionOperand inputs[] = {a, b, c, d};
203 size_t input_count = arraysize(inputs);
204 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
205 temps);
206}
207
208Instruction* InstructionSelector::Emit(
209 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
210 InstructionOperand b, InstructionOperand c, InstructionOperand d,
211 InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
212 size_t output_count = output.IsInvalid() ? 0 : 1;
213 InstructionOperand inputs[] = {a, b, c, d, e};
214 size_t input_count = arraysize(inputs);
215 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
216 temps);
217}
218
219Instruction* InstructionSelector::Emit(
220 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
221 InstructionOperand b, InstructionOperand c, InstructionOperand d,
222 InstructionOperand e, InstructionOperand f, size_t temp_count,
223 InstructionOperand* temps) {
224 size_t output_count = output.IsInvalid() ? 0 : 1;
225 InstructionOperand inputs[] = {a, b, c, d, e, f};
226 size_t input_count = arraysize(inputs);
227 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
228 temps);
229}
230
231Instruction* InstructionSelector::Emit(
232 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
233 size_t input_count, InstructionOperand* inputs, size_t temp_count,
234 InstructionOperand* temps) {
235 if (output_count >= Instruction::kMaxOutputCount ||
236 input_count >= Instruction::kMaxInputCount ||
237 temp_count >= Instruction::kMaxTempCount) {
238 set_instruction_selection_failed();
239 return nullptr;
240 }
241
242 Instruction* instr =
243 Instruction::New(instruction_zone(), opcode, output_count, outputs,
244 input_count, inputs, temp_count, temps);
245 return Emit(instr);
246}
247
248Instruction* InstructionSelector::Emit(Instruction* instr) {
249 instructions_.push_back(instr);
250 return instr;
251}
252
253bool InstructionSelector::CanCover(Node* user, Node* node) const {
254 // 1. Both {user} and {node} must be in the same basic block.
255 if (schedule()->block(node) != schedule()->block(user)) {
256 return false;
257 }
258 // 2. Pure {node}s must be owned by the {user}.
259 if (node->op()->HasProperty(Operator::kPure)) {
260 return node->OwnedBy(user);
261 }
262 // 3. Impure {node}s must match the effect level of {user}.
263 if (GetEffectLevel(node) != GetEffectLevel(user)) {
264 return false;
265 }
266 // 4. Only {node} must have value edges pointing to {user}.
267 for (Edge const edge : node->use_edges()) {
268 if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
269 return false;
270 }
271 }
272 return true;
273}
274
275bool InstructionSelector::CanCoverTransitively(Node* user, Node* node,
276 Node* node_input) const {
277 if (CanCover(user, node) && CanCover(node, node_input)) {
278 // If {node} is pure, transitivity might not hold.
279 if (node->op()->HasProperty(Operator::kPure)) {
280 // If {node_input} is pure, the effect levels do not matter.
281 if (node_input->op()->HasProperty(Operator::kPure)) return true;
282 // Otherwise, {user} and {node_input} must have the same effect level.
283 return GetEffectLevel(user) == GetEffectLevel(node_input);
284 }
285 return true;
286 }
287 return false;
288}
289
290bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
291 Node* node) const {
292 BasicBlock* bb_user = schedule()->block(user);
293 BasicBlock* bb_node = schedule()->block(node);
294 if (bb_user != bb_node) return false;
295 for (Edge const edge : node->use_edges()) {
296 Node* from = edge.from();
297 if ((from != user) && (schedule()->block(from) == bb_user)) {
298 return false;
299 }
300 }
301 return true;
302}
303
304void InstructionSelector::UpdateRenames(Instruction* instruction) {
305 for (size_t i = 0; i < instruction->InputCount(); i++) {
306 TryRename(instruction->InputAt(i));
307 }
308}
309
310void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
311 for (size_t i = 0; i < phi->operands().size(); i++) {
312 int vreg = phi->operands()[i];
313 int renamed = GetRename(vreg);
314 if (vreg != renamed) {
315 phi->RenameInput(i, renamed);
316 }
317 }
318}
319
320int InstructionSelector::GetRename(int virtual_register) {
321 int rename = virtual_register;
322 while (true) {
323 if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
324 int next = virtual_register_rename_[rename];
325 if (next == InstructionOperand::kInvalidVirtualRegister) {
326 break;
327 }
328 rename = next;
329 }
330 return rename;
331}
332
333void InstructionSelector::TryRename(InstructionOperand* op) {
334 if (!op->IsUnallocated()) return;
335 UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
336 int vreg = unalloc->virtual_register();
337 int rename = GetRename(vreg);
338 if (rename != vreg) {
339 *unalloc = UnallocatedOperand(*unalloc, rename);
340 }
341}
342
343void InstructionSelector::SetRename(const Node* node, const Node* rename) {
344 int vreg = GetVirtualRegister(node);
345 if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
346 int invalid = InstructionOperand::kInvalidVirtualRegister;
347 virtual_register_rename_.resize(vreg + 1, invalid);
348 }
349 virtual_register_rename_[vreg] = GetVirtualRegister(rename);
350}
351
352int InstructionSelector::GetVirtualRegister(const Node* node) {
353 DCHECK_NOT_NULL(node);
354 size_t const id = node->id();
355 DCHECK_LT(id, virtual_registers_.size());
356 int virtual_register = virtual_registers_[id];
357 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
358 virtual_register = sequence()->NextVirtualRegister();
359 virtual_registers_[id] = virtual_register;
360 }
361 return virtual_register;
362}
363
364const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
365 const {
366 std::map<NodeId, int> virtual_registers;
367 for (size_t n = 0; n < virtual_registers_.size(); ++n) {
368 if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
369 NodeId const id = static_cast<NodeId>(n);
370 virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
371 }
372 }
373 return virtual_registers;
374}
375
376bool InstructionSelector::IsDefined(Node* node) const {
377 DCHECK_NOT_NULL(node);
378 size_t const id = node->id();
379 DCHECK_LT(id, defined_.size());
380 return defined_[id];
381}
382
383void InstructionSelector::MarkAsDefined(Node* node) {
384 DCHECK_NOT_NULL(node);
385 size_t const id = node->id();
386 DCHECK_LT(id, defined_.size());
387 defined_[id] = true;
388}
389
390bool InstructionSelector::IsUsed(Node* node) const {
391 DCHECK_NOT_NULL(node);
392 // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
393 // that the Retain is actually emitted, otherwise the GC will mess up.
394 if (node->opcode() == IrOpcode::kRetain) return true;
395 if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
396 size_t const id = node->id();
397 DCHECK_LT(id, used_.size());
398 return used_[id];
399}
400
401void InstructionSelector::MarkAsUsed(Node* node) {
402 DCHECK_NOT_NULL(node);
403 size_t const id = node->id();
404 DCHECK_LT(id, used_.size());
405 used_[id] = true;
406}
407
408int InstructionSelector::GetEffectLevel(Node* node) const {
409 DCHECK_NOT_NULL(node);
410 size_t const id = node->id();
411 DCHECK_LT(id, effect_level_.size());
412 return effect_level_[id];
413}
414
415void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
416 DCHECK_NOT_NULL(node);
417 size_t const id = node->id();
418 DCHECK_LT(id, effect_level_.size());
419 effect_level_[id] = effect_level;
420}
421
422bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
423 return enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing &&
424 CanUseRootsRegister();
425}
426
427bool InstructionSelector::CanUseRootsRegister() const {
428 return linkage()->GetIncomingDescriptor()->flags() &
429 CallDescriptor::kCanUseRoots;
430}
431
432void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
433 const InstructionOperand& op) {
434 UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
435 sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
436}
437
438void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
439 Node* node) {
440 sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
441}
442
443namespace {
444
445InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
446 Node* input, FrameStateInputKind kind,
447 MachineRepresentation rep) {
448 if (rep == MachineRepresentation::kNone) {
449 return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
450 }
451
452 switch (input->opcode()) {
453 case IrOpcode::kInt32Constant:
454 case IrOpcode::kInt64Constant:
455 case IrOpcode::kNumberConstant:
456 case IrOpcode::kFloat32Constant:
457 case IrOpcode::kFloat64Constant:
458 case IrOpcode::kDelayedStringConstant:
459 return g->UseImmediate(input);
460 case IrOpcode::kHeapConstant: {
461 if (!CanBeTaggedPointer(rep)) {
462 // If we have inconsistent static and dynamic types, e.g. if we
463 // smi-check a string, we can get here with a heap object that
464 // says it is a smi. In that case, we return an invalid instruction
465 // operand, which will be interpreted as an optimized-out value.
466
467 // TODO(jarin) Ideally, we should turn the current instruction
468 // into an abort (we should never execute it).
469 return InstructionOperand();
470 }
471
472 Handle<HeapObject> constant = HeapConstantOf(input->op());
473 RootIndex root_index;
474 if (isolate->roots_table().IsRootHandle(constant, &root_index) &&
475 root_index == RootIndex::kOptimizedOut) {
476 // For an optimized-out object we return an invalid instruction
477 // operand, so that we take the fast path for optimized-out values.
478 return InstructionOperand();
479 }
480
481 return g->UseImmediate(input);
482 }
483 case IrOpcode::kArgumentsElementsState:
484 case IrOpcode::kArgumentsLengthState:
485 case IrOpcode::kObjectState:
486 case IrOpcode::kTypedObjectState:
487 UNREACHABLE();
488 break;
489 default:
490 switch (kind) {
491 case FrameStateInputKind::kStackSlot:
492 return g->UseUniqueSlot(input);
493 case FrameStateInputKind::kAny:
494 // Currently deopts "wrap" other operations, so the deopt's inputs
495 // are potentially needed until the end of the deoptimising code.
496 return g->UseAnyAtEnd(input);
497 }
498 }
499 UNREACHABLE();
500}
501
502} // namespace
503
504class StateObjectDeduplicator {
505 public:
506 explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
507 static const size_t kNotDuplicated = SIZE_MAX;
508
509 size_t GetObjectId(Node* node) {
510 DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
511 node->opcode() == IrOpcode::kObjectId ||
512 node->opcode() == IrOpcode::kArgumentsElementsState);
513 for (size_t i = 0; i < objects_.size(); ++i) {
514 if (objects_[i] == node) return i;
515 // ObjectId nodes are the Turbofan way to express objects with the same
516 // identity in the deopt info. So they should always be mapped to
517 // previously appearing TypedObjectState nodes.
518 if (HasObjectId(objects_[i]) && HasObjectId(node) &&
519 ObjectIdOf(objects_[i]->op()) == ObjectIdOf(node->op())) {
520 return i;
521 }
522 }
523 DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
524 node->opcode() == IrOpcode::kArgumentsElementsState);
525 return kNotDuplicated;
526 }
527
528 size_t InsertObject(Node* node) {
529 DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
530 node->opcode() == IrOpcode::kObjectId ||
531 node->opcode() == IrOpcode::kArgumentsElementsState);
532 size_t id = objects_.size();
533 objects_.push_back(node);
534 return id;
535 }
536
537 private:
538 static bool HasObjectId(Node* node) {
539 return node->opcode() == IrOpcode::kTypedObjectState ||
540 node->opcode() == IrOpcode::kObjectId;
541 }
542
543 ZoneVector<Node*> objects_;
544};
545
546// Returns the number of instruction operands added to inputs.
547size_t InstructionSelector::AddOperandToStateValueDescriptor(
548 StateValueList* values, InstructionOperandVector* inputs,
549 OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
550 MachineType type, FrameStateInputKind kind, Zone* zone) {
551 if (input == nullptr) {
552 values->PushOptimizedOut();
553 return 0;
554 }
555
556 switch (input->opcode()) {
557 case IrOpcode::kArgumentsElementsState: {
558 values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
559 // The elements backing store of an arguments object participates in the
560 // duplicate object counting, but can itself never appear duplicated.
561 DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
562 deduplicator->GetObjectId(input));
563 deduplicator->InsertObject(input);
564 return 0;
565 }
566 case IrOpcode::kArgumentsLengthState: {
567 values->PushArgumentsLength(ArgumentsStateTypeOf(input->op()));
568 return 0;
569 }
570 case IrOpcode::kObjectState: {
571 UNREACHABLE();
572 }
573 case IrOpcode::kTypedObjectState:
574 case IrOpcode::kObjectId: {
575 size_t id = deduplicator->GetObjectId(input);
576 if (id == StateObjectDeduplicator::kNotDuplicated) {
577 DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
578 size_t entries = 0;
579 id = deduplicator->InsertObject(input);
580 StateValueList* nested = values->PushRecursiveField(zone, id);
581 int const input_count = input->op()->ValueInputCount();
582 ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
583 for (int i = 0; i < input_count; ++i) {
584 entries += AddOperandToStateValueDescriptor(
585 nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
586 kind, zone);
587 }
588 return entries;
589 } else {
590 // Deoptimizer counts duplicate objects for the running id, so we have
591 // to push the input again.
592 deduplicator->InsertObject(input);
593 values->PushDuplicate(id);
594 return 0;
595 }
596 }
597 default: {
598 InstructionOperand op =
599 OperandForDeopt(isolate(), g, input, kind, type.representation());
600 if (op.kind() == InstructionOperand::INVALID) {
601 // Invalid operand means the value is impossible or optimized-out.
602 values->PushOptimizedOut();
603 return 0;
604 } else {
605 inputs->push_back(op);
606 values->PushPlain(type);
607 return 1;
608 }
609 }
610 }
611}
612
613// Returns the number of instruction operands added to inputs.
614size_t InstructionSelector::AddInputsToFrameStateDescriptor(
615 FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
616 StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
617 FrameStateInputKind kind, Zone* zone) {
618 DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
619
620 size_t entries = 0;
621 size_t initial_size = inputs->size();
622 USE(initial_size); // initial_size is only used for debug.
623
624 if (descriptor->outer_state()) {
625 entries += AddInputsToFrameStateDescriptor(
626 descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
627 g, deduplicator, inputs, kind, zone);
628 }
629
630 Node* parameters = state->InputAt(kFrameStateParametersInput);
631 Node* locals = state->InputAt(kFrameStateLocalsInput);
632 Node* stack = state->InputAt(kFrameStateStackInput);
633 Node* context = state->InputAt(kFrameStateContextInput);
634 Node* function = state->InputAt(kFrameStateFunctionInput);
635
636 DCHECK_EQ(descriptor->parameters_count(),
637 StateValuesAccess(parameters).size());
638 DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
639 DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
640
641 StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
642
643 DCHECK_EQ(values_descriptor->size(), 0u);
644 values_descriptor->ReserveSize(descriptor->GetSize());
645
646 entries += AddOperandToStateValueDescriptor(
647 values_descriptor, inputs, g, deduplicator, function,
648 MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
649 for (StateValuesAccess::TypedNode input_node :
650 StateValuesAccess(parameters)) {
651 entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
652 deduplicator, input_node.node,
653 input_node.type, kind, zone);
654 }
655 if (descriptor->HasContext()) {
656 entries += AddOperandToStateValueDescriptor(
657 values_descriptor, inputs, g, deduplicator, context,
658 MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
659 }
660 for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
661 entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
662 deduplicator, input_node.node,
663 input_node.type, kind, zone);
664 }
665 for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
666 entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
667 deduplicator, input_node.node,
668 input_node.type, kind, zone);
669 }
670 DCHECK_EQ(initial_size + entries, inputs->size());
671 return entries;
672}
673
674Instruction* InstructionSelector::EmitWithContinuation(
675 InstructionCode opcode, FlagsContinuation* cont) {
676 return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
677}
678
679Instruction* InstructionSelector::EmitWithContinuation(
680 InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
681 return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont);
682}
683
684Instruction* InstructionSelector::EmitWithContinuation(
685 InstructionCode opcode, InstructionOperand a, InstructionOperand b,
686 FlagsContinuation* cont) {
687 InstructionOperand inputs[] = {a, b};
688 return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
689 cont);
690}
691
692Instruction* InstructionSelector::EmitWithContinuation(
693 InstructionCode opcode, InstructionOperand a, InstructionOperand b,
694 InstructionOperand c, FlagsContinuation* cont) {
695 InstructionOperand inputs[] = {a, b, c};
696 return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
697 cont);
698}
699
700Instruction* InstructionSelector::EmitWithContinuation(
701 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
702 size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
703 OperandGenerator g(this);
704
705 opcode = cont->Encode(opcode);
706
707 continuation_inputs_.resize(0);
708 for (size_t i = 0; i < input_count; i++) {
709 continuation_inputs_.push_back(inputs[i]);
710 }
711
712 continuation_outputs_.resize(0);
713 for (size_t i = 0; i < output_count; i++) {
714 continuation_outputs_.push_back(outputs[i]);
715 }
716
717 if (cont->IsBranch()) {
718 continuation_inputs_.push_back(g.Label(cont->true_block()));
719 continuation_inputs_.push_back(g.Label(cont->false_block()));
720 } else if (cont->IsDeoptimize()) {
721 opcode |= MiscField::encode(static_cast<int>(input_count));
722 AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
723 cont->reason(), cont->feedback(),
724 cont->frame_state());
725 } else if (cont->IsSet()) {
726 continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
727 } else if (cont->IsTrap()) {
728 int trap_id = static_cast<int>(cont->trap_id());
729 continuation_inputs_.push_back(g.UseImmediate(trap_id));
730 } else {
731 DCHECK(cont->IsNone());
732 }
733
734 size_t const emit_inputs_size = continuation_inputs_.size();
735 auto* emit_inputs =
736 emit_inputs_size ? &continuation_inputs_.front() : nullptr;
737 size_t const emit_outputs_size = continuation_outputs_.size();
738 auto* emit_outputs =
739 emit_outputs_size ? &continuation_outputs_.front() : nullptr;
740 return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
741 emit_inputs, 0, nullptr);
742}
743
744void InstructionSelector::AppendDeoptimizeArguments(
745 InstructionOperandVector* args, DeoptimizeKind kind,
746 DeoptimizeReason reason, VectorSlotPair const& feedback,
747 Node* frame_state) {
748 OperandGenerator g(this);
749 FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
750 DCHECK_NE(DeoptimizeKind::kLazy, kind);
751 int const state_id =
752 sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
753 args->push_back(g.TempImmediate(state_id));
754 StateObjectDeduplicator deduplicator(instruction_zone());
755 AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
756 args, FrameStateInputKind::kAny,
757 instruction_zone());
758}
759
760Instruction* InstructionSelector::EmitDeoptimize(
761 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
762 size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
763 DeoptimizeReason reason, VectorSlotPair const& feedback,
764 Node* frame_state) {
765 InstructionOperandVector args(instruction_zone());
766 for (size_t i = 0; i < input_count; ++i) {
767 args.push_back(inputs[i]);
768 }
769 opcode |= MiscField::encode(static_cast<int>(input_count));
770 AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
771 return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
772 nullptr);
773}
774
775// An internal helper class for generating the operands to calls.
776// TODO(bmeurer): Get rid of the CallBuffer business and make
777// InstructionSelector::VisitCall platform independent instead.
778struct CallBuffer {
779 CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
780 FrameStateDescriptor* frame_state)
781 : descriptor(call_descriptor),
782 frame_state_descriptor(frame_state),
783 output_nodes(zone),
784 outputs(zone),
785 instruction_args(zone),
786 pushed_nodes(zone) {
787 output_nodes.reserve(call_descriptor->ReturnCount());
788 outputs.reserve(call_descriptor->ReturnCount());
789 pushed_nodes.reserve(input_count());
790 instruction_args.reserve(input_count() + frame_state_value_count());
791 }
792
793 const CallDescriptor* descriptor;
794 FrameStateDescriptor* frame_state_descriptor;
795 ZoneVector<PushParameter> output_nodes;
796 InstructionOperandVector outputs;
797 InstructionOperandVector instruction_args;
798 ZoneVector<PushParameter> pushed_nodes;
799
800 size_t input_count() const { return descriptor->InputCount(); }
801
802 size_t frame_state_count() const { return descriptor->FrameStateCount(); }
803
804 size_t frame_state_value_count() const {
805 return (frame_state_descriptor == nullptr)
806 ? 0
807 : (frame_state_descriptor->GetTotalSize() +
808 1); // Include deopt id.
809 }
810};
811
812// TODO(bmeurer): Get rid of the CallBuffer business and make
813// InstructionSelector::VisitCall platform independent instead.
814void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
815 CallBufferFlags flags,
816 bool is_tail_call,
817 int stack_param_delta) {
818 OperandGenerator g(this);
819 size_t ret_count = buffer->descriptor->ReturnCount();
820 DCHECK_LE(call->op()->ValueOutputCount(), ret_count);
821 DCHECK_EQ(
822 call->op()->ValueInputCount(),
823 static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
824
825 if (ret_count > 0) {
826 // Collect the projections that represent multiple outputs from this call.
827 if (ret_count == 1) {
828 PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
829 buffer->output_nodes.push_back(result);
830 } else {
831 buffer->output_nodes.resize(ret_count);
832 int stack_count = 0;
833 for (size_t i = 0; i < ret_count; ++i) {
834 LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
835 buffer->output_nodes[i] = PushParameter(nullptr, location);
836 if (location.IsCallerFrameSlot()) {
837 stack_count += location.GetSizeInPointers();
838 }
839 }
840 for (Edge const edge : call->use_edges()) {
841 if (!NodeProperties::IsValueEdge(edge)) continue;
842 Node* node = edge.from();
843 DCHECK_EQ(IrOpcode::kProjection, node->opcode());
844 size_t const index = ProjectionIndexOf(node->op());
845
846 DCHECK_LT(index, buffer->output_nodes.size());
847 DCHECK(!buffer->output_nodes[index].node);
848 buffer->output_nodes[index].node = node;
849 }
850 frame_->EnsureReturnSlots(stack_count);
851 }
852
853 // Filter out the outputs that aren't live because no projection uses them.
854 size_t outputs_needed_by_framestate =
855 buffer->frame_state_descriptor == nullptr
856 ? 0
857 : buffer->frame_state_descriptor->state_combine()
858 .ConsumedOutputCount();
859 for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
860 bool output_is_live = buffer->output_nodes[i].node != nullptr ||
861 i < outputs_needed_by_framestate;
862 if (output_is_live) {
863 LinkageLocation location = buffer->output_nodes[i].location;
864 MachineRepresentation rep = location.GetType().representation();
865
866 Node* output = buffer->output_nodes[i].node;
867 InstructionOperand op = output == nullptr
868 ? g.TempLocation(location)
869 : g.DefineAsLocation(output, location);
870 MarkAsRepresentation(rep, op);
871
872 if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
873 buffer->outputs.push_back(op);
874 buffer->output_nodes[i].node = nullptr;
875 }
876 }
877 }
878 }
879
880 // The first argument is always the callee code.
881 Node* callee = call->InputAt(0);
882 bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
883 bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
884 bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
885 bool call_through_slot = (flags & kAllowCallThroughSlot) != 0;
886 switch (buffer->descriptor->kind()) {
887 case CallDescriptor::kCallCodeObject:
888 // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
889 // JS-linkage callers with a register code target. The problem is that the
890 // code target register may be clobbered before the final jmp by
891 // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
892 // entirely remove support for tail-calls from JS-linkage callers.
893 buffer->instruction_args.push_back(
894 (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
895 ? g.UseImmediate(callee)
896 : call_use_fixed_target_reg
897 ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
898 : is_tail_call ? g.UseUniqueRegister(callee)
899 : call_through_slot ? g.UseUniqueSlot(callee)
900 : g.UseRegister(callee));
901 break;
902 case CallDescriptor::kCallAddress:
903 buffer->instruction_args.push_back(
904 (call_address_immediate &&
905 callee->opcode() == IrOpcode::kExternalConstant)
906 ? g.UseImmediate(callee)
907 : call_use_fixed_target_reg
908 ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
909 : g.UseRegister(callee));
910 break;
911 case CallDescriptor::kCallWasmFunction:
912 case CallDescriptor::kCallWasmImportWrapper:
913 buffer->instruction_args.push_back(
914 (call_address_immediate &&
915 (callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
916 callee->opcode() == IrOpcode::kRelocatableInt32Constant))
917 ? g.UseImmediate(callee)
918 : call_use_fixed_target_reg
919 ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
920 : g.UseRegister(callee));
921 break;
922 case CallDescriptor::kCallBuiltinPointer:
923 // The common case for builtin pointers is to have the target in a
924 // register. If we have a constant, we use a register anyway to simplify
925 // related code.
926 buffer->instruction_args.push_back(
927 call_use_fixed_target_reg
928 ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
929 : g.UseRegister(callee));
930 break;
931 case CallDescriptor::kCallJSFunction:
932 buffer->instruction_args.push_back(
933 g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
934 break;
935 }
936 DCHECK_EQ(1u, buffer->instruction_args.size());
937
938 // Argument 1 is used for poison-alias index (encoded in a word-sized
939 // immediate. This an index of the operand that aliases with poison register
940 // or -1 if there is no aliasing.
941 buffer->instruction_args.push_back(g.TempImmediate(-1));
942 const size_t poison_alias_index = 1;
943 DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
944
945 // If the call needs a frame state, we insert the state information as
946 // follows (n is the number of value inputs to the frame state):
947 // arg 2 : deoptimization id.
948 // arg 3 - arg (n + 2) : value inputs to the frame state.
949 size_t frame_state_entries = 0;
950 USE(frame_state_entries); // frame_state_entries is only used for debug.
951 if (buffer->frame_state_descriptor != nullptr) {
952 Node* frame_state =
953 call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
954
955 // If it was a syntactic tail call we need to drop the current frame and
956 // all the frames on top of it that are either an arguments adaptor frame
957 // or a tail caller frame.
958 if (is_tail_call) {
959 frame_state = NodeProperties::GetFrameStateInput(frame_state);
960 buffer->frame_state_descriptor =
961 buffer->frame_state_descriptor->outer_state();
962 while (buffer->frame_state_descriptor != nullptr &&
963 buffer->frame_state_descriptor->type() ==
964 FrameStateType::kArgumentsAdaptor) {
965 frame_state = NodeProperties::GetFrameStateInput(frame_state);
966 buffer->frame_state_descriptor =
967 buffer->frame_state_descriptor->outer_state();
968 }
969 }
970
971 int const state_id = sequence()->AddDeoptimizationEntry(
972 buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
973 DeoptimizeReason::kUnknown, VectorSlotPair());
974 buffer->instruction_args.push_back(g.TempImmediate(state_id));
975
976 StateObjectDeduplicator deduplicator(instruction_zone());
977
978 frame_state_entries =
979 1 + AddInputsToFrameStateDescriptor(
980 buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
981 &buffer->instruction_args, FrameStateInputKind::kStackSlot,
982 instruction_zone());
983
984 DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
985 }
986
987 size_t input_count = static_cast<size_t>(buffer->input_count());
988
989 // Split the arguments into pushed_nodes and instruction_args. Pushed
990 // arguments require an explicit push instruction before the call and do
991 // not appear as arguments to the call. Everything else ends up
992 // as an InstructionOperand argument to the call.
993 auto iter(call->inputs().begin());
994 size_t pushed_count = 0;
995 bool call_tail = (flags & kCallTail) != 0;
996 for (size_t index = 0; index < input_count; ++iter, ++index) {
997 DCHECK(iter != call->inputs().end());
998 DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode());
999 if (index == 0) continue; // The first argument (callee) is already done.
1000
1001 LinkageLocation location = buffer->descriptor->GetInputLocation(index);
1002 if (call_tail) {
1003 location = LinkageLocation::ConvertToTailCallerLocation(
1004 location, stack_param_delta);
1005 }
1006 InstructionOperand op = g.UseLocation(*iter, location);
1007 UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
1008 if (unallocated.HasFixedSlotPolicy() && !call_tail) {
1009 int stack_index = -unallocated.fixed_slot_index() - 1;
1010 if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
1011 buffer->pushed_nodes.resize(stack_index + 1);
1012 }
1013 PushParameter param = {*iter, location};
1014 buffer->pushed_nodes[stack_index] = param;
1015 pushed_count++;
1016 } else {
1017 // If we do load poisoning and the linkage uses the poisoning register,
1018 // then we request the input in memory location, and during code
1019 // generation, we move the input to the register.
1020 if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
1021 unallocated.HasFixedRegisterPolicy()) {
1022 int reg = unallocated.fixed_register_index();
1023 if (Register::from_code(reg) == kSpeculationPoisonRegister) {
1024 buffer->instruction_args[poison_alias_index] = g.TempImmediate(
1025 static_cast<int32_t>(buffer->instruction_args.size()));
1026 op = g.UseRegisterOrSlotOrConstant(*iter);
1027 }
1028 }
1029 buffer->instruction_args.push_back(op);
1030 }
1031 }
1032 DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
1033 frame_state_entries - 1);
1034 if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
1035 stack_param_delta != 0) {
1036 // For tail calls that change the size of their parameter list and keep
1037 // their return address on the stack, move the return address to just above
1038 // the parameters.
1039 LinkageLocation saved_return_location =
1040 LinkageLocation::ForSavedCallerReturnAddress();
1041 InstructionOperand return_address =
1042 g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
1043 saved_return_location, stack_param_delta),
1044 saved_return_location);
1045 buffer->instruction_args.push_back(return_address);
1046 }
1047}
1048
1049bool InstructionSelector::IsSourcePositionUsed(Node* node) {
1050 return (source_position_mode_ == kAllSourcePositions ||
1051 node->opcode() == IrOpcode::kCall ||
1052 node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
1053 node->opcode() == IrOpcode::kTrapIf ||
1054 node->opcode() == IrOpcode::kTrapUnless ||
1055 node->opcode() == IrOpcode::kProtectedLoad ||
1056 node->opcode() == IrOpcode::kProtectedStore);
1057}
1058
1059void InstructionSelector::VisitBlock(BasicBlock* block) {
1060 DCHECK(!current_block_);
1061 current_block_ = block;
1062 auto current_num_instructions = [&] {
1063 DCHECK_GE(kMaxInt, instructions_.size());
1064 return static_cast<int>(instructions_.size());
1065 };
1066 int current_block_end = current_num_instructions();
1067
1068 int effect_level = 0;
1069 for (Node* const node : *block) {
1070 SetEffectLevel(node, effect_level);
1071 if (node->opcode() == IrOpcode::kStore ||
1072 node->opcode() == IrOpcode::kUnalignedStore ||
1073 node->opcode() == IrOpcode::kCall ||
1074 node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
1075 node->opcode() == IrOpcode::kProtectedLoad ||
1076 node->opcode() == IrOpcode::kProtectedStore) {
1077 ++effect_level;
1078 }
1079 }
1080
1081 // We visit the control first, then the nodes in the block, so the block's
1082 // control input should be on the same effect level as the last node.
1083 if (block->control_input() != nullptr) {
1084 SetEffectLevel(block->control_input(), effect_level);
1085 }
1086
1087 auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
1088 if (instruction_selection_failed()) return false;
1089 if (current_num_instructions() == instruction_start) return true;
1090 std::reverse(instructions_.begin() + instruction_start,
1091 instructions_.end());
1092 if (!node) return true;
1093 if (!source_positions_) return true;
1094 SourcePosition source_position = source_positions_->GetSourcePosition(node);
1095 if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
1096 sequence()->SetSourcePosition(instructions_[instruction_start],
1097 source_position);
1098 }
1099 return true;
1100 };
1101
1102 // Generate code for the block control "top down", but schedule the code
1103 // "bottom up".
1104 VisitControl(block);
1105 if (!FinishEmittedInstructions(block->control_input(), current_block_end))
1106 return;
1107
1108 // Visit code in reverse control flow order, because architecture-specific
1109 // matching may cover more than one node at a time.
1110 for (auto node : base::Reversed(*block)) {
1111 int current_node_end = current_num_instructions();
1112 // Skip nodes that are unused or already defined.
1113 if (IsUsed(node) && !IsDefined(node)) {
1114 // Generate code for this node "top down", but schedule the code "bottom
1115 // up".
1116 VisitNode(node);
1117 if (!FinishEmittedInstructions(node, current_node_end)) return;
1118 }
1119 if (trace_turbo_ == kEnableTraceTurboJson) {
1120 instr_origins_[node->id()] = {current_num_instructions(),
1121 current_node_end};
1122 }
1123 }
1124
1125 // We're done with the block.
1126 InstructionBlock* instruction_block =
1127 sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
1128 if (current_num_instructions() == current_block_end) {
1129 // Avoid empty block: insert a {kArchNop} instruction.
1130 Emit(Instruction::New(sequence()->zone(), kArchNop));
1131 }
1132 instruction_block->set_code_start(current_num_instructions());
1133 instruction_block->set_code_end(current_block_end);
1134 current_block_ = nullptr;
1135}
1136
1137void InstructionSelector::VisitControl(BasicBlock* block) {
1138#ifdef DEBUG
1139 // SSA deconstruction requires targets of branches not to have phis.
1140 // Edge split form guarantees this property, but is more strict.
1141 if (block->SuccessorCount() > 1) {
1142 for (BasicBlock* const successor : block->successors()) {
1143 for (Node* const node : *successor) {
1144 if (IrOpcode::IsPhiOpcode(node->opcode())) {
1145 std::ostringstream str;
1146 str << "You might have specified merged variables for a label with "
1147 << "only one predecessor." << std::endl
1148 << "# Current Block: " << *successor << std::endl
1149 << "# Node: " << *node;
1150 FATAL("%s", str.str().c_str());
1151 }
1152 }
1153 }
1154 }
1155#endif
1156
1157 Node* input = block->control_input();
1158 int instruction_end = static_cast<int>(instructions_.size());
1159 switch (block->control()) {
1160 case BasicBlock::kGoto:
1161 VisitGoto(block->SuccessorAt(0));
1162 break;
1163 case BasicBlock::kCall: {
1164 DCHECK_EQ(IrOpcode::kCall, input->opcode());
1165 BasicBlock* success = block->SuccessorAt(0);
1166 BasicBlock* exception = block->SuccessorAt(1);
1167 VisitCall(input, exception);
1168 VisitGoto(success);
1169 break;
1170 }
1171 case BasicBlock::kTailCall: {
1172 DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
1173 VisitTailCall(input);
1174 break;
1175 }
1176 case BasicBlock::kBranch: {
1177 DCHECK_EQ(IrOpcode::kBranch, input->opcode());
1178 BasicBlock* tbranch = block->SuccessorAt(0);
1179 BasicBlock* fbranch = block->SuccessorAt(1);
1180 if (tbranch == fbranch) {
1181 VisitGoto(tbranch);
1182 } else {
1183 VisitBranch(input, tbranch, fbranch);
1184 }
1185 break;
1186 }
1187 case BasicBlock::kSwitch: {
1188 DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
1189 // Last successor must be {IfDefault}.
1190 BasicBlock* default_branch = block->successors().back();
1191 DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
1192 // All other successors must be {IfValue}s.
1193 int32_t min_value = std::numeric_limits<int32_t>::max();
1194 int32_t max_value = std::numeric_limits<int32_t>::min();
1195 size_t case_count = block->SuccessorCount() - 1;
1196 ZoneVector<CaseInfo> cases(case_count, zone());
1197 for (size_t i = 0; i < case_count; ++i) {
1198 BasicBlock* branch = block->SuccessorAt(i);
1199 const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
1200 cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
1201 if (min_value > p.value()) min_value = p.value();
1202 if (max_value < p.value()) max_value = p.value();
1203 }
1204 SwitchInfo sw(cases, min_value, max_value, default_branch);
1205 VisitSwitch(input, sw);
1206 break;
1207 }
1208 case BasicBlock::kReturn: {
1209 DCHECK_EQ(IrOpcode::kReturn, input->opcode());
1210 VisitReturn(input);
1211 break;
1212 }
1213 case BasicBlock::kDeoptimize: {
1214 DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
1215 Node* value = input->InputAt(0);
1216 VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
1217 break;
1218 }
1219 case BasicBlock::kThrow:
1220 DCHECK_EQ(IrOpcode::kThrow, input->opcode());
1221 VisitThrow(input);
1222 break;
1223 case BasicBlock::kNone: {
1224 // Exit block doesn't have control.
1225 DCHECK_NULL(input);
1226 break;
1227 }
1228 default:
1229 UNREACHABLE();
1230 break;
1231 }
1232 if (trace_turbo_ == kEnableTraceTurboJson && input) {
1233 int instruction_start = static_cast<int>(instructions_.size());
1234 instr_origins_[input->id()] = {instruction_start, instruction_end};
1235 }
1236}
1237
1238void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
1239 Node* projection0 = NodeProperties::FindProjection(node, 0);
1240 if (projection0) {
1241 MarkAsWord32(projection0);
1242 }
1243 Node* projection1 = NodeProperties::FindProjection(node, 1);
1244 if (projection1) {
1245 MarkAsWord32(projection1);
1246 }
1247}
1248
1249void InstructionSelector::VisitNode(Node* node) {
1250 DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
1251 switch (node->opcode()) {
1252 case IrOpcode::kStart:
1253 case IrOpcode::kLoop:
1254 case IrOpcode::kEnd:
1255 case IrOpcode::kBranch:
1256 case IrOpcode::kIfTrue:
1257 case IrOpcode::kIfFalse:
1258 case IrOpcode::kIfSuccess:
1259 case IrOpcode::kSwitch:
1260 case IrOpcode::kIfValue:
1261 case IrOpcode::kIfDefault:
1262 case IrOpcode::kEffectPhi:
1263 case IrOpcode::kMerge:
1264 case IrOpcode::kTerminate:
1265 case IrOpcode::kBeginRegion:
1266 // No code needed for these graph artifacts.
1267 return;
1268 case IrOpcode::kIfException:
1269 return MarkAsReference(node), VisitIfException(node);
1270 case IrOpcode::kFinishRegion:
1271 return MarkAsReference(node), VisitFinishRegion(node);
1272 case IrOpcode::kParameter: {
1273 MachineType type =
1274 linkage()->GetParameterType(ParameterIndexOf(node->op()));
1275 MarkAsRepresentation(type.representation(), node);
1276 return VisitParameter(node);
1277 }
1278 case IrOpcode::kOsrValue:
1279 return MarkAsReference(node), VisitOsrValue(node);
1280 case IrOpcode::kPhi: {
1281 MachineRepresentation rep = PhiRepresentationOf(node->op());
1282 if (rep == MachineRepresentation::kNone) return;
1283 MarkAsRepresentation(rep, node);
1284 return VisitPhi(node);
1285 }
1286 case IrOpcode::kProjection:
1287 return VisitProjection(node);
1288 case IrOpcode::kInt32Constant:
1289 case IrOpcode::kInt64Constant:
1290 case IrOpcode::kExternalConstant:
1291 case IrOpcode::kRelocatableInt32Constant:
1292 case IrOpcode::kRelocatableInt64Constant:
1293 return VisitConstant(node);
1294 case IrOpcode::kFloat32Constant:
1295 return MarkAsFloat32(node), VisitConstant(node);
1296 case IrOpcode::kFloat64Constant:
1297 return MarkAsFloat64(node), VisitConstant(node);
1298 case IrOpcode::kHeapConstant:
1299 return MarkAsReference(node), VisitConstant(node);
1300 case IrOpcode::kNumberConstant: {
1301 double value = OpParameter<double>(node->op());
1302 if (!IsSmiDouble(value)) MarkAsReference(node);
1303 return VisitConstant(node);
1304 }
1305 case IrOpcode::kDelayedStringConstant:
1306 return MarkAsReference(node), VisitConstant(node);
1307 case IrOpcode::kCall:
1308 return VisitCall(node);
1309 case IrOpcode::kCallWithCallerSavedRegisters:
1310 return VisitCallWithCallerSavedRegisters(node);
1311 case IrOpcode::kDeoptimizeIf:
1312 return VisitDeoptimizeIf(node);
1313 case IrOpcode::kDeoptimizeUnless:
1314 return VisitDeoptimizeUnless(node);
1315 case IrOpcode::kTrapIf:
1316 return VisitTrapIf(node, TrapIdOf(node->op()));
1317 case IrOpcode::kTrapUnless:
1318 return VisitTrapUnless(node, TrapIdOf(node->op()));
1319 case IrOpcode::kFrameState:
1320 case IrOpcode::kStateValues:
1321 case IrOpcode::kObjectState:
1322 return;
1323 case IrOpcode::kDebugAbort:
1324 VisitDebugAbort(node);
1325 return;
1326 case IrOpcode::kDebugBreak:
1327 VisitDebugBreak(node);
1328 return;
1329 case IrOpcode::kUnreachable:
1330 VisitUnreachable(node);
1331 return;
1332 case IrOpcode::kDeadValue:
1333 VisitDeadValue(node);
1334 return;
1335 case IrOpcode::kComment:
1336 VisitComment(node);
1337 return;
1338 case IrOpcode::kRetain:
1339 VisitRetain(node);
1340 return;
1341 case IrOpcode::kLoad: {
1342 LoadRepresentation type = LoadRepresentationOf(node->op());
1343 MarkAsRepresentation(type.representation(), node);
1344 return VisitLoad(node);
1345 }
1346 case IrOpcode::kPoisonedLoad: {
1347 LoadRepresentation type = LoadRepresentationOf(node->op());
1348 MarkAsRepresentation(type.representation(), node);
1349 return VisitPoisonedLoad(node);
1350 }
1351 case IrOpcode::kStore:
1352 return VisitStore(node);
1353 case IrOpcode::kProtectedStore:
1354 return VisitProtectedStore(node);
1355 case IrOpcode::kWord32And:
1356 return MarkAsWord32(node), VisitWord32And(node);
1357 case IrOpcode::kWord32Or:
1358 return MarkAsWord32(node), VisitWord32Or(node);
1359 case IrOpcode::kWord32Xor:
1360 return MarkAsWord32(node), VisitWord32Xor(node);
1361 case IrOpcode::kWord32Shl:
1362 return MarkAsWord32(node), VisitWord32Shl(node);
1363 case IrOpcode::kWord32Shr:
1364 return MarkAsWord32(node), VisitWord32Shr(node);
1365 case IrOpcode::kWord32Sar:
1366 return MarkAsWord32(node), VisitWord32Sar(node);
1367 case IrOpcode::kWord32Ror:
1368 return MarkAsWord32(node), VisitWord32Ror(node);
1369 case IrOpcode::kWord32Equal:
1370 return VisitWord32Equal(node);
1371 case IrOpcode::kWord32Clz:
1372 return MarkAsWord32(node), VisitWord32Clz(node);
1373 case IrOpcode::kWord32Ctz:
1374 return MarkAsWord32(node), VisitWord32Ctz(node);
1375 case IrOpcode::kWord32ReverseBits:
1376 return MarkAsWord32(node), VisitWord32ReverseBits(node);
1377 case IrOpcode::kWord32ReverseBytes:
1378 return MarkAsWord32(node), VisitWord32ReverseBytes(node);
1379 case IrOpcode::kInt32AbsWithOverflow:
1380 return MarkAsWord32(node), VisitInt32AbsWithOverflow(node);
1381 case IrOpcode::kWord32Popcnt:
1382 return MarkAsWord32(node), VisitWord32Popcnt(node);
1383 case IrOpcode::kWord64Popcnt:
1384 return MarkAsWord32(node), VisitWord64Popcnt(node);
1385 case IrOpcode::kWord64And:
1386 return MarkAsWord64(node), VisitWord64And(node);
1387 case IrOpcode::kWord64Or:
1388 return MarkAsWord64(node), VisitWord64Or(node);
1389 case IrOpcode::kWord64Xor:
1390 return MarkAsWord64(node), VisitWord64Xor(node);
1391 case IrOpcode::kWord64Shl:
1392 return MarkAsWord64(node), VisitWord64Shl(node);
1393 case IrOpcode::kWord64Shr:
1394 return MarkAsWord64(node), VisitWord64Shr(node);
1395 case IrOpcode::kWord64Sar:
1396 return MarkAsWord64(node), VisitWord64Sar(node);
1397 case IrOpcode::kWord64Ror:
1398 return MarkAsWord64(node), VisitWord64Ror(node);
1399 case IrOpcode::kWord64Clz:
1400 return MarkAsWord64(node), VisitWord64Clz(node);
1401 case IrOpcode::kWord64Ctz:
1402 return MarkAsWord64(node), VisitWord64Ctz(node);
1403 case IrOpcode::kWord64ReverseBits:
1404 return MarkAsWord64(node), VisitWord64ReverseBits(node);
1405 case IrOpcode::kWord64ReverseBytes:
1406 return MarkAsWord64(node), VisitWord64ReverseBytes(node);
1407 case IrOpcode::kInt64AbsWithOverflow:
1408 return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
1409 case IrOpcode::kWord64Equal:
1410 return VisitWord64Equal(node);
1411 case IrOpcode::kInt32Add:
1412 return MarkAsWord32(node), VisitInt32Add(node);
1413 case IrOpcode::kInt32AddWithOverflow:
1414 return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
1415 case IrOpcode::kInt32Sub:
1416 return MarkAsWord32(node), VisitInt32Sub(node);
1417 case IrOpcode::kInt32SubWithOverflow:
1418 return VisitInt32SubWithOverflow(node);
1419 case IrOpcode::kInt32Mul:
1420 return MarkAsWord32(node), VisitInt32Mul(node);
1421 case IrOpcode::kInt32MulWithOverflow:
1422 return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
1423 case IrOpcode::kInt32MulHigh:
1424 return VisitInt32MulHigh(node);
1425 case IrOpcode::kInt32Div:
1426 return MarkAsWord32(node), VisitInt32Div(node);
1427 case IrOpcode::kInt32Mod:
1428 return MarkAsWord32(node), VisitInt32Mod(node);
1429 case IrOpcode::kInt32LessThan:
1430 return VisitInt32LessThan(node);
1431 case IrOpcode::kInt32LessThanOrEqual:
1432 return VisitInt32LessThanOrEqual(node);
1433 case IrOpcode::kUint32Div:
1434 return MarkAsWord32(node), VisitUint32Div(node);
1435 case IrOpcode::kUint32LessThan:
1436 return VisitUint32LessThan(node);
1437 case IrOpcode::kUint32LessThanOrEqual:
1438 return VisitUint32LessThanOrEqual(node);
1439 case IrOpcode::kUint32Mod:
1440 return MarkAsWord32(node), VisitUint32Mod(node);
1441 case IrOpcode::kUint32MulHigh:
1442 return VisitUint32MulHigh(node);
1443 case IrOpcode::kInt64Add:
1444 return MarkAsWord64(node), VisitInt64Add(node);
1445 case IrOpcode::kInt64AddWithOverflow:
1446 return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
1447 case IrOpcode::kInt64Sub:
1448 return MarkAsWord64(node), VisitInt64Sub(node);
1449 case IrOpcode::kInt64SubWithOverflow:
1450 return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
1451 case IrOpcode::kInt64Mul:
1452 return MarkAsWord64(node), VisitInt64Mul(node);
1453 case IrOpcode::kInt64Div:
1454 return MarkAsWord64(node), VisitInt64Div(node);
1455 case IrOpcode::kInt64Mod:
1456 return MarkAsWord64(node), VisitInt64Mod(node);
1457 case IrOpcode::kInt64LessThan:
1458 return VisitInt64LessThan(node);
1459 case IrOpcode::kInt64LessThanOrEqual:
1460 return VisitInt64LessThanOrEqual(node);
1461 case IrOpcode::kUint64Div:
1462 return MarkAsWord64(node), VisitUint64Div(node);
1463 case IrOpcode::kUint64LessThan:
1464 return VisitUint64LessThan(node);
1465 case IrOpcode::kUint64LessThanOrEqual:
1466 return VisitUint64LessThanOrEqual(node);
1467 case IrOpcode::kUint64Mod:
1468 return MarkAsWord64(node), VisitUint64Mod(node);
1469 case IrOpcode::kBitcastTaggedToWord:
1470 return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
1471 VisitBitcastTaggedToWord(node);
1472 case IrOpcode::kBitcastWordToTagged:
1473 return MarkAsReference(node), VisitBitcastWordToTagged(node);
1474 case IrOpcode::kBitcastWordToTaggedSigned:
1475 return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
1476 EmitIdentity(node);
1477 case IrOpcode::kChangeFloat32ToFloat64:
1478 return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
1479 case IrOpcode::kChangeInt32ToFloat64:
1480 return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
1481 case IrOpcode::kChangeInt64ToFloat64:
1482 return MarkAsFloat64(node), VisitChangeInt64ToFloat64(node);
1483 case IrOpcode::kChangeUint32ToFloat64:
1484 return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
1485 case IrOpcode::kChangeFloat64ToInt32:
1486 return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
1487 case IrOpcode::kChangeFloat64ToInt64:
1488 return MarkAsWord64(node), VisitChangeFloat64ToInt64(node);
1489 case IrOpcode::kChangeFloat64ToUint32:
1490 return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
1491 case IrOpcode::kChangeFloat64ToUint64:
1492 return MarkAsWord64(node), VisitChangeFloat64ToUint64(node);
1493 case IrOpcode::kFloat64SilenceNaN:
1494 MarkAsFloat64(node);
1495 if (CanProduceSignalingNaN(node->InputAt(0))) {
1496 return VisitFloat64SilenceNaN(node);
1497 } else {
1498 return EmitIdentity(node);
1499 }
1500 case IrOpcode::kTruncateFloat64ToInt64:
1501 return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node);
1502 case IrOpcode::kTruncateFloat64ToUint32:
1503 return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
1504 case IrOpcode::kTruncateFloat32ToInt32:
1505 return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
1506 case IrOpcode::kTruncateFloat32ToUint32:
1507 return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
1508 case IrOpcode::kTryTruncateFloat32ToInt64:
1509 return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
1510 case IrOpcode::kTryTruncateFloat64ToInt64:
1511 return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
1512 case IrOpcode::kTryTruncateFloat32ToUint64:
1513 return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
1514 case IrOpcode::kTryTruncateFloat64ToUint64:
1515 return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
1516 case IrOpcode::kChangeInt32ToInt64:
1517 return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
1518 case IrOpcode::kChangeUint32ToUint64:
1519 return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
1520// TODO(mips-team): Support compress pointers.
1521#ifdef V8_COMPRESS_POINTERS
1522 case IrOpcode::kChangeTaggedToCompressed:
1523 return MarkAsCompressed(node), VisitChangeTaggedToCompressed(node);
1524 case IrOpcode::kChangeTaggedPointerToCompressedPointer:
1525 return MarkAsCompressed(node),
1526 VisitChangeTaggedPointerToCompressedPointer(node);
1527 case IrOpcode::kChangeTaggedSignedToCompressedSigned:
1528 return MarkAsWord32(node),
1529 VisitChangeTaggedSignedToCompressedSigned(node);
1530 case IrOpcode::kChangeCompressedToTagged:
1531 return MarkAsReference(node), VisitChangeCompressedToTagged(node);
1532 case IrOpcode::kChangeCompressedPointerToTaggedPointer:
1533 return MarkAsReference(node),
1534 VisitChangeCompressedPointerToTaggedPointer(node);
1535 case IrOpcode::kChangeCompressedSignedToTaggedSigned:
1536 return MarkAsWord64(node),
1537 VisitChangeCompressedSignedToTaggedSigned(node);
1538#endif
1539 case IrOpcode::kTruncateFloat64ToFloat32:
1540 return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
1541 case IrOpcode::kTruncateFloat64ToWord32:
1542 return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
1543 case IrOpcode::kTruncateInt64ToInt32:
1544 return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
1545 case IrOpcode::kRoundFloat64ToInt32:
1546 return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
1547 case IrOpcode::kRoundInt64ToFloat32:
1548 return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
1549 case IrOpcode::kRoundInt32ToFloat32:
1550 return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
1551 case IrOpcode::kRoundInt64ToFloat64:
1552 return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
1553 case IrOpcode::kBitcastFloat32ToInt32:
1554 return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
1555 case IrOpcode::kRoundUint32ToFloat32:
1556 return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
1557 case IrOpcode::kRoundUint64ToFloat32:
1558 return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
1559 case IrOpcode::kRoundUint64ToFloat64:
1560 return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
1561 case IrOpcode::kBitcastFloat64ToInt64:
1562 return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
1563 case IrOpcode::kBitcastInt32ToFloat32:
1564 return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
1565 case IrOpcode::kBitcastInt64ToFloat64:
1566 return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
1567 case IrOpcode::kFloat32Add:
1568 return MarkAsFloat32(node), VisitFloat32Add(node);
1569 case IrOpcode::kFloat32Sub:
1570 return MarkAsFloat32(node), VisitFloat32Sub(node);
1571 case IrOpcode::kFloat32Neg:
1572 return MarkAsFloat32(node), VisitFloat32Neg(node);
1573 case IrOpcode::kFloat32Mul:
1574 return MarkAsFloat32(node), VisitFloat32Mul(node);
1575 case IrOpcode::kFloat32Div:
1576 return MarkAsFloat32(node), VisitFloat32Div(node);
1577 case IrOpcode::kFloat32Abs:
1578 return MarkAsFloat32(node), VisitFloat32Abs(node);
1579 case IrOpcode::kFloat32Sqrt:
1580 return MarkAsFloat32(node), VisitFloat32Sqrt(node);
1581 case IrOpcode::kFloat32Equal:
1582 return VisitFloat32Equal(node);
1583 case IrOpcode::kFloat32LessThan:
1584 return VisitFloat32LessThan(node);
1585 case IrOpcode::kFloat32LessThanOrEqual:
1586 return VisitFloat32LessThanOrEqual(node);
1587 case IrOpcode::kFloat32Max:
1588 return MarkAsFloat32(node), VisitFloat32Max(node);
1589 case IrOpcode::kFloat32Min:
1590 return MarkAsFloat32(node), VisitFloat32Min(node);
1591 case IrOpcode::kFloat64Add:
1592 return MarkAsFloat64(node), VisitFloat64Add(node);
1593 case IrOpcode::kFloat64Sub:
1594 return MarkAsFloat64(node), VisitFloat64Sub(node);
1595 case IrOpcode::kFloat64Neg:
1596 return MarkAsFloat64(node), VisitFloat64Neg(node);
1597 case IrOpcode::kFloat64Mul:
1598 return MarkAsFloat64(node), VisitFloat64Mul(node);
1599 case IrOpcode::kFloat64Div:
1600 return MarkAsFloat64(node), VisitFloat64Div(node);
1601 case IrOpcode::kFloat64Mod:
1602 return MarkAsFloat64(node), VisitFloat64Mod(node);
1603 case IrOpcode::kFloat64Min:
1604 return MarkAsFloat64(node), VisitFloat64Min(node);
1605 case IrOpcode::kFloat64Max:
1606 return MarkAsFloat64(node), VisitFloat64Max(node);
1607 case IrOpcode::kFloat64Abs:
1608 return MarkAsFloat64(node), VisitFloat64Abs(node);
1609 case IrOpcode::kFloat64Acos:
1610 return MarkAsFloat64(node), VisitFloat64Acos(node);
1611 case IrOpcode::kFloat64Acosh:
1612 return MarkAsFloat64(node), VisitFloat64Acosh(node);
1613 case IrOpcode::kFloat64Asin:
1614 return MarkAsFloat64(node), VisitFloat64Asin(node);
1615 case IrOpcode::kFloat64Asinh:
1616 return MarkAsFloat64(node), VisitFloat64Asinh(node);
1617 case IrOpcode::kFloat64Atan:
1618 return MarkAsFloat64(node), VisitFloat64Atan(node);
1619 case IrOpcode::kFloat64Atanh:
1620 return MarkAsFloat64(node), VisitFloat64Atanh(node);
1621 case IrOpcode::kFloat64Atan2:
1622 return MarkAsFloat64(node), VisitFloat64Atan2(node);
1623 case IrOpcode::kFloat64Cbrt:
1624 return MarkAsFloat64(node), VisitFloat64Cbrt(node);
1625 case IrOpcode::kFloat64Cos:
1626 return MarkAsFloat64(node), VisitFloat64Cos(node);
1627 case IrOpcode::kFloat64Cosh:
1628 return MarkAsFloat64(node), VisitFloat64Cosh(node);
1629 case IrOpcode::kFloat64Exp:
1630 return MarkAsFloat64(node), VisitFloat64Exp(node);
1631 case IrOpcode::kFloat64Expm1:
1632 return MarkAsFloat64(node), VisitFloat64Expm1(node);
1633 case IrOpcode::kFloat64Log:
1634 return MarkAsFloat64(node), VisitFloat64Log(node);
1635 case IrOpcode::kFloat64Log1p:
1636 return MarkAsFloat64(node), VisitFloat64Log1p(node);
1637 case IrOpcode::kFloat64Log10:
1638 return MarkAsFloat64(node), VisitFloat64Log10(node);
1639 case IrOpcode::kFloat64Log2:
1640 return MarkAsFloat64(node), VisitFloat64Log2(node);
1641 case IrOpcode::kFloat64Pow:
1642 return MarkAsFloat64(node), VisitFloat64Pow(node);
1643 case IrOpcode::kFloat64Sin:
1644 return MarkAsFloat64(node), VisitFloat64Sin(node);
1645 case IrOpcode::kFloat64Sinh:
1646 return MarkAsFloat64(node), VisitFloat64Sinh(node);
1647 case IrOpcode::kFloat64Sqrt:
1648 return MarkAsFloat64(node), VisitFloat64Sqrt(node);
1649 case IrOpcode::kFloat64Tan:
1650 return MarkAsFloat64(node), VisitFloat64Tan(node);
1651 case IrOpcode::kFloat64Tanh:
1652 return MarkAsFloat64(node), VisitFloat64Tanh(node);
1653 case IrOpcode::kFloat64Equal:
1654 return VisitFloat64Equal(node);
1655 case IrOpcode::kFloat64LessThan:
1656 return VisitFloat64LessThan(node);
1657 case IrOpcode::kFloat64LessThanOrEqual:
1658 return VisitFloat64LessThanOrEqual(node);
1659 case IrOpcode::kFloat32RoundDown:
1660 return MarkAsFloat32(node), VisitFloat32RoundDown(node);
1661 case IrOpcode::kFloat64RoundDown:
1662 return MarkAsFloat64(node), VisitFloat64RoundDown(node);
1663 case IrOpcode::kFloat32RoundUp:
1664 return MarkAsFloat32(node), VisitFloat32RoundUp(node);
1665 case IrOpcode::kFloat64RoundUp:
1666 return MarkAsFloat64(node), VisitFloat64RoundUp(node);
1667 case IrOpcode::kFloat32RoundTruncate:
1668 return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
1669 case IrOpcode::kFloat64RoundTruncate:
1670 return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
1671 case IrOpcode::kFloat64RoundTiesAway:
1672 return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
1673 case IrOpcode::kFloat32RoundTiesEven:
1674 return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
1675 case IrOpcode::kFloat64RoundTiesEven:
1676 return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
1677 case IrOpcode::kFloat64ExtractLowWord32:
1678 return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
1679 case IrOpcode::kFloat64ExtractHighWord32:
1680 return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
1681 case IrOpcode::kFloat64InsertLowWord32:
1682 return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
1683 case IrOpcode::kFloat64InsertHighWord32:
1684 return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
1685 case IrOpcode::kTaggedPoisonOnSpeculation:
1686 return MarkAsReference(node), VisitTaggedPoisonOnSpeculation(node);
1687 case IrOpcode::kWord32PoisonOnSpeculation:
1688 return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
1689 case IrOpcode::kWord64PoisonOnSpeculation:
1690 return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
1691 case IrOpcode::kStackSlot:
1692 return VisitStackSlot(node);
1693 case IrOpcode::kLoadStackPointer:
1694 return VisitLoadStackPointer(node);
1695 case IrOpcode::kLoadFramePointer:
1696 return VisitLoadFramePointer(node);
1697 case IrOpcode::kLoadParentFramePointer:
1698 return VisitLoadParentFramePointer(node);
1699 case IrOpcode::kUnalignedLoad: {
1700 LoadRepresentation type = LoadRepresentationOf(node->op());
1701 MarkAsRepresentation(type.representation(), node);
1702 return VisitUnalignedLoad(node);
1703 }
1704 case IrOpcode::kUnalignedStore:
1705 return VisitUnalignedStore(node);
1706 case IrOpcode::kInt32PairAdd:
1707 MarkAsWord32(node);
1708 MarkPairProjectionsAsWord32(node);
1709 return VisitInt32PairAdd(node);
1710 case IrOpcode::kInt32PairSub:
1711 MarkAsWord32(node);
1712 MarkPairProjectionsAsWord32(node);
1713 return VisitInt32PairSub(node);
1714 case IrOpcode::kInt32PairMul:
1715 MarkAsWord32(node);
1716 MarkPairProjectionsAsWord32(node);
1717 return VisitInt32PairMul(node);
1718 case IrOpcode::kWord32PairShl:
1719 MarkAsWord32(node);
1720 MarkPairProjectionsAsWord32(node);
1721 return VisitWord32PairShl(node);
1722 case IrOpcode::kWord32PairShr:
1723 MarkAsWord32(node);
1724 MarkPairProjectionsAsWord32(node);
1725 return VisitWord32PairShr(node);
1726 case IrOpcode::kWord32PairSar:
1727 MarkAsWord32(node);
1728 MarkPairProjectionsAsWord32(node);
1729 return VisitWord32PairSar(node);
1730 case IrOpcode::kWord32AtomicLoad: {
1731 LoadRepresentation type = LoadRepresentationOf(node->op());
1732 MarkAsRepresentation(type.representation(), node);
1733 return VisitWord32AtomicLoad(node);
1734 }
1735 case IrOpcode::kWord64AtomicLoad: {
1736 LoadRepresentation type = LoadRepresentationOf(node->op());
1737 MarkAsRepresentation(type.representation(), node);
1738 return VisitWord64AtomicLoad(node);
1739 }
1740 case IrOpcode::kWord32AtomicStore:
1741 return VisitWord32AtomicStore(node);
1742 case IrOpcode::kWord64AtomicStore:
1743 return VisitWord64AtomicStore(node);
1744 case IrOpcode::kWord32AtomicPairStore:
1745 return VisitWord32AtomicPairStore(node);
1746 case IrOpcode::kWord32AtomicPairLoad: {
1747 MarkAsWord32(node);
1748 MarkPairProjectionsAsWord32(node);
1749 return VisitWord32AtomicPairLoad(node);
1750 }
1751#define ATOMIC_CASE(name, rep) \
1752 case IrOpcode::k##rep##Atomic##name: { \
1753 MachineType type = AtomicOpType(node->op()); \
1754 MarkAsRepresentation(type.representation(), node); \
1755 return Visit##rep##Atomic##name(node); \
1756 }
1757 ATOMIC_CASE(Add, Word32)
1758 ATOMIC_CASE(Add, Word64)
1759 ATOMIC_CASE(Sub, Word32)
1760 ATOMIC_CASE(Sub, Word64)
1761 ATOMIC_CASE(And, Word32)
1762 ATOMIC_CASE(And, Word64)
1763 ATOMIC_CASE(Or, Word32)
1764 ATOMIC_CASE(Or, Word64)
1765 ATOMIC_CASE(Xor, Word32)
1766 ATOMIC_CASE(Xor, Word64)
1767 ATOMIC_CASE(Exchange, Word32)
1768 ATOMIC_CASE(Exchange, Word64)
1769 ATOMIC_CASE(CompareExchange, Word32)
1770 ATOMIC_CASE(CompareExchange, Word64)
1771#undef ATOMIC_CASE
1772#define ATOMIC_CASE(name) \
1773 case IrOpcode::kWord32AtomicPair##name: { \
1774 MarkAsWord32(node); \
1775 MarkPairProjectionsAsWord32(node); \
1776 return VisitWord32AtomicPair##name(node); \
1777 }
1778 ATOMIC_CASE(Add)
1779 ATOMIC_CASE(Sub)
1780 ATOMIC_CASE(And)
1781 ATOMIC_CASE(Or)
1782 ATOMIC_CASE(Xor)
1783 ATOMIC_CASE(Exchange)
1784 ATOMIC_CASE(CompareExchange)
1785#undef ATOMIC_CASE
1786 case IrOpcode::kProtectedLoad: {
1787 LoadRepresentation type = LoadRepresentationOf(node->op());
1788 MarkAsRepresentation(type.representation(), node);
1789 return VisitProtectedLoad(node);
1790 }
1791 case IrOpcode::kSignExtendWord8ToInt32:
1792 return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
1793 case IrOpcode::kSignExtendWord16ToInt32:
1794 return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
1795 case IrOpcode::kSignExtendWord8ToInt64:
1796 return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
1797 case IrOpcode::kSignExtendWord16ToInt64:
1798 return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
1799 case IrOpcode::kSignExtendWord32ToInt64:
1800 return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
1801 case IrOpcode::kUnsafePointerAdd:
1802 MarkAsRepresentation(MachineType::PointerRepresentation(), node);
1803 return VisitUnsafePointerAdd(node);
1804 case IrOpcode::kF32x4Splat:
1805 return MarkAsSimd128(node), VisitF32x4Splat(node);
1806 case IrOpcode::kF32x4ExtractLane:
1807 return MarkAsFloat32(node), VisitF32x4ExtractLane(node);
1808 case IrOpcode::kF32x4ReplaceLane:
1809 return MarkAsSimd128(node), VisitF32x4ReplaceLane(node);
1810 case IrOpcode::kF32x4SConvertI32x4:
1811 return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node);
1812 case IrOpcode::kF32x4UConvertI32x4:
1813 return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node);
1814 case IrOpcode::kF32x4Abs:
1815 return MarkAsSimd128(node), VisitF32x4Abs(node);
1816 case IrOpcode::kF32x4Neg:
1817 return MarkAsSimd128(node), VisitF32x4Neg(node);
1818 case IrOpcode::kF32x4RecipApprox:
1819 return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
1820 case IrOpcode::kF32x4RecipSqrtApprox:
1821 return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
1822 case IrOpcode::kF32x4Add:
1823 return MarkAsSimd128(node), VisitF32x4Add(node);
1824 case IrOpcode::kF32x4AddHoriz:
1825 return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
1826 case IrOpcode::kF32x4Sub:
1827 return MarkAsSimd128(node), VisitF32x4Sub(node);
1828 case IrOpcode::kF32x4Mul:
1829 return MarkAsSimd128(node), VisitF32x4Mul(node);
1830 case IrOpcode::kF32x4Min:
1831 return MarkAsSimd128(node), VisitF32x4Min(node);
1832 case IrOpcode::kF32x4Max:
1833 return MarkAsSimd128(node), VisitF32x4Max(node);
1834 case IrOpcode::kF32x4Eq:
1835 return MarkAsSimd128(node), VisitF32x4Eq(node);
1836 case IrOpcode::kF32x4Ne:
1837 return MarkAsSimd128(node), VisitF32x4Ne(node);
1838 case IrOpcode::kF32x4Lt:
1839 return MarkAsSimd128(node), VisitF32x4Lt(node);
1840 case IrOpcode::kF32x4Le:
1841 return MarkAsSimd128(node), VisitF32x4Le(node);
1842 case IrOpcode::kI32x4Splat:
1843 return MarkAsSimd128(node), VisitI32x4Splat(node);
1844 case IrOpcode::kI32x4ExtractLane:
1845 return MarkAsWord32(node), VisitI32x4ExtractLane(node);
1846 case IrOpcode::kI32x4ReplaceLane:
1847 return MarkAsSimd128(node), VisitI32x4ReplaceLane(node);
1848 case IrOpcode::kI32x4SConvertF32x4:
1849 return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node);
1850 case IrOpcode::kI32x4SConvertI16x8Low:
1851 return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node);
1852 case IrOpcode::kI32x4SConvertI16x8High:
1853 return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node);
1854 case IrOpcode::kI32x4Neg:
1855 return MarkAsSimd128(node), VisitI32x4Neg(node);
1856 case IrOpcode::kI32x4Shl:
1857 return MarkAsSimd128(node), VisitI32x4Shl(node);
1858 case IrOpcode::kI32x4ShrS:
1859 return MarkAsSimd128(node), VisitI32x4ShrS(node);
1860 case IrOpcode::kI32x4Add:
1861 return MarkAsSimd128(node), VisitI32x4Add(node);
1862 case IrOpcode::kI32x4AddHoriz:
1863 return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
1864 case IrOpcode::kI32x4Sub:
1865 return MarkAsSimd128(node), VisitI32x4Sub(node);
1866 case IrOpcode::kI32x4Mul:
1867 return MarkAsSimd128(node), VisitI32x4Mul(node);
1868 case IrOpcode::kI32x4MinS:
1869 return MarkAsSimd128(node), VisitI32x4MinS(node);
1870 case IrOpcode::kI32x4MaxS:
1871 return MarkAsSimd128(node), VisitI32x4MaxS(node);
1872 case IrOpcode::kI32x4Eq:
1873 return MarkAsSimd128(node), VisitI32x4Eq(node);
1874 case IrOpcode::kI32x4Ne:
1875 return MarkAsSimd128(node), VisitI32x4Ne(node);
1876 case IrOpcode::kI32x4GtS:
1877 return MarkAsSimd128(node), VisitI32x4GtS(node);
1878 case IrOpcode::kI32x4GeS:
1879 return MarkAsSimd128(node), VisitI32x4GeS(node);
1880 case IrOpcode::kI32x4UConvertF32x4:
1881 return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
1882 case IrOpcode::kI32x4UConvertI16x8Low:
1883 return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node);
1884 case IrOpcode::kI32x4UConvertI16x8High:
1885 return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node);
1886 case IrOpcode::kI32x4ShrU:
1887 return MarkAsSimd128(node), VisitI32x4ShrU(node);
1888 case IrOpcode::kI32x4MinU:
1889 return MarkAsSimd128(node), VisitI32x4MinU(node);
1890 case IrOpcode::kI32x4MaxU:
1891 return MarkAsSimd128(node), VisitI32x4MaxU(node);
1892 case IrOpcode::kI32x4GtU:
1893 return MarkAsSimd128(node), VisitI32x4GtU(node);
1894 case IrOpcode::kI32x4GeU:
1895 return MarkAsSimd128(node), VisitI32x4GeU(node);
1896 case IrOpcode::kI16x8Splat:
1897 return MarkAsSimd128(node), VisitI16x8Splat(node);
1898 case IrOpcode::kI16x8ExtractLane:
1899 return MarkAsWord32(node), VisitI16x8ExtractLane(node);
1900 case IrOpcode::kI16x8ReplaceLane:
1901 return MarkAsSimd128(node), VisitI16x8ReplaceLane(node);
1902 case IrOpcode::kI16x8SConvertI8x16Low:
1903 return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node);
1904 case IrOpcode::kI16x8SConvertI8x16High:
1905 return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node);
1906 case IrOpcode::kI16x8Neg:
1907 return MarkAsSimd128(node), VisitI16x8Neg(node);
1908 case IrOpcode::kI16x8Shl:
1909 return MarkAsSimd128(node), VisitI16x8Shl(node);
1910 case IrOpcode::kI16x8ShrS:
1911 return MarkAsSimd128(node), VisitI16x8ShrS(node);
1912 case IrOpcode::kI16x8SConvertI32x4:
1913 return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
1914 case IrOpcode::kI16x8Add:
1915 return MarkAsSimd128(node), VisitI16x8Add(node);
1916 case IrOpcode::kI16x8AddSaturateS:
1917 return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
1918 case IrOpcode::kI16x8AddHoriz:
1919 return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
1920 case IrOpcode::kI16x8Sub:
1921 return MarkAsSimd128(node), VisitI16x8Sub(node);
1922 case IrOpcode::kI16x8SubSaturateS:
1923 return MarkAsSimd128(node), VisitI16x8SubSaturateS(node);
1924 case IrOpcode::kI16x8Mul:
1925 return MarkAsSimd128(node), VisitI16x8Mul(node);
1926 case IrOpcode::kI16x8MinS:
1927 return MarkAsSimd128(node), VisitI16x8MinS(node);
1928 case IrOpcode::kI16x8MaxS:
1929 return MarkAsSimd128(node), VisitI16x8MaxS(node);
1930 case IrOpcode::kI16x8Eq:
1931 return MarkAsSimd128(node), VisitI16x8Eq(node);
1932 case IrOpcode::kI16x8Ne:
1933 return MarkAsSimd128(node), VisitI16x8Ne(node);
1934 case IrOpcode::kI16x8GtS:
1935 return MarkAsSimd128(node), VisitI16x8GtS(node);
1936 case IrOpcode::kI16x8GeS:
1937 return MarkAsSimd128(node), VisitI16x8GeS(node);
1938 case IrOpcode::kI16x8UConvertI8x16Low:
1939 return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
1940 case IrOpcode::kI16x8UConvertI8x16High:
1941 return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node);
1942 case IrOpcode::kI16x8ShrU:
1943 return MarkAsSimd128(node), VisitI16x8ShrU(node);
1944 case IrOpcode::kI16x8UConvertI32x4:
1945 return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
1946 case IrOpcode::kI16x8AddSaturateU:
1947 return MarkAsSimd128(node), VisitI16x8AddSaturateU(node);
1948 case IrOpcode::kI16x8SubSaturateU:
1949 return MarkAsSimd128(node), VisitI16x8SubSaturateU(node);
1950 case IrOpcode::kI16x8MinU:
1951 return MarkAsSimd128(node), VisitI16x8MinU(node);
1952 case IrOpcode::kI16x8MaxU:
1953 return MarkAsSimd128(node), VisitI16x8MaxU(node);
1954 case IrOpcode::kI16x8GtU:
1955 return MarkAsSimd128(node), VisitI16x8GtU(node);
1956 case IrOpcode::kI16x8GeU:
1957 return MarkAsSimd128(node), VisitI16x8GeU(node);
1958 case IrOpcode::kI8x16Splat:
1959 return MarkAsSimd128(node), VisitI8x16Splat(node);
1960 case IrOpcode::kI8x16ExtractLane:
1961 return MarkAsWord32(node), VisitI8x16ExtractLane(node);
1962 case IrOpcode::kI8x16ReplaceLane:
1963 return MarkAsSimd128(node), VisitI8x16ReplaceLane(node);
1964 case IrOpcode::kI8x16Neg:
1965 return MarkAsSimd128(node), VisitI8x16Neg(node);
1966 case IrOpcode::kI8x16Shl:
1967 return MarkAsSimd128(node), VisitI8x16Shl(node);
1968 case IrOpcode::kI8x16ShrS:
1969 return MarkAsSimd128(node), VisitI8x16ShrS(node);
1970 case IrOpcode::kI8x16SConvertI16x8:
1971 return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
1972 case IrOpcode::kI8x16Add:
1973 return MarkAsSimd128(node), VisitI8x16Add(node);
1974 case IrOpcode::kI8x16AddSaturateS:
1975 return MarkAsSimd128(node), VisitI8x16AddSaturateS(node);
1976 case IrOpcode::kI8x16Sub:
1977 return MarkAsSimd128(node), VisitI8x16Sub(node);
1978 case IrOpcode::kI8x16SubSaturateS:
1979 return MarkAsSimd128(node), VisitI8x16SubSaturateS(node);
1980 case IrOpcode::kI8x16Mul:
1981 return MarkAsSimd128(node), VisitI8x16Mul(node);
1982 case IrOpcode::kI8x16MinS:
1983 return MarkAsSimd128(node), VisitI8x16MinS(node);
1984 case IrOpcode::kI8x16MaxS:
1985 return MarkAsSimd128(node), VisitI8x16MaxS(node);
1986 case IrOpcode::kI8x16Eq:
1987 return MarkAsSimd128(node), VisitI8x16Eq(node);
1988 case IrOpcode::kI8x16Ne:
1989 return MarkAsSimd128(node), VisitI8x16Ne(node);
1990 case IrOpcode::kI8x16GtS:
1991 return MarkAsSimd128(node), VisitI8x16GtS(node);
1992 case IrOpcode::kI8x16GeS:
1993 return MarkAsSimd128(node), VisitI8x16GeS(node);
1994 case IrOpcode::kI8x16ShrU:
1995 return MarkAsSimd128(node), VisitI8x16ShrU(node);
1996 case IrOpcode::kI8x16UConvertI16x8:
1997 return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
1998 case IrOpcode::kI8x16AddSaturateU:
1999 return MarkAsSimd128(node), VisitI8x16AddSaturateU(node);
2000 case IrOpcode::kI8x16SubSaturateU:
2001 return MarkAsSimd128(node), VisitI8x16SubSaturateU(node);
2002 case IrOpcode::kI8x16MinU:
2003 return MarkAsSimd128(node), VisitI8x16MinU(node);
2004 case IrOpcode::kI8x16MaxU:
2005 return MarkAsSimd128(node), VisitI8x16MaxU(node);
2006 case IrOpcode::kI8x16GtU:
2007 return MarkAsSimd128(node), VisitI8x16GtU(node);
2008 case IrOpcode::kI8x16GeU:
2009 return MarkAsSimd128(node), VisitI8x16GeU(node);
2010 case IrOpcode::kS128Zero:
2011 return MarkAsSimd128(node), VisitS128Zero(node);
2012 case IrOpcode::kS128And:
2013 return MarkAsSimd128(node), VisitS128And(node);
2014 case IrOpcode::kS128Or:
2015 return MarkAsSimd128(node), VisitS128Or(node);
2016 case IrOpcode::kS128Xor:
2017 return MarkAsSimd128(node), VisitS128Xor(node);
2018 case IrOpcode::kS128Not:
2019 return MarkAsSimd128(node), VisitS128Not(node);
2020 case IrOpcode::kS128Select:
2021 return MarkAsSimd128(node), VisitS128Select(node);
2022 case IrOpcode::kS8x16Shuffle:
2023 return MarkAsSimd128(node), VisitS8x16Shuffle(node);
2024 case IrOpcode::kS1x4AnyTrue:
2025 return MarkAsWord32(node), VisitS1x4AnyTrue(node);
2026 case IrOpcode::kS1x4AllTrue:
2027 return MarkAsWord32(node), VisitS1x4AllTrue(node);
2028 case IrOpcode::kS1x8AnyTrue:
2029 return MarkAsWord32(node), VisitS1x8AnyTrue(node);
2030 case IrOpcode::kS1x8AllTrue:
2031 return MarkAsWord32(node), VisitS1x8AllTrue(node);
2032 case IrOpcode::kS1x16AnyTrue:
2033 return MarkAsWord32(node), VisitS1x16AnyTrue(node);
2034 case IrOpcode::kS1x16AllTrue:
2035 return MarkAsWord32(node), VisitS1x16AllTrue(node);
2036 default:
2037 FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
2038 node->op()->mnemonic(), node->id());
2039 break;
2040 }
2041}
2042
2043void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
2044 if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
2045 OperandGenerator g(this);
2046 Node* input_node = NodeProperties::GetValueInput(node, 0);
2047 InstructionOperand input = g.UseRegister(input_node);
2048 InstructionOperand output = g.DefineSameAsFirst(node);
2049 Emit(kArchWordPoisonOnSpeculation, output, input);
2050 } else {
2051 EmitIdentity(node);
2052 }
2053}
2054
2055void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
2056 EmitWordPoisonOnSpeculation(node);
2057}
2058
2059void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
2060 EmitWordPoisonOnSpeculation(node);
2061}
2062
2063void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
2064 EmitWordPoisonOnSpeculation(node);
2065}
2066
2067void InstructionSelector::VisitLoadStackPointer(Node* node) {
2068 OperandGenerator g(this);
2069 Emit(kArchStackPointer, g.DefineAsRegister(node));
2070}
2071
2072void InstructionSelector::VisitLoadFramePointer(Node* node) {
2073 OperandGenerator g(this);
2074 Emit(kArchFramePointer, g.DefineAsRegister(node));
2075}
2076
2077void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
2078 OperandGenerator g(this);
2079 Emit(kArchParentFramePointer, g.DefineAsRegister(node));
2080}
2081
2082void InstructionSelector::VisitFloat64Acos(Node* node) {
2083 VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
2084}
2085
2086void InstructionSelector::VisitFloat64Acosh(Node* node) {
2087 VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
2088}
2089
2090void InstructionSelector::VisitFloat64Asin(Node* node) {
2091 VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
2092}
2093
2094void InstructionSelector::VisitFloat64Asinh(Node* node) {
2095 VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
2096}
2097
2098void InstructionSelector::VisitFloat64Atan(Node* node) {
2099 VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
2100}
2101
2102void InstructionSelector::VisitFloat64Atanh(Node* node) {
2103 VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
2104}
2105
2106void InstructionSelector::VisitFloat64Atan2(Node* node) {
2107 VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
2108}
2109
2110void InstructionSelector::VisitFloat64Cbrt(Node* node) {
2111 VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
2112}
2113
2114void InstructionSelector::VisitFloat64Cos(Node* node) {
2115 VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
2116}
2117
2118void InstructionSelector::VisitFloat64Cosh(Node* node) {
2119 VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
2120}
2121
2122void InstructionSelector::VisitFloat64Exp(Node* node) {
2123 VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
2124}
2125
2126void InstructionSelector::VisitFloat64Expm1(Node* node) {
2127 VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
2128}
2129
2130void InstructionSelector::VisitFloat64Log(Node* node) {
2131 VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
2132}
2133
2134void InstructionSelector::VisitFloat64Log1p(Node* node) {
2135 VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
2136}
2137
2138void InstructionSelector::VisitFloat64Log2(Node* node) {
2139 VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
2140}
2141
2142void InstructionSelector::VisitFloat64Log10(Node* node) {
2143 VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
2144}
2145
2146void InstructionSelector::VisitFloat64Pow(Node* node) {
2147 VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
2148}
2149
2150void InstructionSelector::VisitFloat64Sin(Node* node) {
2151 VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
2152}
2153
2154void InstructionSelector::VisitFloat64Sinh(Node* node) {
2155 VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
2156}
2157
2158void InstructionSelector::VisitFloat64Tan(Node* node) {
2159 VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
2160}
2161
2162void InstructionSelector::VisitFloat64Tanh(Node* node) {
2163 VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
2164}
2165
2166void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
2167 InstructionOperand& index_operand) {
2168 OperandGenerator g(this);
2169 size_t input_count = 2 + sw.value_range();
2170 DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
2171 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
2172 inputs[0] = index_operand;
2173 InstructionOperand default_operand = g.Label(sw.default_branch());
2174 std::fill(&inputs[1], &inputs[input_count], default_operand);
2175 for (const CaseInfo& c : sw.CasesUnsorted()) {
2176 size_t value = c.value - sw.min_value();
2177 DCHECK_LE(0u, value);
2178 DCHECK_LT(value + 2, input_count);
2179 inputs[value + 2] = g.Label(c.branch);
2180 }
2181 Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
2182}
2183
2184void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
2185 InstructionOperand& value_operand) {
2186 OperandGenerator g(this);
2187 std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
2188 size_t input_count = 2 + sw.case_count() * 2;
2189 DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
2190 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
2191 inputs[0] = value_operand;
2192 inputs[1] = g.Label(sw.default_branch());
2193 for (size_t index = 0; index < cases.size(); ++index) {
2194 const CaseInfo& c = cases[index];
2195 inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
2196 inputs[index * 2 + 2 + 1] = g.Label(c.branch);
2197 }
2198 Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
2199}
2200
2201void InstructionSelector::EmitBinarySearchSwitch(
2202 const SwitchInfo& sw, InstructionOperand& value_operand) {
2203 OperandGenerator g(this);
2204 size_t input_count = 2 + sw.case_count() * 2;
2205 DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
2206 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
2207 inputs[0] = value_operand;
2208 inputs[1] = g.Label(sw.default_branch());
2209 std::vector<CaseInfo> cases = sw.CasesSortedByValue();
2210 std::stable_sort(cases.begin(), cases.end(),
2211 [](CaseInfo a, CaseInfo b) { return a.value < b.value; });
2212 for (size_t index = 0; index < cases.size(); ++index) {
2213 const CaseInfo& c = cases[index];
2214 inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
2215 inputs[index * 2 + 2 + 1] = g.Label(c.branch);
2216 }
2217 Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
2218}
2219
2220void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
2221 EmitIdentity(node);
2222}
2223
2224void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
2225 OperandGenerator g(this);
2226 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
2227}
2228
2229// 32 bit targets do not implement the following instructions.
2230#if V8_TARGET_ARCH_32_BIT
2231
2232void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
2233
2234void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
2235
2236void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
2237
2238void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
2239
2240void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
2241
2242void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
2243
2244void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
2245
2246void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
2247
2248void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
2249
2250void InstructionSelector::VisitWord64ReverseBits(Node* node) {
2251 UNIMPLEMENTED();
2252}
2253
2254void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
2255
2256void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
2257
2258void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
2259
2260void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2261 UNIMPLEMENTED();
2262}
2263
2264void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
2265
2266void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2267 UNIMPLEMENTED();
2268}
2269
2270void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
2271
2272void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
2273
2274void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
2275
2276void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2277 UNIMPLEMENTED();
2278}
2279
2280void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
2281
2282void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
2283
2284void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
2285
2286void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2287 UNIMPLEMENTED();
2288}
2289
2290void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
2291
2292void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
2293 UNIMPLEMENTED();
2294}
2295
2296void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
2297 UNIMPLEMENTED();
2298}
2299
2300void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
2301 UNIMPLEMENTED();
2302}
2303
2304// TODO(mips-team): Support compress pointers.
2305#ifdef V8_COMPRESS_POINTERS
2306void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) {
2307 UNIMPLEMENTED();
2308}
2309
2310void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer(
2311 Node* node) {
2312 UNIMPLEMENTED();
2313}
2314
2315void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
2316 Node* node) {
2317 UNIMPLEMENTED();
2318}
2319
2320void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
2321 UNIMPLEMENTED();
2322}
2323
2324void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
2325 Node* node) {
2326 UNIMPLEMENTED();
2327}
2328
2329void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
2330 Node* node) {
2331 UNIMPLEMENTED();
2332}
2333#endif
2334
2335void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
2336 UNIMPLEMENTED();
2337}
2338
2339void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
2340 UNIMPLEMENTED();
2341}
2342
2343void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
2344 UNIMPLEMENTED();
2345}
2346
2347void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
2348 UNIMPLEMENTED();
2349}
2350
2351void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
2352 UNIMPLEMENTED();
2353}
2354
2355void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
2356 UNIMPLEMENTED();
2357}
2358
2359void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
2360 UNIMPLEMENTED();
2361}
2362
2363void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
2364 UNIMPLEMENTED();
2365}
2366
2367void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
2368 UNIMPLEMENTED();
2369}
2370
2371void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
2372 UNIMPLEMENTED();
2373}
2374
2375void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
2376 UNIMPLEMENTED();
2377}
2378
2379void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
2380 UNIMPLEMENTED();
2381}
2382
2383void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
2384 UNIMPLEMENTED();
2385}
2386
2387void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
2388 UNIMPLEMENTED();
2389}
2390
2391void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
2392 UNIMPLEMENTED();
2393}
2394
2395void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
2396 UNIMPLEMENTED();
2397}
2398
2399void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
2400 UNIMPLEMENTED();
2401}
2402#endif // V8_TARGET_ARCH_32_BIT
2403
2404// 64 bit targets do not implement the following instructions.
2405#if V8_TARGET_ARCH_64_BIT
2406void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
2407
2408void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
2409
2410void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
2411
2412void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
2413
2414void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
2415
2416void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
2417#endif // V8_TARGET_ARCH_64_BIT
2418
2419#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
2420void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
2421 UNIMPLEMENTED();
2422}
2423
2424void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
2425 UNIMPLEMENTED();
2426}
2427
2428void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
2429 UNIMPLEMENTED();
2430}
2431
2432void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
2433 UNIMPLEMENTED();
2434}
2435
2436void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
2437 UNIMPLEMENTED();
2438}
2439
2440void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
2441 UNIMPLEMENTED();
2442}
2443
2444void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
2445 UNIMPLEMENTED();
2446}
2447
2448void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
2449 UNIMPLEMENTED();
2450}
2451
2452void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
2453 UNIMPLEMENTED();
2454}
2455#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
2456
2457#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
2458 !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC
2459void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
2460
2461void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2462 UNIMPLEMENTED();
2463}
2464
2465void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
2466
2467void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
2468
2469void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
2470
2471void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
2472
2473void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
2474
2475void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2476 UNIMPLEMENTED();
2477}
2478
2479void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2480 UNIMPLEMENTED();
2481}
2482#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
2483 // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
2484
2485void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
2486
2487void InstructionSelector::VisitParameter(Node* node) {
2488 OperandGenerator g(this);
2489 int index = ParameterIndexOf(node->op());
2490 InstructionOperand op =
2491 linkage()->ParameterHasSecondaryLocation(index)
2492 ? g.DefineAsDualLocation(
2493 node, linkage()->GetParameterLocation(index),
2494 linkage()->GetParameterSecondaryLocation(index))
2495 : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
2496
2497 Emit(kArchNop, op);
2498}
2499
2500namespace {
2501LinkageLocation ExceptionLocation() {
2502 return LinkageLocation::ForRegister(kReturnRegister0.code(),
2503 MachineType::IntPtr());
2504}
2505} // namespace
2506
2507void InstructionSelector::VisitIfException(Node* node) {
2508 OperandGenerator g(this);
2509 DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
2510 Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
2511}
2512
2513void InstructionSelector::VisitOsrValue(Node* node) {
2514 OperandGenerator g(this);
2515 int index = OsrValueIndexOf(node->op());
2516 Emit(kArchNop,
2517 g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
2518}
2519
2520void InstructionSelector::VisitPhi(Node* node) {
2521 const int input_count = node->op()->ValueInputCount();
2522 DCHECK_EQ(input_count, current_block_->PredecessorCount());
2523 PhiInstruction* phi = new (instruction_zone())
2524 PhiInstruction(instruction_zone(), GetVirtualRegister(node),
2525 static_cast<size_t>(input_count));
2526 sequence()
2527 ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
2528 ->AddPhi(phi);
2529 for (int i = 0; i < input_count; ++i) {
2530 Node* const input = node->InputAt(i);
2531 MarkAsUsed(input);
2532 phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
2533 }
2534}
2535
2536void InstructionSelector::VisitProjection(Node* node) {
2537 OperandGenerator g(this);
2538 Node* value = node->InputAt(0);
2539 switch (value->opcode()) {
2540 case IrOpcode::kInt32AddWithOverflow:
2541 case IrOpcode::kInt32SubWithOverflow:
2542 case IrOpcode::kInt32MulWithOverflow:
2543 case IrOpcode::kInt64AddWithOverflow:
2544 case IrOpcode::kInt64SubWithOverflow:
2545 case IrOpcode::kTryTruncateFloat32ToInt64:
2546 case IrOpcode::kTryTruncateFloat64ToInt64:
2547 case IrOpcode::kTryTruncateFloat32ToUint64:
2548 case IrOpcode::kTryTruncateFloat64ToUint64:
2549 case IrOpcode::kInt32PairAdd:
2550 case IrOpcode::kInt32PairSub:
2551 case IrOpcode::kInt32PairMul:
2552 case IrOpcode::kWord32PairShl:
2553 case IrOpcode::kWord32PairShr:
2554 case IrOpcode::kWord32PairSar:
2555 case IrOpcode::kInt32AbsWithOverflow:
2556 case IrOpcode::kInt64AbsWithOverflow:
2557 if (ProjectionIndexOf(node->op()) == 0u) {
2558 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
2559 } else {
2560 DCHECK_EQ(1u, ProjectionIndexOf(node->op()));
2561 MarkAsUsed(value);
2562 }
2563 break;
2564 default:
2565 break;
2566 }
2567}
2568
2569void InstructionSelector::VisitConstant(Node* node) {
2570 // We must emit a NOP here because every live range needs a defining
2571 // instruction in the register allocator.
2572 OperandGenerator g(this);
2573 Emit(kArchNop, g.DefineAsConstant(node));
2574}
2575
2576void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
2577 OperandGenerator g(this);
2578 auto call_descriptor = CallDescriptorOf(node->op());
2579
2580 FrameStateDescriptor* frame_state_descriptor = nullptr;
2581 if (call_descriptor->NeedsFrameState()) {
2582 frame_state_descriptor = GetFrameStateDescriptor(
2583 node->InputAt(static_cast<int>(call_descriptor->InputCount())));
2584 }
2585
2586 CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
2587 CallDescriptor::Flags flags = call_descriptor->flags();
2588
2589 // Compute InstructionOperands for inputs and outputs.
2590 // TODO(turbofan): on some architectures it's probably better to use
2591 // the code object in a register if there are multiple uses of it.
2592 // Improve constant pool and the heuristics in the register allocator
2593 // for where to emit constants.
2594 CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
2595 if (flags & CallDescriptor::kAllowCallThroughSlot) {
2596 // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative call
2597 // instead once builtins are embedded in every build configuration.
2598 call_buffer_flags |= kAllowCallThroughSlot;
2599#ifndef V8_TARGET_ARCH_32_BIT
2600 // kAllowCallThroughSlot is only supported on ia32.
2601 UNREACHABLE();
2602#endif
2603 }
2604 InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
2605
2606 EmitPrepareArguments(&(buffer.pushed_nodes), call_descriptor, node);
2607
2608 // Pass label of exception handler block.
2609 if (handler) {
2610 DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
2611 flags |= CallDescriptor::kHasExceptionHandler;
2612 buffer.instruction_args.push_back(g.Label(handler));
2613 }
2614
2615 // Select the appropriate opcode based on the call type.
2616 InstructionCode opcode = kArchNop;
2617 switch (call_descriptor->kind()) {
2618 case CallDescriptor::kCallAddress:
2619 opcode = kArchCallCFunction | MiscField::encode(static_cast<int>(
2620 call_descriptor->ParameterCount()));
2621 break;
2622 case CallDescriptor::kCallCodeObject:
2623 opcode = kArchCallCodeObject | MiscField::encode(flags);
2624 break;
2625 case CallDescriptor::kCallJSFunction:
2626 opcode = kArchCallJSFunction | MiscField::encode(flags);
2627 break;
2628 case CallDescriptor::kCallWasmFunction:
2629 case CallDescriptor::kCallWasmImportWrapper:
2630 opcode = kArchCallWasmFunction | MiscField::encode(flags);
2631 break;
2632 case CallDescriptor::kCallBuiltinPointer:
2633 opcode = kArchCallBuiltinPointer | MiscField::encode(flags);
2634 break;
2635 }
2636
2637 // Emit the call instruction.
2638 size_t const output_count = buffer.outputs.size();
2639 auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
2640 Instruction* call_instr =
2641 Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
2642 &buffer.instruction_args.front());
2643 if (instruction_selection_failed()) return;
2644 call_instr->MarkAsCall();
2645
2646 EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
2647}
2648
2649void InstructionSelector::VisitCallWithCallerSavedRegisters(
2650 Node* node, BasicBlock* handler) {
2651 OperandGenerator g(this);
2652 const auto fp_mode = CallDescriptorOf(node->op())->get_save_fp_mode();
2653 Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(fp_mode)),
2654 g.NoOutput());
2655 VisitCall(node, handler);
2656 Emit(kArchRestoreCallerRegisters |
2657 MiscField::encode(static_cast<int>(fp_mode)),
2658 g.NoOutput());
2659}
2660
2661void InstructionSelector::VisitTailCall(Node* node) {
2662 OperandGenerator g(this);
2663 auto call_descriptor = CallDescriptorOf(node->op());
2664
2665 CallDescriptor* caller = linkage()->GetIncomingDescriptor();
2666 DCHECK(caller->CanTailCall(node));
2667 const CallDescriptor* callee = CallDescriptorOf(node->op());
2668 int stack_param_delta = callee->GetStackParameterDelta(caller);
2669 CallBuffer buffer(zone(), call_descriptor, nullptr);
2670
2671 // Compute InstructionOperands for inputs and outputs.
2672 CallBufferFlags flags(kCallCodeImmediate | kCallTail);
2673 if (IsTailCallAddressImmediate()) {
2674 flags |= kCallAddressImmediate;
2675 }
2676 if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
2677 flags |= kCallFixedTargetRegister;
2678 }
2679 DCHECK_EQ(callee->flags() & CallDescriptor::kAllowCallThroughSlot, 0);
2680 InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
2681
2682 // Select the appropriate opcode based on the call type.
2683 InstructionCode opcode;
2684 InstructionOperandVector temps(zone());
2685 if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
2686 switch (call_descriptor->kind()) {
2687 case CallDescriptor::kCallCodeObject:
2688 opcode = kArchTailCallCodeObjectFromJSFunction;
2689 break;
2690 default:
2691 UNREACHABLE();
2692 return;
2693 }
2694 int temps_count = GetTempsCountForTailCallFromJSFunction();
2695 for (int i = 0; i < temps_count; i++) {
2696 temps.push_back(g.TempRegister());
2697 }
2698 } else {
2699 switch (call_descriptor->kind()) {
2700 case CallDescriptor::kCallCodeObject:
2701 opcode = kArchTailCallCodeObject;
2702 break;
2703 case CallDescriptor::kCallAddress:
2704 opcode = kArchTailCallAddress;
2705 break;
2706 case CallDescriptor::kCallWasmFunction:
2707 opcode = kArchTailCallWasm;
2708 break;
2709 default:
2710 UNREACHABLE();
2711 return;
2712 }
2713 }
2714 opcode |= MiscField::encode(call_descriptor->flags());
2715
2716 Emit(kArchPrepareTailCall, g.NoOutput());
2717
2718 // Add an immediate operand that represents the first slot that is unused
2719 // with respect to the stack pointer that has been updated for the tail call
2720 // instruction. This is used by backends that need to pad arguments for stack
2721 // alignment, in order to store an optional slot of padding above the
2722 // arguments.
2723 int optional_padding_slot = callee->GetFirstUnusedStackSlot();
2724 buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
2725
2726 int first_unused_stack_slot =
2727 (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? true : false) +
2728 stack_param_delta;
2729 buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
2730
2731 // Emit the tailcall instruction.
2732 Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
2733 &buffer.instruction_args.front(), temps.size(),
2734 temps.empty() ? nullptr : &temps.front());
2735}
2736
2737void InstructionSelector::VisitGoto(BasicBlock* target) {
2738 // jump to the next block.
2739 OperandGenerator g(this);
2740 Emit(kArchJmp, g.NoOutput(), g.Label(target));
2741}
2742
2743void InstructionSelector::VisitReturn(Node* ret) {
2744 OperandGenerator g(this);
2745 const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
2746 ? 1
2747 : ret->op()->ValueInputCount();
2748 DCHECK_GE(input_count, 1);
2749 auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
2750 Node* pop_count = ret->InputAt(0);
2751 value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
2752 pop_count->opcode() == IrOpcode::kInt64Constant)
2753 ? g.UseImmediate(pop_count)
2754 : g.UseRegister(pop_count);
2755 for (int i = 1; i < input_count; ++i) {
2756 value_locations[i] =
2757 g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
2758 }
2759 Emit(kArchRet, 0, nullptr, input_count, value_locations);
2760}
2761
2762void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
2763 BasicBlock* fbranch) {
2764 if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
2765 FlagsContinuation cont =
2766 FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
2767 VisitWordCompareZero(branch, branch->InputAt(0), &cont);
2768 } else {
2769 FlagsContinuation cont =
2770 FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
2771 VisitWordCompareZero(branch, branch->InputAt(0), &cont);
2772 }
2773}
2774
2775void InstructionSelector::VisitDeoptimizeIf(Node* node) {
2776 DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
2777 if (NeedsPoisoning(p.is_safety_check())) {
2778 FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
2779 kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2780 VisitWordCompareZero(node, node->InputAt(0), &cont);
2781 } else {
2782 FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
2783 kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2784 VisitWordCompareZero(node, node->InputAt(0), &cont);
2785 }
2786}
2787
2788void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
2789 DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
2790 if (NeedsPoisoning(p.is_safety_check())) {
2791 FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
2792 kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2793 VisitWordCompareZero(node, node->InputAt(0), &cont);
2794 } else {
2795 FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
2796 kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2797 VisitWordCompareZero(node, node->InputAt(0), &cont);
2798 }
2799}
2800
2801void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
2802 FlagsContinuation cont =
2803 FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));
2804 VisitWordCompareZero(node, node->InputAt(0), &cont);
2805}
2806
2807void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
2808 FlagsContinuation cont =
2809 FlagsContinuation::ForTrap(kEqual, trap_id, node->InputAt(1));
2810 VisitWordCompareZero(node, node->InputAt(0), &cont);
2811}
2812
2813void InstructionSelector::EmitIdentity(Node* node) {
2814 OperandGenerator g(this);
2815 MarkAsUsed(node->InputAt(0));
2816 SetRename(node, node->InputAt(0));
2817}
2818
2819void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
2820 DeoptimizeReason reason,
2821 VectorSlotPair const& feedback,
2822 Node* value) {
2823 EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
2824 feedback, value);
2825}
2826
2827void InstructionSelector::VisitThrow(Node* node) {
2828 OperandGenerator g(this);
2829 Emit(kArchThrowTerminator, g.NoOutput());
2830}
2831
2832void InstructionSelector::VisitDebugBreak(Node* node) {
2833 OperandGenerator g(this);
2834 Emit(kArchDebugBreak, g.NoOutput());
2835}
2836
2837void InstructionSelector::VisitUnreachable(Node* node) {
2838 OperandGenerator g(this);
2839 Emit(kArchDebugBreak, g.NoOutput());
2840}
2841
2842void InstructionSelector::VisitDeadValue(Node* node) {
2843 OperandGenerator g(this);
2844 MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
2845 Emit(kArchDebugBreak, g.DefineAsConstant(node));
2846}
2847
2848void InstructionSelector::VisitComment(Node* node) {
2849 OperandGenerator g(this);
2850 InstructionOperand operand(g.UseImmediate(node));
2851 Emit(kArchComment, 0, nullptr, 1, &operand);
2852}
2853
2854void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
2855#if V8_TARGET_ARCH_64_BIT
2856 VisitInt64Add(node);
2857#else // V8_TARGET_ARCH_64_BIT
2858 VisitInt32Add(node);
2859#endif // V8_TARGET_ARCH_64_BIT
2860}
2861
2862void InstructionSelector::VisitRetain(Node* node) {
2863 OperandGenerator g(this);
2864 Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
2865}
2866
2867bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
2868 // TODO(jarin) Improve the heuristic here.
2869 if (node->opcode() == IrOpcode::kFloat64Add ||
2870 node->opcode() == IrOpcode::kFloat64Sub ||
2871 node->opcode() == IrOpcode::kFloat64Mul) {
2872 return false;
2873 }
2874 return true;
2875}
2876
2877FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
2878 Node* state) {
2879 DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
2880 DCHECK_EQ(kFrameStateInputCount, state->InputCount());
2881 FrameStateInfo state_info = FrameStateInfoOf(state->op());
2882
2883 int parameters = static_cast<int>(
2884 StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
2885 int locals = static_cast<int>(
2886 StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
2887 int stack = static_cast<int>(
2888 StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
2889
2890 DCHECK_EQ(parameters, state_info.parameter_count());
2891 DCHECK_EQ(locals, state_info.local_count());
2892
2893 FrameStateDescriptor* outer_state = nullptr;
2894 Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
2895 if (outer_node->opcode() == IrOpcode::kFrameState) {
2896 outer_state = GetFrameStateDescriptor(outer_node);
2897 }
2898
2899 return new (instruction_zone()) FrameStateDescriptor(
2900 instruction_zone(), state_info.type(), state_info.bailout_id(),
2901 state_info.state_combine(), parameters, locals, stack,
2902 state_info.shared_info(), outer_state);
2903}
2904
2905// static
2906void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
2907 uint8_t* shuffle,
2908 bool* needs_swap,
2909 bool* is_swizzle) {
2910 *needs_swap = false;
2911 // Inputs equal, then it's a swizzle.
2912 if (inputs_equal) {
2913 *is_swizzle = true;
2914 } else {
2915 // Inputs are distinct; check that both are required.
2916 bool src0_is_used = false;
2917 bool src1_is_used = false;
2918 for (int i = 0; i < kSimd128Size; ++i) {
2919 if (shuffle[i] < kSimd128Size) {
2920 src0_is_used = true;
2921 } else {
2922 src1_is_used = true;
2923 }
2924 }
2925 if (src0_is_used && !src1_is_used) {
2926 *is_swizzle = true;
2927 } else if (src1_is_used && !src0_is_used) {
2928 *needs_swap = true;
2929 *is_swizzle = true;
2930 } else {
2931 *is_swizzle = false;
2932 // Canonicalize general 2 input shuffles so that the first input lanes are
2933 // encountered first. This makes architectural shuffle pattern matching
2934 // easier, since we only need to consider 1 input ordering instead of 2.
2935 if (shuffle[0] >= kSimd128Size) {
2936 // The second operand is used first. Swap inputs and adjust the shuffle.
2937 *needs_swap = true;
2938 for (int i = 0; i < kSimd128Size; ++i) {
2939 shuffle[i] ^= kSimd128Size;
2940 }
2941 }
2942 }
2943 }
2944 if (*is_swizzle) {
2945 for (int i = 0; i < kSimd128Size; ++i) shuffle[i] &= kSimd128Size - 1;
2946 }
2947}
2948
2949void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
2950 bool* is_swizzle) {
2951 // Get raw shuffle indices.
2952 memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
2953 bool needs_swap;
2954 bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
2955 GetVirtualRegister(node->InputAt(1));
2956 CanonicalizeShuffle(inputs_equal, shuffle, &needs_swap, is_swizzle);
2957 if (needs_swap) {
2958 SwapShuffleInputs(node);
2959 }
2960 // Duplicate the first input; for some shuffles on some architectures, it's
2961 // easiest to implement a swizzle as a shuffle so it might be used.
2962 if (*is_swizzle) {
2963 node->ReplaceInput(1, node->InputAt(0));
2964 }
2965}
2966
2967// static
2968void InstructionSelector::SwapShuffleInputs(Node* node) {
2969 Node* input0 = node->InputAt(0);
2970 Node* input1 = node->InputAt(1);
2971 node->ReplaceInput(0, input1);
2972 node->ReplaceInput(1, input0);
2973}
2974
2975// static
2976bool InstructionSelector::TryMatchIdentity(const uint8_t* shuffle) {
2977 for (int i = 0; i < kSimd128Size; ++i) {
2978 if (shuffle[i] != i) return false;
2979 }
2980 return true;
2981}
2982
2983// static
2984bool InstructionSelector::TryMatch32x4Shuffle(const uint8_t* shuffle,
2985 uint8_t* shuffle32x4) {
2986 for (int i = 0; i < 4; ++i) {
2987 if (shuffle[i * 4] % 4 != 0) return false;
2988 for (int j = 1; j < 4; ++j) {
2989 if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
2990 }
2991 shuffle32x4[i] = shuffle[i * 4] / 4;
2992 }
2993 return true;
2994}
2995
2996// static
2997bool InstructionSelector::TryMatch16x8Shuffle(const uint8_t* shuffle,
2998 uint8_t* shuffle16x8) {
2999 for (int i = 0; i < 8; ++i) {
3000 if (shuffle[i * 2] % 2 != 0) return false;
3001 for (int j = 1; j < 2; ++j) {
3002 if (shuffle[i * 2 + j] - shuffle[i * 2 + j - 1] != 1) return false;
3003 }
3004 shuffle16x8[i] = shuffle[i * 2] / 2;
3005 }
3006 return true;
3007}
3008
3009// static
3010bool InstructionSelector::TryMatchConcat(const uint8_t* shuffle,
3011 uint8_t* offset) {
3012 // Don't match the identity shuffle (e.g. [0 1 2 ... 15]).
3013 uint8_t start = shuffle[0];
3014 if (start == 0) return false;
3015 DCHECK_GT(kSimd128Size, start); // The shuffle should be canonicalized.
3016 // A concatenation is a series of consecutive indices, with at most one jump
3017 // in the middle from the last lane to the first.
3018 for (int i = 1; i < kSimd128Size; ++i) {
3019 if ((shuffle[i]) != ((shuffle[i - 1] + 1))) {
3020 if (shuffle[i - 1] != 15) return false;
3021 if (shuffle[i] % kSimd128Size != 0) return false;
3022 }
3023 }
3024 *offset = start;
3025 return true;
3026}
3027
3028// static
3029bool InstructionSelector::TryMatchBlend(const uint8_t* shuffle) {
3030 for (int i = 0; i < 16; ++i) {
3031 if ((shuffle[i] & 0xF) != i) return false;
3032 }
3033 return true;
3034}
3035
3036// static
3037int32_t InstructionSelector::Pack4Lanes(const uint8_t* shuffle) {
3038 int32_t result = 0;
3039 for (int i = 3; i >= 0; --i) {
3040 result <<= 8;
3041 result |= shuffle[i];
3042 }
3043 return result;
3044}
3045
3046bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
3047 switch (poisoning_level_) {
3048 case PoisoningMitigationLevel::kDontPoison:
3049 return false;
3050 case PoisoningMitigationLevel::kPoisonAll:
3051 return safety_check != IsSafetyCheck::kNoSafetyCheck;
3052 case PoisoningMitigationLevel::kPoisonCriticalOnly:
3053 return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
3054 }
3055 UNREACHABLE();
3056}
3057
3058} // namespace compiler
3059} // namespace internal
3060} // namespace v8
3061