// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/cpu-features.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;

// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
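//
// For example, a compare feeding a branch can typically be selected as a
// single flags-setting instruction (a sketch; kX64Cmp32 is an x64 backend
// opcode):
//
//   FlagsContinuation cont =
//       FlagsContinuation::ForBranch(kUnsignedLessThan, tbranch, fbranch);
//   selector->EmitWithContinuation(kX64Cmp32, left, right, &cont);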
class FlagsContinuation final {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}

  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  static FlagsContinuation ForBranch(FlagsCondition condition,
                                     BasicBlock* true_block,
                                     BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch, condition, true_block,
                             false_block);
  }

  static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
                                              BasicBlock* true_block,
                                              BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
                             false_block);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimize(FlagsCondition condition,
                                         DeoptimizeKind kind,
                                         DeoptimizeReason reason,
                                         VectorSlotPair const& feedback,
                                         Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                             feedback, frame_state);
  }

  // Creates a new flags continuation for an eager deoptimization exit that
  // also updates the speculation poison register based on the condition.
  static FlagsContinuation ForDeoptimizeAndPoison(
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
      VectorSlotPair const& feedback, Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
                             reason, feedback, frame_state);
  }

  // Creates a new flags continuation for a boolean value.
  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
    return FlagsContinuation(condition, result);
  }

  // Creates a new flags continuation for a wasm trap.
  static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
                                   Node* result) {
    return FlagsContinuation(condition, trap_id, result);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const {
    return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
  }
  bool IsDeoptimize() const {
    return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsPoisoned() const {
    return mode_ == kFlags_branch_and_poison ||
           mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsSet() const { return mode_ == kFlags_set; }
  bool IsTrap() const { return mode_ == kFlags_trap; }
  FlagsCondition condition() const {
    DCHECK(!IsNone());
    return condition_;
  }
  DeoptimizeKind kind() const {
    DCHECK(IsDeoptimize());
    return kind_;
  }
  DeoptimizeReason reason() const {
    DCHECK(IsDeoptimize());
    return reason_;
  }
  VectorSlotPair const& feedback() const {
    DCHECK(IsDeoptimize());
    return feedback_;
  }
  Node* frame_state() const {
    DCHECK(IsDeoptimize());
    return frame_state_or_result_;
  }
  Node* result() const {
    DCHECK(IsSet());
    return frame_state_or_result_;
  }
  TrapId trap_id() const {
    DCHECK(IsTrap());
    return trap_id_;
  }
  BasicBlock* true_block() const {
    DCHECK(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    DCHECK(IsBranch());
    return false_block_;
  }

  void Negate() {
    DCHECK(!IsNone());
    condition_ = NegateFlagsCondition(condition_);
  }

  void Commute() {
    DCHECK(!IsNone());
    condition_ = CommuteFlagsCondition(condition_);
  }

  void Overwrite(FlagsCondition condition) { condition_ = condition; }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void OverwriteUnsignedIfSigned() {
    switch (condition_) {
      case kSignedLessThan:
        condition_ = kUnsignedLessThan;
        break;
      case kSignedLessThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        break;
      case kSignedGreaterThan:
        condition_ = kUnsignedGreaterThan;
        break;
      case kSignedGreaterThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        break;
      default:
        break;
    }
  }

  // Encodes this flags continuation into the given opcode.
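  // For example, encoding a branch continuation on kEqual sets both fields
  // (a sketch, using the field accessors from instruction-codes.h):
  //
  //   InstructionCode op = cont.Encode(opcode);
  //   DCHECK_EQ(kFlags_branch, FlagsModeField::decode(op));
  //   DCHECK_EQ(kEqual, FlagsConditionField::decode(op));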
  InstructionCode Encode(InstructionCode opcode) {
    opcode |= FlagsModeField::encode(mode_);
    if (mode_ != kFlags_none) {
      opcode |= FlagsConditionField::encode(condition_);
    }
    return opcode;
  }

 private:
  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    BasicBlock* true_block, BasicBlock* false_block)
      : mode_(mode),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
    DCHECK_NOT_NULL(true_block);
    DCHECK_NOT_NULL(false_block);
  }

  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    DeoptimizeKind kind, DeoptimizeReason reason,
                    VectorSlotPair const& feedback, Node* frame_state)
      : mode_(mode),
        condition_(condition),
        kind_(kind),
        reason_(reason),
        feedback_(feedback),
        frame_state_or_result_(frame_state) {
    DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
    DCHECK_NOT_NULL(frame_state);
  }

  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set),
        condition_(condition),
        frame_state_or_result_(result) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
      : mode_(kFlags_trap),
        condition_(condition),
        frame_state_or_result_(result),
        trap_id_(trap_id) {
    DCHECK_NOT_NULL(result);
  }

  FlagsMode const mode_;
  FlagsCondition condition_;
  DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize*
  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize*
  VectorSlotPair feedback_;      // Only valid if mode_ == kFlags_deoptimize*
  Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize*
                                 // or mode_ == kFlags_set.
  BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch*.
  BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch*.
  TrapId trap_id_;               // Only valid if mode_ == kFlags_trap.
};

// This struct connects nodes of parameters that are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
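//
// For example, an argument to be pushed into a caller frame slot might be
// described as (a sketch; the slot number and machine type are hypothetical):
//
//   PushParameter input(node, LinkageLocation::ForCallerFrameSlot(
//                                 0, MachineType::AnyTagged()));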
struct PushParameter {
  PushParameter(Node* n = nullptr,
                LinkageLocation l = LinkageLocation::ForAnyRegister())
      : node(n), location(l) {}

  Node* node;
  LinkageLocation location;
};

enum class FrameStateInputKind { kAny, kStackSlot };

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableRootsRelativeAddressing {
    kDisableRootsRelativeAddressing,
    kEnableRootsRelativeAddressing
  };
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
  enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      EnableSwitchJumpTable enable_switch_jump_table,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableRootsRelativeAddressing enable_roots_relative_addressing =
          kDisableRootsRelativeAddressing,
      PoisoningMitigationLevel poisoning_level =
          PoisoningMitigationLevel::kDontPoison,
      EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);
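
  // A typical use of the Emit overloads above from an architecture-specific
  // visitor (a sketch; X64OperandGenerator and kX64Add32 are x64 backend
  // names):
  //
  //   X64OperandGenerator g(this);
  //   Emit(kX64Add32, g.DefineSameAsFirst(node), g.UseRegister(left),
  //        g.Use(right));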

  // Emits [0-3]-operand instructions with no output; a branch continuation
  // supplies the labels of its true and false blocks as extra inputs.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);

  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs, DeoptimizeKind kind,
                              DeoptimizeReason reason,
                              VectorSlotPair const& feedback,
                              Node* frame_state);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }
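
  // For example, a backend can guard an optional instruction sequence on a
  // feature check (a sketch; POPCNT is an x64 CpuFeature):
  //
  //   if (IsSupported(POPCNT)) { /* emit a popcnt-based sequence */ }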

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  bool NeedsPoisoning(IsSafetyCheck safety_check) const;

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge to the node and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
  // CanCover is not transitive. A counterexample is nodes A, B, C such that
  // CanCover(A, B) and CanCover(B, C) hold while B is pure: then the effect
  // levels of A and B might differ. CanCoverTransitively does the additional
  // checks.
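  //
  // For example (a sketch): let C be a load, B a pure operation on C, and A a
  // user of B, with a store scheduled between B and A. CanCover(A, B) holds
  // because B is pure, and CanCover(B, C) holds because B and C have the same
  // effect level, yet folding C into the instruction selected for A would
  // move the load across the store.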
  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block. This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together. If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register.
  bool CanAddressRelativeToRootsRegister() const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

  const ZoneVector<std::pair<int, int>>& instr_origins() const {
    return instr_origins_;
  }

  // Expose these SIMD helper functions for testing.
  static void CanonicalizeShuffleForTesting(bool inputs_equal, uint8_t* shuffle,
                                            bool* needs_swap,
                                            bool* is_swizzle) {
    CanonicalizeShuffle(inputs_equal, shuffle, needs_swap, is_swizzle);
  }

  static bool TryMatchIdentityForTesting(const uint8_t* shuffle) {
    return TryMatchIdentity(shuffle);
  }
  template <int LANES>
  static bool TryMatchDupForTesting(const uint8_t* shuffle, int* index) {
    return TryMatchDup<LANES>(shuffle, index);
  }
  static bool TryMatch32x4ShuffleForTesting(const uint8_t* shuffle,
                                            uint8_t* shuffle32x4) {
    return TryMatch32x4Shuffle(shuffle, shuffle32x4);
  }
  static bool TryMatch16x8ShuffleForTesting(const uint8_t* shuffle,
                                            uint8_t* shuffle16x8) {
    return TryMatch16x8Shuffle(shuffle, shuffle16x8);
  }
  static bool TryMatchConcatForTesting(const uint8_t* shuffle,
                                       uint8_t* offset) {
    return TryMatchConcat(shuffle, offset);
  }
  static bool TryMatchBlendForTesting(const uint8_t* shuffle) {
    return TryMatchBlend(shuffle);
  }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 VectorSlotPair const& feedback,
                                 Node* frame_state);

  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);
  void EmitBinarySearchSwitch(const SwitchInfo& sw,
                              InstructionOperand& value_operand);

  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
  void MarkAsCompressed(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kCompressed, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3,
    kAllowCallThroughSlot = 1u << 4
  };
  using CallBufferFlags = base::Flags<CallBufferFlag>;

  // Initialize the call buffer with the InstructionOperands, nodes, etc.
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, bool is_tail_call,
                            int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         Node* state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitCallWithCallerSavedRegisters(Node* call,
                                         BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTrapIf(Node* node, TrapId trap_id);
  void VisitTrapUnless(Node* node, TrapId trap_id);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       VectorSlotPair const& feedback, Node* value);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* node);
  void VisitRetain(Node* node);
  void VisitUnreachable(Node* node);
  void VisitDeadValue(Node* node);

  void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);

  void EmitWordPoisonOnSpeculation(Node* node);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* call_descriptor, Node* node);
  void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
                          const CallDescriptor* call_descriptor, Node* node);

  void EmitIdentity(Node* node);
  bool CanProduceSignalingNaN(Node* node);

  // ===========================================================================
  // ============= Vector instruction (SIMD) helper fns. =======================
  // ===========================================================================

  // Converts a shuffle into canonical form, meaning that the first lane index
  // is in the range [0 .. 15]. Set |inputs_equal| true if this is an explicit
  // swizzle. Returns canonicalized |shuffle|, |needs_swap|, and |is_swizzle|.
  // If |needs_swap| is true, inputs must be swapped. If |is_swizzle| is true,
  // the second input can be ignored.
  static void CanonicalizeShuffle(bool inputs_equal, uint8_t* shuffle,
                                  bool* needs_swap, bool* is_swizzle);

  // Canonicalize shuffles to make pattern matching simpler. Returns the
  // shuffle indices, and a boolean indicating if the shuffle is a swizzle
  // (one input).
  void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);

  // Swaps the two first input operands of the node, to help match shuffles
  // to specific architectural instructions.
  void SwapShuffleInputs(Node* node);

  // Tries to match an 8x16 byte shuffle to the identity shuffle, which is
  // [0 1 ... 15]. This should be called after canonicalizing the shuffle, so
  // that the second identity shuffle, [16 17 ... 31], is converted to the
  // first one.
  static bool TryMatchIdentity(const uint8_t* shuffle);

  // Tries to match a byte shuffle to a scalar splat operation. Returns the
  // index of the lane if successful.
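  // For example, with LANES == 4 (so kBytesPerLane == 4), the shuffle
  //   [8 9 10 11 8 9 10 11 8 9 10 11 8 9 10 11]
  // is a splat of lane 2: the match succeeds and *index is set to 2.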
  template <int LANES>
  static bool TryMatchDup(const uint8_t* shuffle, int* index) {
    const int kBytesPerLane = kSimd128Size / LANES;
    // Get the first lane's worth of bytes and check that indices start at a
    // lane boundary and are consecutive.
    uint8_t lane0[kBytesPerLane];
    lane0[0] = shuffle[0];
    if (lane0[0] % kBytesPerLane != 0) return false;
    for (int i = 1; i < kBytesPerLane; ++i) {
      lane0[i] = shuffle[i];
      if (lane0[i] != lane0[0] + i) return false;
    }
    // Now check that the other lanes are identical to lane0.
    for (int i = 1; i < LANES; ++i) {
      for (int j = 0; j < kBytesPerLane; ++j) {
        if (lane0[j] != shuffle[i * kBytesPerLane + j]) return false;
      }
    }
    *index = lane0[0] / kBytesPerLane;
    return true;
  }

  // Tries to match an 8x16 byte shuffle to an equivalent 32x4 shuffle. If
  // successful, it writes the 32x4 shuffle word indices. E.g.
  // [0 1 2 3 8 9 10 11 4 5 6 7 12 13 14 15] == [0 2 1 3]
  static bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4);

  // Tries to match an 8x16 byte shuffle to an equivalent 16x8 shuffle. If
  // successful, it writes the 16x8 shuffle word indices. E.g.
  // [0 1 8 9 2 3 10 11 4 5 12 13 6 7 14 15] == [0 4 1 5 2 6 3 7]
  static bool TryMatch16x8Shuffle(const uint8_t* shuffle, uint8_t* shuffle16x8);

  // Tries to match a byte shuffle to a concatenate operation, formed by taking
  // 16 bytes from the 32 byte concatenation of the inputs. If successful, it
  // writes the byte offset. E.g. [4 5 6 7 .. 16 17 18 19] concatenates both
  // source vectors with offset 4. The shuffle should be canonicalized.
  static bool TryMatchConcat(const uint8_t* shuffle, uint8_t* offset);

  // Tries to match a byte shuffle to a blend operation, which is a shuffle
  // where no lanes change position. E.g. [0 9 2 11 .. 14 31] interleaves the
  // even lanes of the first source with the odd lanes of the second. The
  // shuffle should be canonicalized.
  static bool TryMatchBlend(const uint8_t* shuffle);

  // Packs 4 bytes of shuffle into a 32 bit immediate.
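  // For example, the identity bytes [0 1 2 3] pack to 0x03020100, with
  // shuffle[0] in the lowest byte (a sketch of the packing; see the
  // definition in instruction-selector.cc for the authoritative byte order).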
  static int32_t Pack4Lanes(const uint8_t* shuffle);

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
  void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
                                    ArchOpcode uint16_op,
                                    ArchOpcode uint32_op);

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableRootsRelativeAddressing enable_roots_relative_addressing_;
  EnableSwitchJumpTable enable_switch_jump_table_;

  PoisoningMitigationLevel poisoning_level_;
  Frame* frame_;
  bool instruction_selection_failed_;
  ZoneVector<std::pair<int, int>> instr_origins_;
  EnableTraceTurboJson trace_turbo_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_