1 | // Copyright 2014 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_ |
6 | #define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_ |
7 | |
8 | #include <map> |
9 | |
10 | #include "src/compiler/backend/instruction-scheduler.h" |
11 | #include "src/compiler/backend/instruction.h" |
12 | #include "src/compiler/common-operator.h" |
13 | #include "src/compiler/linkage.h" |
14 | #include "src/compiler/machine-operator.h" |
15 | #include "src/compiler/node.h" |
16 | #include "src/cpu-features.h" |
17 | #include "src/globals.h" |
18 | #include "src/zone/zone-containers.h" |
19 | |
20 | namespace v8 { |
21 | namespace internal { |
22 | namespace compiler { |
23 | |
24 | // Forward declarations. |
25 | class BasicBlock; |
26 | struct CallBuffer; // TODO(bmeurer): Remove this. |
27 | class Linkage; |
28 | class OperandGenerator; |
29 | class SwitchInfo; |
30 | class StateObjectDeduplicator; |
31 | |
32 | // The flags continuation is a way to combine a branch or a materialization |
33 | // of a boolean value with an instruction that sets the flags register. |
34 | // The whole instruction is treated as a unit by the register allocator, and |
35 | // thus no spills or moves can be introduced between the flags-setting |
36 | // instruction and the branch or set it should be combined with. |
37 | class FlagsContinuation final { |
38 | public: |
39 | FlagsContinuation() : mode_(kFlags_none) {} |
40 | |
41 | // Creates a new flags continuation from the given condition and true/false |
42 | // blocks. |
43 | static FlagsContinuation ForBranch(FlagsCondition condition, |
44 | BasicBlock* true_block, |
45 | BasicBlock* false_block) { |
46 | return FlagsContinuation(kFlags_branch, condition, true_block, false_block); |
47 | } |
48 | |
49 | static FlagsContinuation ForBranchAndPoison(FlagsCondition condition, |
50 | BasicBlock* true_block, |
51 | BasicBlock* false_block) { |
52 | return FlagsContinuation(kFlags_branch_and_poison, condition, true_block, |
53 | false_block); |
54 | } |
55 | |
56 | // Creates a new flags continuation for an eager deoptimization exit. |
57 | static FlagsContinuation ForDeoptimize(FlagsCondition condition, |
58 | DeoptimizeKind kind, |
59 | DeoptimizeReason reason, |
60 | VectorSlotPair const& feedback, |
61 | Node* frame_state) { |
62 | return FlagsContinuation(kFlags_deoptimize, condition, kind, reason, |
63 | feedback, frame_state); |
64 | } |
65 | |
66 | // Creates a new flags continuation for an eager deoptimization exit. |
67 | static FlagsContinuation ForDeoptimizeAndPoison( |
68 | FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, |
69 | VectorSlotPair const& feedback, Node* frame_state) { |
70 | return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind, |
71 | reason, feedback, frame_state); |
72 | } |
73 | |
74 | // Creates a new flags continuation for a boolean value. |
75 | static FlagsContinuation ForSet(FlagsCondition condition, Node* result) { |
76 | return FlagsContinuation(condition, result); |
77 | } |
78 | |
79 | // Creates a new flags continuation for a wasm trap. |
80 | static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id, |
81 | Node* result) { |
82 | return FlagsContinuation(condition, trap_id, result); |
83 | } |
84 | |
85 | bool IsNone() const { return mode_ == kFlags_none; } |
86 | bool IsBranch() const { |
87 | return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison; |
88 | } |
89 | bool IsDeoptimize() const { |
90 | return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison; |
91 | } |
92 | bool IsPoisoned() const { |
93 | return mode_ == kFlags_branch_and_poison || |
94 | mode_ == kFlags_deoptimize_and_poison; |
95 | } |
96 | bool IsSet() const { return mode_ == kFlags_set; } |
97 | bool IsTrap() const { return mode_ == kFlags_trap; } |
98 | FlagsCondition condition() const { |
99 | DCHECK(!IsNone()); |
100 | return condition_; |
101 | } |
102 | DeoptimizeKind kind() const { |
103 | DCHECK(IsDeoptimize()); |
104 | return kind_; |
105 | } |
106 | DeoptimizeReason reason() const { |
107 | DCHECK(IsDeoptimize()); |
108 | return reason_; |
109 | } |
110 | VectorSlotPair const& feedback() const { |
111 | DCHECK(IsDeoptimize()); |
112 | return feedback_; |
113 | } |
114 | Node* frame_state() const { |
115 | DCHECK(IsDeoptimize()); |
116 | return frame_state_or_result_; |
117 | } |
118 | Node* result() const { |
119 | DCHECK(IsSet()); |
120 | return frame_state_or_result_; |
121 | } |
122 | TrapId trap_id() const { |
123 | DCHECK(IsTrap()); |
124 | return trap_id_; |
125 | } |
126 | BasicBlock* true_block() const { |
127 | DCHECK(IsBranch()); |
128 | return true_block_; |
129 | } |
130 | BasicBlock* false_block() const { |
131 | DCHECK(IsBranch()); |
132 | return false_block_; |
133 | } |
134 | |
135 | void Negate() { |
136 | DCHECK(!IsNone()); |
137 | condition_ = NegateFlagsCondition(condition_); |
138 | } |
139 | |
140 | void Commute() { |
141 | DCHECK(!IsNone()); |
142 | condition_ = CommuteFlagsCondition(condition_); |
143 | } |
144 | |
145 | void Overwrite(FlagsCondition condition) { condition_ = condition; } |
146 | |
147 | void OverwriteAndNegateIfEqual(FlagsCondition condition) { |
148 | DCHECK(condition_ == kEqual || condition_ == kNotEqual); |
149 | bool negate = condition_ == kEqual; |
150 | condition_ = condition; |
151 | if (negate) Negate(); |
152 | } |
153 | |
154 | void OverwriteUnsignedIfSigned() { |
155 | switch (condition_) { |
156 | case kSignedLessThan: |
157 | condition_ = kUnsignedLessThan; |
158 | break; |
159 | case kSignedLessThanOrEqual: |
160 | condition_ = kUnsignedLessThanOrEqual; |
161 | break; |
162 | case kSignedGreaterThan: |
163 | condition_ = kUnsignedGreaterThan; |
164 | break; |
165 | case kSignedGreaterThanOrEqual: |
166 | condition_ = kUnsignedGreaterThanOrEqual; |
167 | break; |
168 | default: |
169 | break; |
170 | } |
171 | } |
172 | |
173 | // Encodes this flags continuation into the given opcode. |
174 | InstructionCode Encode(InstructionCode opcode) { |
175 | opcode |= FlagsModeField::encode(mode_); |
176 | if (mode_ != kFlags_none) { |
177 | opcode |= FlagsConditionField::encode(condition_); |
178 | } |
179 | return opcode; |
180 | } |
181 | |
182 | private: |
183 | FlagsContinuation(FlagsMode mode, FlagsCondition condition, |
184 | BasicBlock* true_block, BasicBlock* false_block) |
185 | : mode_(mode), |
186 | condition_(condition), |
187 | true_block_(true_block), |
188 | false_block_(false_block) { |
189 | DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison); |
190 | DCHECK_NOT_NULL(true_block); |
191 | DCHECK_NOT_NULL(false_block); |
192 | } |
193 | |
194 | FlagsContinuation(FlagsMode mode, FlagsCondition condition, |
195 | DeoptimizeKind kind, DeoptimizeReason reason, |
196 | VectorSlotPair const& feedback, Node* frame_state) |
197 | : mode_(mode), |
198 | condition_(condition), |
199 | kind_(kind), |
200 | reason_(reason), |
201 | feedback_(feedback), |
202 | frame_state_or_result_(frame_state) { |
203 | DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison); |
204 | DCHECK_NOT_NULL(frame_state); |
205 | } |
206 | |
207 | FlagsContinuation(FlagsCondition condition, Node* result) |
208 | : mode_(kFlags_set), |
209 | condition_(condition), |
210 | frame_state_or_result_(result) { |
211 | DCHECK_NOT_NULL(result); |
212 | } |
213 | |
214 | FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result) |
215 | : mode_(kFlags_trap), |
216 | condition_(condition), |
217 | frame_state_or_result_(result), |
218 | trap_id_(trap_id) { |
219 | DCHECK_NOT_NULL(result); |
220 | } |
221 | |
222 | FlagsMode const mode_; |
223 | FlagsCondition condition_; |
224 | DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize* |
225 | DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize* |
226 | VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize* |
227 | Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize* |
228 | // or mode_ == kFlags_set. |
229 | BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*. |
230 | BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*. |
231 | TrapId trap_id_; // Only valid if mode_ == kFlags_trap. |
232 | }; |
233 | |
234 | // This struct connects nodes of parameters which are going to be pushed on the |
235 | // call stack with their parameter index in the call descriptor of the callee. |
236 | struct PushParameter { |
237 | PushParameter(Node* n = nullptr, |
238 | LinkageLocation l = LinkageLocation::ForAnyRegister()) |
239 | : node(n), location(l) {} |
240 | |
241 | Node* node; |
242 | LinkageLocation location; |
243 | }; |
244 | |
// How operands for frame state inputs may be created: any operand kind, or
// restricted to stack slots (consumed by AddInputsToFrameStateDescriptor /
// AddOperandToStateValueDescriptor below).
enum class FrameStateInputKind { kAny, kStackSlot };
246 | |
247 | // Instruction selection generates an InstructionSequence for a given Schedule. |
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  // Configuration knobs for the constructor below.
  // Whether source positions are recorded only for calls or for everything.
  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  // Whether the instruction scheduler may run (see UseInstructionScheduling).
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  // Whether accesses may be addressed relative to the roots register (see
  // CanAddressRelativeToRootsRegister / CanUseRootsRegister).
  enum EnableRootsRelativeAddressing {
    kDisableRootsRelativeAddressing,
    kEnableRootsRelativeAddressing
  };
  // Whether switches may be lowered via jump tables (see EmitTableSwitch).
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
  // Whether instruction origins are tracked (see instr_origins()).
  enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };
264 | |
  // Selects instructions for the graph described by {schedule}/{linkage},
  // writing the result into {sequence}. The trailing defaulted arguments are
  // configuration knobs; see the enums above.
  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      EnableSwitchJumpTable enable_switch_jump_table,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableRootsRelativeAddressing enable_roots_relative_addressing =
          kDisableRootsRelativeAddressing,
      PoisoningMitigationLevel poisoning_level =
          PoisoningMitigationLevel::kDontPoison,
      EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();

  // Per-block emission interface: StartBlock/EndBlock delimit the block with
  // RPO number {rpo}; AddInstruction/AddTerminator append to it.
  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);
288 | |
  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  // Emit an instruction with the given operands and return it. The overloads
  // cover one output plus zero to six fixed inputs; the (output_count,
  // outputs, input_count, inputs) form takes arbitrary operand arrays.
  // Optional {temps} are temporaries requested from the register allocator.
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);

  // [0-3] operand instructions with no output, uses labels for true and false
  // blocks of the continuation.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);
344 | |
  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================

  // Emit an instruction that carries a deoptimization exit described by
  // {kind}, {reason}, {feedback} and {frame_state}.
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs, DeoptimizeKind kind,
                              DeoptimizeReason reason,
                              VectorSlotPair const& feedback,
                              Node* frame_state);
354 | |
355 | // =========================================================================== |
356 | // ============== Architecture-independent CPU feature methods. ============== |
357 | // =========================================================================== |
358 | |
359 | class Features final { |
360 | public: |
361 | Features() : bits_(0) {} |
362 | explicit Features(unsigned bits) : bits_(bits) {} |
363 | explicit Features(CpuFeature f) : bits_(1u << f) {} |
364 | Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {} |
365 | |
366 | bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); } |
367 | |
368 | private: |
369 | unsigned bits_; |
370 | }; |
371 | |
  // Returns whether the given CPU {feature} is enabled for this selector.
  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  // Alignment constraints of the target machine operators.
  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  // Whether a load marked with {safety_check} requires poisoning under the
  // current mitigation level.
  bool NeedsPoisoning(IsSafetyCheck safety_check) const;
387 | |
388 | // =========================================================================== |
389 | // ============ Architecture-independent graph covering methods. ============= |
390 | // =========================================================================== |
391 | |
  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
  // CanCover is not transitive. A counterexample are nodes A, B, C such that
  // CanCover(A, B) and CanCover(B, C) and B is pure: then the effect levels of
  // A and B might differ. CanCoverTransitively does the additional checks.
  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block. This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together. If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live, i.e. used but not yet defined.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  // Virtual-register bookkeeping; the *ForTesting variant exposes the whole
  // node-to-register mapping for tests.
  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register.
  bool CanAddressRelativeToRootsRegister() const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

  // (instruction index, node id) origin pairs recorded when
  // kEnableTraceTurboJson is set.
  const ZoneVector<std::pair<int, int>>& instr_origins() const {
    return instr_origins_;
  }
455 | |
  // Expose these SIMD helper functions for testing. Each wrapper forwards to
  // the identically named private static shuffle-matching helper.
  static void CanonicalizeShuffleForTesting(bool inputs_equal, uint8_t* shuffle,
                                            bool* needs_swap,
                                            bool* is_swizzle) {
    CanonicalizeShuffle(inputs_equal, shuffle, needs_swap, is_swizzle);
  }

  static bool TryMatchIdentityForTesting(const uint8_t* shuffle) {
    return TryMatchIdentity(shuffle);
  }
  template <int LANES>
  static bool TryMatchDupForTesting(const uint8_t* shuffle, int* index) {
    return TryMatchDup<LANES>(shuffle, index);
  }
  static bool TryMatch32x4ShuffleForTesting(const uint8_t* shuffle,
                                            uint8_t* shuffle32x4) {
    return TryMatch32x4Shuffle(shuffle, shuffle32x4);
  }
  static bool TryMatch16x8ShuffleForTesting(const uint8_t* shuffle,
                                            uint8_t* shuffle16x8) {
    return TryMatch16x8Shuffle(shuffle, shuffle16x8);
  }
  static bool TryMatchConcatForTesting(const uint8_t* shuffle,
                                       uint8_t* offset) {
    return TryMatchConcat(shuffle, offset);
  }
  static bool TryMatchBlendForTesting(const uint8_t* shuffle) {
    return TryMatchBlend(shuffle);
  }
485 | |
 private:
  friend class OperandGenerator;

  // True only when both the kEnableScheduling knob and platform support for
  // the instruction scheduler are present.
  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  // Appends the operands describing the deoptimization ({kind}, {reason},
  // {feedback}, {frame_state}) to {args}.
  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 VectorSlotPair const& feedback,
                                 Node* frame_state);

  // Emission helpers for the different switch lowering strategies.
  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);
  void EmitBinarySearchSwitch(const SwitchInfo& sw,
                              InstructionOperand& value_operand);

  // Virtual-register renaming support. NOTE(review): the use sites are not
  // visible in this header; see the definitions in instruction-selector.cc.
  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);
510 | |
  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}. The MarkAs<Rep> helpers below are shorthands for a fixed
  // MachineRepresentation.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
  void MarkAsCompressed(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kCompressed, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);
550 | |
  // Flags controlling how call inputs are prepared in InitializeCallBuffer.
  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3,
    kAllowCallThroughSlot = 1u << 4
  };
  using CallBufferFlags = base::Flags<CallBufferFlag>;

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, bool is_tail_call,
                            int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  // Frame-state translation: builds the descriptor for {node} and adds the
  // corresponding operands/state values to the given vectors; both Add*
  // functions return the number of entries added.
  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         Node* state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);
583 | |
584 | // =========================================================================== |
585 | // ============= Architecture-specific graph covering methods. =============== |
586 | // =========================================================================== |
587 | |
  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any (presumably dispatching to the
  // per-opcode Visit* methods declared via DECLARE_GENERATOR below — the
  // definition is not visible in this header).
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions, emitting the
  // given instruction {code}.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);
602 | #define DECLARE_GENERATOR(x) void Visit##x(Node* |
---|