1 | // Copyright 2014 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/compiler/backend/register-allocator.h" |
6 | |
7 | #include <iomanip> |
8 | |
9 | #include "src/assembler-inl.h" |
10 | #include "src/base/adapters.h" |
11 | #include "src/base/small-vector.h" |
12 | #include "src/compiler/linkage.h" |
13 | #include "src/string-stream.h" |
14 | #include "src/vector.h" |
15 | |
16 | namespace v8 { |
17 | namespace internal { |
18 | namespace compiler { |
19 | |
20 | #define TRACE(...) \ |
21 | do { \ |
22 | if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
23 | } while (false) |
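// Illustrative usage (hypothetical values): a call such as
//   TRACE("Processing live range %d\n", vreg);
// prints only when the --trace-alloc flag is set; otherwise the do/while
// wrapper reduces to a cheap flag check.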
24 | |
25 | namespace { |
26 | |
27 | static constexpr int kFloat32Bit = |
28 | RepresentationBit(MachineRepresentation::kFloat32); |
29 | static constexpr int kSimd128Bit = |
30 | RepresentationBit(MachineRepresentation::kSimd128); |
31 | |
32 | int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) { |
33 | return kind == FP_REGISTERS ? cfg->num_double_registers() |
34 | : cfg->num_general_registers(); |
35 | } |
36 | |
37 | int GetAllocatableRegisterCount(const RegisterConfiguration* cfg, |
38 | RegisterKind kind) { |
39 | return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers() |
40 | : cfg->num_allocatable_general_registers(); |
41 | } |
42 | |
43 | const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg, |
44 | RegisterKind kind) { |
45 | return kind == FP_REGISTERS ? cfg->allocatable_double_codes() |
46 | : cfg->allocatable_general_codes(); |
47 | } |
48 | |
49 | const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, |
50 | const InstructionBlock* block) { |
51 | RpoNumber index = block->loop_header(); |
52 | if (!index.IsValid()) return nullptr; |
53 | return sequence->InstructionBlockAt(index); |
54 | } |
55 | |
56 | const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, |
57 | LifetimePosition pos) { |
58 | return code->GetInstructionBlock(pos.ToInstructionIndex()); |
59 | } |
60 | |
61 | Instruction* GetLastInstruction(InstructionSequence* code, |
62 | const InstructionBlock* block) { |
63 | return code->InstructionAt(block->last_instruction_index()); |
64 | } |
65 | |
66 | // TODO(dcarney): fix frame to allow frame accesses to half size location. |
67 | int GetByteWidth(MachineRepresentation rep) { |
68 | switch (rep) { |
69 | case MachineRepresentation::kBit: |
70 | case MachineRepresentation::kWord8: |
71 | case MachineRepresentation::kWord16: |
72 | case MachineRepresentation::kWord32: |
73 | case MachineRepresentation::kFloat32: |
74 | return kSystemPointerSize; |
75 | case MachineRepresentation::kTaggedSigned: |
76 | case MachineRepresentation::kTaggedPointer: |
77 | case MachineRepresentation::kTagged: |
78 | case MachineRepresentation::kCompressedSigned: |
79 | case MachineRepresentation::kCompressedPointer: |
80 | case MachineRepresentation::kCompressed: |
81 | // TODO(ishell): kTaggedSize once half size locations are supported. |
82 | return kSystemPointerSize; |
83 | case MachineRepresentation::kWord64: |
84 | case MachineRepresentation::kFloat64: |
85 | return kDoubleSize; |
86 | case MachineRepresentation::kSimd128: |
87 | return kSimd128Size; |
88 | case MachineRepresentation::kNone: |
89 | break; |
90 | } |
91 | UNREACHABLE(); |
92 | } |
93 | |
94 | } // namespace |
95 | |
96 | class LiveRangeBound { |
97 | public: |
98 | explicit LiveRangeBound(LiveRange* range, bool skip) |
99 | : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) { |
100 | DCHECK(!range->IsEmpty()); |
101 | } |
102 | |
103 | bool CanCover(LifetimePosition position) { |
104 | return start_ <= position && position < end_; |
105 | } |
106 | |
107 | LiveRange* const range_; |
108 | const LifetimePosition start_; |
109 | const LifetimePosition end_; |
110 | const bool skip_; |
111 | |
112 | private: |
113 | DISALLOW_COPY_AND_ASSIGN(LiveRangeBound); |
114 | }; |
115 | |
116 | struct FindResult { |
117 | LiveRange* cur_cover_; |
118 | LiveRange* pred_cover_; |
119 | }; |
120 | |
121 | class LiveRangeBoundArray { |
122 | public: |
123 | LiveRangeBoundArray() : length_(0), start_(nullptr) {} |
124 | |
125 | bool ShouldInitialize() { return start_ == nullptr; } |
126 | |
127 | void Initialize(Zone* zone, TopLevelLiveRange* range) { |
128 | size_t max_child_count = range->GetMaxChildCount(); |
129 | |
130 | start_ = zone->NewArray<LiveRangeBound>(max_child_count); |
131 | length_ = 0; |
132 | LiveRangeBound* curr = start_; |
133 | // Normally, spilled ranges do not need connecting moves, because the spill |
134 | // location has been assigned at definition. For ranges spilled in deferred |
135 | // blocks, that is not the case, so we need to connect the spilled children. |
136 | for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr, ++length_) { |
137 | new (curr) LiveRangeBound(i, i->spilled()); |
138 | } |
139 | } |
140 | |
141 | LiveRangeBound* Find(const LifetimePosition position) const { |
142 | size_t left_index = 0; |
143 | size_t right_index = length_; |
144 | while (true) { |
145 | size_t current_index = left_index + (right_index - left_index) / 2; |
146 | DCHECK(right_index > current_index); |
147 | LiveRangeBound* bound = &start_[current_index]; |
148 | if (bound->start_ <= position) { |
149 | if (position < bound->end_) return bound; |
150 | DCHECK(left_index < current_index); |
151 | left_index = current_index; |
152 | } else { |
153 | right_index = current_index; |
154 | } |
155 | } |
156 | } |
157 | |
158 | LiveRangeBound* FindPred(const InstructionBlock* pred) { |
159 | LifetimePosition pred_end = |
160 | LifetimePosition::InstructionFromInstructionIndex( |
161 | pred->last_instruction_index()); |
162 | return Find(pred_end); |
163 | } |
164 | |
165 | LiveRangeBound* FindSucc(const InstructionBlock* succ) { |
166 | LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex( |
167 | succ->first_instruction_index()); |
168 | return Find(succ_start); |
169 | } |
170 | |
171 | bool FindConnectableSubranges(const InstructionBlock* block, |
172 | const InstructionBlock* pred, |
173 | FindResult* result) const { |
174 | LifetimePosition pred_end = |
175 | LifetimePosition::InstructionFromInstructionIndex( |
176 | pred->last_instruction_index()); |
177 | LiveRangeBound* bound = Find(pred_end); |
178 | result->pred_cover_ = bound->range_; |
179 | LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex( |
180 | block->first_instruction_index()); |
181 | |
182 | if (bound->CanCover(cur_start)) { |
183 | // Both blocks are covered by the same range, so there is nothing to |
184 | // connect. |
185 | return false; |
186 | } |
187 | bound = Find(cur_start); |
188 | if (bound->skip_) { |
189 | return false; |
190 | } |
191 | result->cur_cover_ = bound->range_; |
192 | DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr); |
193 | return (result->cur_cover_ != result->pred_cover_); |
194 | } |
195 | |
196 | private: |
197 | size_t length_; |
198 | LiveRangeBound* start_; |
199 | |
200 | DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray); |
201 | }; |
202 | |
203 | class LiveRangeFinder { |
204 | public: |
205 | explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone) |
206 | : data_(data), |
207 | bounds_length_(static_cast<int>(data_->live_ranges().size())), |
208 | bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)), |
209 | zone_(zone) { |
210 | for (int i = 0; i < bounds_length_; ++i) { |
211 | new (&bounds_[i]) LiveRangeBoundArray(); |
212 | } |
213 | } |
214 | |
215 | LiveRangeBoundArray* ArrayFor(int operand_index) { |
216 | DCHECK(operand_index < bounds_length_); |
217 | TopLevelLiveRange* range = data_->live_ranges()[operand_index]; |
218 | DCHECK(range != nullptr && !range->IsEmpty()); |
219 | LiveRangeBoundArray* array = &bounds_[operand_index]; |
220 | if (array->ShouldInitialize()) { |
221 | array->Initialize(zone_, range); |
222 | } |
223 | return array; |
224 | } |
225 | |
226 | private: |
227 | const RegisterAllocationData* const data_; |
228 | const int bounds_length_; |
229 | LiveRangeBoundArray* const bounds_; |
230 | Zone* const zone_; |
231 | |
232 | DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder); |
233 | }; |
234 | |
235 | using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>; |
236 | |
237 | struct DelayedInsertionMapCompare { |
238 | bool operator()(const DelayedInsertionMapKey& a, |
239 | const DelayedInsertionMapKey& b) const { |
240 | if (a.first == b.first) { |
241 | return a.second.Compare(b.second); |
242 | } |
243 | return a.first < b.first; |
244 | } |
245 | }; |
246 | |
247 | using DelayedInsertionMap = ZoneMap<DelayedInsertionMapKey, InstructionOperand, |
248 | DelayedInsertionMapCompare>; |
249 | |
250 | UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand, |
251 | void* hint, UsePositionHintType hint_type) |
252 | : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) { |
253 | DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone); |
254 | bool register_beneficial = true; |
255 | UsePositionType type = UsePositionType::kRegisterOrSlot; |
256 | if (operand_ != nullptr && operand_->IsUnallocated()) { |
257 | const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_); |
258 | if (unalloc->HasRegisterPolicy()) { |
259 | type = UsePositionType::kRequiresRegister; |
260 | } else if (unalloc->HasSlotPolicy()) { |
261 | type = UsePositionType::kRequiresSlot; |
262 | register_beneficial = false; |
263 | } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) { |
264 | type = UsePositionType::kRegisterOrSlotOrConstant; |
265 | register_beneficial = false; |
266 | } else { |
267 | register_beneficial = !unalloc->HasRegisterOrSlotPolicy(); |
268 | } |
269 | } |
270 | flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) | |
271 | RegisterBeneficialField::encode(register_beneficial) | |
272 | AssignedRegisterField::encode(kUnassignedRegister); |
273 | DCHECK(pos_.IsValid()); |
274 | } |
275 | |
276 | bool UsePosition::HasHint() const { |
277 | int hint_register; |
278 | return HintRegister(&hint_register); |
279 | } |
280 | |
281 | bool UsePosition::HintRegister(int* register_code) const { |
282 | if (hint_ == nullptr) return false; |
283 | switch (HintTypeField::decode(flags_)) { |
284 | case UsePositionHintType::kNone: |
285 | case UsePositionHintType::kUnresolved: |
286 | return false; |
287 | case UsePositionHintType::kUsePos: { |
288 | UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_); |
289 | int assigned_register = AssignedRegisterField::decode(use_pos->flags_); |
290 | if (assigned_register == kUnassignedRegister) return false; |
291 | *register_code = assigned_register; |
292 | return true; |
293 | } |
294 | case UsePositionHintType::kOperand: { |
295 | InstructionOperand* operand = |
296 | reinterpret_cast<InstructionOperand*>(hint_); |
297 | *register_code = LocationOperand::cast(operand)->register_code(); |
298 | return true; |
299 | } |
300 | case UsePositionHintType::kPhi: { |
301 | RegisterAllocationData::PhiMapValue* phi = |
302 | reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_); |
303 | int assigned_register = phi->assigned_register(); |
304 | if (assigned_register == kUnassignedRegister) return false; |
305 | *register_code = assigned_register; |
306 | return true; |
307 | } |
308 | } |
309 | UNREACHABLE(); |
310 | } |
311 | |
312 | UsePositionHintType UsePosition::HintTypeForOperand( |
313 | const InstructionOperand& op) { |
314 | switch (op.kind()) { |
315 | case InstructionOperand::CONSTANT: |
316 | case InstructionOperand::IMMEDIATE: |
317 | case InstructionOperand::EXPLICIT: |
318 | return UsePositionHintType::kNone; |
319 | case InstructionOperand::UNALLOCATED: |
320 | return UsePositionHintType::kUnresolved; |
321 | case InstructionOperand::ALLOCATED: |
322 | if (op.IsRegister() || op.IsFPRegister()) { |
323 | return UsePositionHintType::kOperand; |
324 | } else { |
325 | DCHECK(op.IsStackSlot() || op.IsFPStackSlot()); |
326 | return UsePositionHintType::kNone; |
327 | } |
328 | case InstructionOperand::INVALID: |
329 | break; |
330 | } |
331 | UNREACHABLE(); |
332 | } |
333 | |
334 | void UsePosition::SetHint(UsePosition* use_pos) { |
335 | DCHECK_NOT_NULL(use_pos); |
336 | hint_ = use_pos; |
337 | flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos); |
338 | } |
339 | |
340 | void UsePosition::ResolveHint(UsePosition* use_pos) { |
341 | DCHECK_NOT_NULL(use_pos); |
342 | if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return; |
343 | hint_ = use_pos; |
344 | flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos); |
345 | } |
346 | |
347 | void UsePosition::set_type(UsePositionType type, bool register_beneficial) { |
348 | DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial); |
349 | DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_)); |
350 | flags_ = TypeField::encode(type) | |
351 | RegisterBeneficialField::encode(register_beneficial) | |
352 | HintTypeField::encode(HintTypeField::decode(flags_)) | |
353 | AssignedRegisterField::encode(kUnassignedRegister); |
354 | } |
355 | |
356 | UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) { |
357 | DCHECK(Contains(pos) && pos != start()); |
358 | UseInterval* after = new (zone) UseInterval(pos, end_); |
359 | after->next_ = next_; |
360 | next_ = nullptr; |
361 | end_ = pos; |
362 | return after; |
363 | } |
364 | |
365 | void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; } |
366 | |
367 | std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) { |
368 | os << '@' << pos.ToInstructionIndex(); |
369 | if (pos.IsGapPosition()) { |
370 | os << 'g'; |
371 | } else { |
372 | os << 'i'; |
373 | } |
374 | if (pos.IsStart()) { |
375 | os << 's'; |
376 | } else { |
377 | os << 'e'; |
378 | } |
379 | return os; |
380 | } |
381 | |
382 | LiveRange::LiveRange(int relative_id, MachineRepresentation rep, |
383 | TopLevelLiveRange* top_level) |
384 | : relative_id_(relative_id), |
385 | bits_(0), |
386 | last_interval_(nullptr), |
387 | first_interval_(nullptr), |
388 | first_pos_(nullptr), |
389 | top_level_(top_level), |
390 | next_(nullptr), |
391 | current_interval_(nullptr), |
392 | last_processed_use_(nullptr), |
393 | current_hint_position_(nullptr), |
394 | splitting_pointer_(nullptr) { |
395 | DCHECK(AllocatedOperand::IsSupportedRepresentation(rep)); |
396 | bits_ = AssignedRegisterField::encode(kUnassignedRegister) | |
397 | RepresentationField::encode(rep) | |
398 | ControlFlowRegisterHint::encode(kUnassignedRegister); |
399 | } |
400 | |
401 | void LiveRange::VerifyPositions() const { |
402 | // Walk the positions, verifying that each is in an interval. |
403 | UseInterval* interval = first_interval_; |
404 | for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) { |
405 | CHECK(Start() <= pos->pos()); |
406 | CHECK(pos->pos() <= End()); |
407 | CHECK_NOT_NULL(interval); |
408 | while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) { |
409 | interval = interval->next(); |
410 | CHECK_NOT_NULL(interval); |
411 | } |
412 | } |
413 | } |
414 | |
415 | void LiveRange::VerifyIntervals() const { |
416 | DCHECK(first_interval()->start() == Start()); |
417 | LifetimePosition last_end = first_interval()->end(); |
418 | for (UseInterval* interval = first_interval()->next(); interval != nullptr; |
419 | interval = interval->next()) { |
420 | DCHECK(last_end <= interval->start()); |
421 | last_end = interval->end(); |
422 | } |
423 | DCHECK(last_end == End()); |
424 | } |
425 | |
426 | void LiveRange::set_assigned_register(int reg) { |
427 | DCHECK(!HasRegisterAssigned() && !spilled()); |
428 | bits_ = AssignedRegisterField::update(bits_, reg); |
429 | } |
430 | |
431 | void LiveRange::UnsetAssignedRegister() { |
432 | DCHECK(HasRegisterAssigned() && !spilled()); |
433 | bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister); |
434 | } |
435 | |
436 | void LiveRange::AttachToNext() { |
437 | DCHECK_NOT_NULL(next_); |
438 | DCHECK_NE(TopLevel()->last_child_covers_, next_); |
439 | last_interval_->set_next(next_->first_interval()); |
440 | next_->first_interval_ = nullptr; |
441 | last_interval_ = next_->last_interval_; |
442 | next_->last_interval_ = nullptr; |
443 | if (first_pos() == nullptr) { |
444 | first_pos_ = next_->first_pos(); |
445 | } else { |
446 | UsePosition* ptr = first_pos_; |
447 | while (ptr->next() != nullptr) { |
448 | ptr = ptr->next(); |
449 | } |
450 | ptr->set_next(next_->first_pos()); |
451 | } |
452 | next_->first_pos_ = nullptr; |
453 | LiveRange* old_next = next_; |
454 | next_ = next_->next_; |
455 | old_next->next_ = nullptr; |
456 | } |
457 | |
458 | void LiveRange::Unspill() { |
459 | DCHECK(spilled()); |
460 | set_spilled(false); |
461 | bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister); |
462 | } |
463 | |
464 | void LiveRange::Spill() { |
465 | DCHECK(!spilled()); |
466 | DCHECK(!TopLevel()->HasNoSpillType()); |
467 | set_spilled(true); |
468 | bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister); |
469 | } |
470 | |
471 | RegisterKind LiveRange::kind() const { |
472 | return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS; |
473 | } |
474 | |
475 | UsePosition* LiveRange::FirstHintPosition(int* register_index) const { |
476 | for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) { |
477 | if (pos->HintRegister(register_index)) return pos; |
478 | } |
479 | return nullptr; |
480 | } |
481 | |
482 | UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const { |
483 | UsePosition* use_pos = last_processed_use_; |
484 | if (use_pos == nullptr || use_pos->pos() > start) { |
485 | use_pos = first_pos(); |
486 | } |
487 | while (use_pos != nullptr && use_pos->pos() < start) { |
488 | use_pos = use_pos->next(); |
489 | } |
490 | last_processed_use_ = use_pos; |
491 | return use_pos; |
492 | } |
493 | |
494 | UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial( |
495 | LifetimePosition start) const { |
496 | UsePosition* pos = NextUsePosition(start); |
497 | while (pos != nullptr && !pos->RegisterIsBeneficial()) { |
498 | pos = pos->next(); |
499 | } |
500 | return pos; |
501 | } |
502 | |
503 | LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial( |
504 | const LifetimePosition& start) const { |
505 | UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start); |
506 | if (next_use == nullptr) return End(); |
507 | return next_use->pos(); |
508 | } |
509 | |
510 | UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial( |
511 | LifetimePosition start) const { |
512 | UsePosition* pos = first_pos(); |
513 | UsePosition* prev = nullptr; |
514 | while (pos != nullptr && pos->pos() < start) { |
515 | if (pos->RegisterIsBeneficial()) prev = pos; |
516 | pos = pos->next(); |
517 | } |
518 | return prev; |
519 | } |
520 | |
521 | UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { |
522 | UsePosition* pos = NextUsePosition(start); |
523 | while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { |
524 | pos = pos->next(); |
525 | } |
526 | return pos; |
527 | } |
528 | |
529 | UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const { |
530 | for (UsePosition* pos = NextUsePosition(start); pos != nullptr; |
531 | pos = pos->next()) { |
532 | if (pos->type() != UsePositionType::kRequiresSlot) continue; |
533 | return pos; |
534 | } |
535 | return nullptr; |
536 | } |
537 | |
538 | bool LiveRange::CanBeSpilled(LifetimePosition pos) const { |
539 | // We cannot spill a live range that has a use requiring a register |
540 | // at the current or the immediate next position. |
541 | UsePosition* use_pos = NextRegisterPosition(pos); |
542 | if (use_pos == nullptr) return true; |
543 | return use_pos->pos() > pos.NextStart().End(); |
544 | } |
545 | |
546 | bool LiveRange::IsTopLevel() const { return top_level_ == this; } |
547 | |
548 | InstructionOperand LiveRange::GetAssignedOperand() const { |
549 | DCHECK(!IsEmpty()); |
550 | if (HasRegisterAssigned()) { |
551 | DCHECK(!spilled()); |
552 | return AllocatedOperand(LocationOperand::REGISTER, representation(), |
553 | assigned_register()); |
554 | } |
555 | DCHECK(spilled()); |
556 | DCHECK(!HasRegisterAssigned()); |
557 | if (TopLevel()->HasSpillOperand()) { |
558 | InstructionOperand* op = TopLevel()->GetSpillOperand(); |
559 | DCHECK(!op->IsUnallocated()); |
560 | return *op; |
561 | } |
562 | return TopLevel()->GetSpillRangeOperand(); |
563 | } |
564 | |
565 | UseInterval* LiveRange::FirstSearchIntervalForPosition( |
566 | LifetimePosition position) const { |
567 | if (current_interval_ == nullptr) return first_interval_; |
568 | if (current_interval_->start() > position) { |
569 | current_interval_ = nullptr; |
570 | return first_interval_; |
571 | } |
572 | return current_interval_; |
573 | } |
574 | |
575 | void LiveRange::AdvanceLastProcessedMarker( |
576 | UseInterval* to_start_of, LifetimePosition but_not_past) const { |
577 | if (to_start_of == nullptr) return; |
578 | if (to_start_of->start() > but_not_past) return; |
579 | LifetimePosition start = current_interval_ == nullptr |
580 | ? LifetimePosition::Invalid() |
581 | : current_interval_->start(); |
582 | if (to_start_of->start() > start) { |
583 | current_interval_ = to_start_of; |
584 | } |
585 | } |
586 | |
587 | LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) { |
588 | int new_id = TopLevel()->GetNextChildId(); |
589 | LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel()); |
590 | child->set_bundle(bundle_); |
591 | // If we split, we do so because we're about to switch registers or move |
592 | // to/from a slot, so there's no value in connecting hints. |
593 | DetachAt(position, child, zone, DoNotConnectHints); |
594 | |
595 | child->top_level_ = TopLevel(); |
596 | child->next_ = next_; |
597 | next_ = child; |
598 | return child; |
599 | } |
600 | |
601 | UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result, |
602 | Zone* zone, |
603 | HintConnectionOption connect_hints) { |
604 | DCHECK(Start() < position); |
605 | DCHECK(End() > position); |
606 | DCHECK(result->IsEmpty()); |
607 | // Find the last interval that ends before the position. If the |
608 | // position is contained in one of the intervals in the chain, we |
609 | // split that interval and use the first part. |
610 | UseInterval* current = FirstSearchIntervalForPosition(position); |
611 | |
  // If the split position coincides with the beginning of a use interval,
  // we need to split use positions in a special way.
614 | bool split_at_start = false; |
615 | |
616 | if (current->start() == position) { |
617 | // When splitting at start we need to locate the previous use interval. |
618 | current = first_interval_; |
619 | } |
620 | |
621 | UseInterval* after = nullptr; |
622 | while (current != nullptr) { |
623 | if (current->Contains(position)) { |
624 | after = current->SplitAt(position, zone); |
625 | break; |
626 | } |
627 | UseInterval* next = current->next(); |
628 | if (next->start() >= position) { |
629 | split_at_start = (next->start() == position); |
630 | after = next; |
631 | current->set_next(nullptr); |
632 | break; |
633 | } |
634 | current = next; |
635 | } |
636 | DCHECK_NOT_NULL(after); |
637 | |
638 | // Partition original use intervals to the two live ranges. |
639 | UseInterval* before = current; |
640 | result->last_interval_ = |
641 | (last_interval_ == before) |
642 | ? after // Only interval in the range after split. |
643 | : last_interval_; // Last interval of the original range. |
644 | result->first_interval_ = after; |
645 | last_interval_ = before; |
646 | |
647 | // Find the last use position before the split and the first use |
648 | // position after it. |
649 | UsePosition* use_after = |
650 | splitting_pointer_ == nullptr || splitting_pointer_->pos() > position |
651 | ? first_pos() |
652 | : splitting_pointer_; |
653 | UsePosition* use_before = nullptr; |
654 | if (split_at_start) { |
    // The split position coincides with the beginning of a use interval
    // (the end of a lifetime hole). A use at this position should be
    // attributed to the split child, because the split child owns the use
    // interval covering it.
658 | while (use_after != nullptr && use_after->pos() < position) { |
659 | use_before = use_after; |
660 | use_after = use_after->next(); |
661 | } |
662 | } else { |
663 | while (use_after != nullptr && use_after->pos() <= position) { |
664 | use_before = use_after; |
665 | use_after = use_after->next(); |
666 | } |
667 | } |
668 | |
669 | // Partition original use positions to the two live ranges. |
670 | if (use_before != nullptr) { |
671 | use_before->set_next(nullptr); |
672 | } else { |
673 | first_pos_ = nullptr; |
674 | } |
675 | result->first_pos_ = use_after; |
676 | |
  // Discard cached iteration state. It might be pointing to a use that no
  // longer belongs to this live range.
679 | last_processed_use_ = nullptr; |
680 | current_interval_ = nullptr; |
681 | |
682 | if (connect_hints == ConnectHints && use_before != nullptr && |
683 | use_after != nullptr) { |
684 | use_after->SetHint(use_before); |
685 | } |
686 | #ifdef DEBUG |
687 | VerifyChildStructure(); |
688 | result->VerifyChildStructure(); |
689 | #endif |
690 | return use_before; |
691 | } |
692 | |
693 | void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) { |
694 | LiveRange* child = this; |
695 | for (; child != nullptr; child = child->next()) { |
696 | child->top_level_ = new_top_level; |
697 | } |
698 | } |
699 | |
700 | void LiveRange::ConvertUsesToOperand(const InstructionOperand& op, |
701 | const InstructionOperand& spill_op) { |
702 | for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) { |
703 | DCHECK(Start() <= pos->pos() && pos->pos() <= End()); |
704 | if (!pos->HasOperand()) continue; |
705 | switch (pos->type()) { |
706 | case UsePositionType::kRequiresSlot: |
707 | DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot()); |
708 | InstructionOperand::ReplaceWith(pos->operand(), &spill_op); |
709 | break; |
710 | case UsePositionType::kRequiresRegister: |
711 | DCHECK(op.IsRegister() || op.IsFPRegister()); |
712 | V8_FALLTHROUGH; |
713 | case UsePositionType::kRegisterOrSlot: |
714 | case UsePositionType::kRegisterOrSlotOrConstant: |
715 | InstructionOperand::ReplaceWith(pos->operand(), &op); |
716 | break; |
717 | } |
718 | } |
719 | } |
720 | |
721 | // This implements an ordering on live ranges so that they are ordered by their |
722 | // start positions. This is needed for the correctness of the register |
723 | // allocation algorithm. If two live ranges start at the same offset then there |
724 | // is a tie breaker based on where the value is first used. This part of the |
725 | // ordering is merely a heuristic. |
726 | bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const { |
727 | LifetimePosition start = Start(); |
728 | LifetimePosition other_start = other->Start(); |
729 | if (start == other_start) { |
    // Prefer the range that has a controlflow hint, to make sure it gets
    // allocated first. This allows the control-flow-aware allocation to
    // just put ranges back into the queue without other ranges interfering.
733 | if (controlflow_hint() < other->controlflow_hint()) { |
734 | return true; |
735 | } |
736 | // The other has a smaller hint. |
737 | if (controlflow_hint() > other->controlflow_hint()) { |
738 | return false; |
739 | } |
740 | // Both have the same hint or no hint at all. Use first use position. |
741 | UsePosition* pos = first_pos(); |
742 | UsePosition* other_pos = other->first_pos(); |
743 | // To make the order total, handle the case where both positions are null. |
744 | if (pos == other_pos) return TopLevel()->vreg() < other->TopLevel()->vreg(); |
745 | if (pos == nullptr) return false; |
746 | if (other_pos == nullptr) return true; |
747 | // To make the order total, handle the case where both positions are equal. |
748 | if (pos->pos() == other_pos->pos()) |
749 | return TopLevel()->vreg() < other->TopLevel()->vreg(); |
750 | return pos->pos() < other_pos->pos(); |
751 | } |
752 | return start < other_start; |
753 | } |
754 | |
755 | void LiveRange::SetUseHints(int register_index) { |
756 | for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) { |
757 | if (!pos->HasOperand()) continue; |
758 | switch (pos->type()) { |
759 | case UsePositionType::kRequiresSlot: |
760 | break; |
761 | case UsePositionType::kRequiresRegister: |
762 | case UsePositionType::kRegisterOrSlot: |
763 | case UsePositionType::kRegisterOrSlotOrConstant: |
764 | pos->set_assigned_register(register_index); |
765 | break; |
766 | } |
767 | } |
768 | } |
769 | |
770 | bool LiveRange::CanCover(LifetimePosition position) const { |
771 | if (IsEmpty()) return false; |
772 | return Start() <= position && position < End(); |
773 | } |
774 | |
775 | bool LiveRange::Covers(LifetimePosition position) const { |
776 | if (!CanCover(position)) return false; |
777 | UseInterval* start_search = FirstSearchIntervalForPosition(position); |
778 | for (UseInterval* interval = start_search; interval != nullptr; |
779 | interval = interval->next()) { |
780 | DCHECK(interval->next() == nullptr || |
781 | interval->next()->start() >= interval->start()); |
782 | AdvanceLastProcessedMarker(interval, position); |
783 | if (interval->Contains(position)) return true; |
784 | if (interval->start() > position) return false; |
785 | } |
786 | return false; |
787 | } |
788 | |
789 | LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const { |
790 | UseInterval* start_search = FirstSearchIntervalForPosition(position); |
791 | while (start_search->end() < position) { |
792 | start_search = start_search->next(); |
793 | } |
794 | return start_search->end(); |
795 | } |
796 | |
797 | LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) const { |
798 | UseInterval* start_search = FirstSearchIntervalForPosition(position); |
799 | while (start_search->start() < position) { |
800 | start_search = start_search->next(); |
801 | } |
802 | return start_search->start(); |
803 | } |
804 | |
805 | LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const { |
806 | UseInterval* b = other->first_interval(); |
807 | if (b == nullptr) return LifetimePosition::Invalid(); |
808 | LifetimePosition advance_last_processed_up_to = b->start(); |
809 | UseInterval* a = FirstSearchIntervalForPosition(b->start()); |
810 | while (a != nullptr && b != nullptr) { |
811 | if (a->start() > other->End()) break; |
812 | if (b->start() > End()) break; |
813 | LifetimePosition cur_intersection = a->Intersect(b); |
814 | if (cur_intersection.IsValid()) { |
815 | return cur_intersection; |
816 | } |
817 | if (a->start() < b->start()) { |
818 | a = a->next(); |
819 | if (a == nullptr || a->start() > other->End()) break; |
820 | AdvanceLastProcessedMarker(a, advance_last_processed_up_to); |
821 | } else { |
822 | b = b->next(); |
823 | } |
824 | } |
825 | return LifetimePosition::Invalid(); |
826 | } |
827 | |
828 | void LiveRange::Print(const RegisterConfiguration* config, |
829 | bool with_children) const { |
830 | StdoutStream os; |
831 | PrintableLiveRange wrapper; |
832 | wrapper.register_configuration_ = config; |
833 | for (const LiveRange* i = this; i != nullptr; i = i->next()) { |
834 | wrapper.range_ = i; |
835 | os << wrapper << std::endl; |
836 | if (!with_children) break; |
837 | } |
838 | } |
839 | |
840 | void LiveRange::Print(bool with_children) const { |
841 | Print(RegisterConfiguration::Default(), with_children); |
842 | } |
843 | |
844 | bool LiveRange::RegisterFromBundle(int* hint) const { |
845 | if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false; |
846 | *hint = bundle_->reg(); |
847 | return true; |
848 | } |
849 | |
850 | void LiveRange::UpdateBundleRegister(int reg) const { |
851 | if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return; |
852 | bundle_->set_reg(reg); |
853 | } |
854 | |
855 | struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject { |
856 | SpillMoveInsertionList(int gap_index, InstructionOperand* operand, |
857 | SpillMoveInsertionList* next) |
858 | : gap_index(gap_index), operand(operand), next(next) {} |
859 | const int gap_index; |
860 | InstructionOperand* const operand; |
861 | SpillMoveInsertionList* const next; |
862 | }; |
863 | |
864 | TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep) |
865 | : LiveRange(0, rep, this), |
866 | vreg_(vreg), |
867 | last_child_id_(0), |
868 | splintered_from_(nullptr), |
869 | spill_operand_(nullptr), |
870 | spill_move_insertion_locations_(nullptr), |
871 | spilled_in_deferred_blocks_(false), |
872 | spill_start_index_(kMaxInt), |
873 | last_pos_(nullptr), |
874 | last_child_covers_(this), |
875 | splinter_(nullptr), |
876 | has_preassigned_slot_(false) { |
877 | bits_ |= SpillTypeField::encode(SpillType::kNoSpillType); |
878 | } |
879 | |
880 | #if DEBUG |
881 | int TopLevelLiveRange::debug_virt_reg() const { |
882 | return IsSplinter() ? splintered_from()->vreg() : vreg(); |
883 | } |
884 | #endif |
885 | |
886 | void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index, |
887 | InstructionOperand* operand) { |
888 | DCHECK(HasNoSpillType()); |
889 | spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList( |
890 | gap_index, operand, spill_move_insertion_locations_); |
891 | } |
892 | |
893 | void TopLevelLiveRange::CommitSpillMoves(RegisterAllocationData* data, |
894 | const InstructionOperand& op, |
895 | bool might_be_duplicated) { |
896 | DCHECK_IMPLIES(op.IsConstant(), |
897 | GetSpillMoveInsertionLocations(data) == nullptr); |
898 | InstructionSequence* sequence = data->code(); |
899 | Zone* zone = sequence->zone(); |
900 | |
901 | for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data); |
902 | to_spill != nullptr; to_spill = to_spill->next) { |
903 | Instruction* instr = sequence->InstructionAt(to_spill->gap_index); |
904 | ParallelMove* move = |
905 | instr->GetOrCreateParallelMove(Instruction::START, zone); |
906 | // Skip insertion if it's possible that the move exists already as a |
907 | // constraint move from a fixed output register to a slot. |
908 | if (might_be_duplicated || has_preassigned_slot()) { |
909 | bool found = false; |
910 | for (MoveOperands* move_op : *move) { |
911 | if (move_op->IsEliminated()) continue; |
912 | if (move_op->source().Equals(*to_spill->operand) && |
913 | move_op->destination().Equals(op)) { |
914 | found = true; |
915 | if (has_preassigned_slot()) move_op->Eliminate(); |
916 | break; |
917 | } |
918 | } |
919 | if (found) continue; |
920 | } |
921 | if (!has_preassigned_slot()) { |
922 | move->AddMove(*to_spill->operand, op); |
923 | } |
924 | } |
925 | } |
926 | |
927 | void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) { |
928 | DCHECK(HasNoSpillType()); |
929 | DCHECK(!operand->IsUnallocated() && !operand->IsImmediate()); |
930 | set_spill_type(SpillType::kSpillOperand); |
931 | spill_operand_ = operand; |
932 | } |
933 | |
934 | void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) { |
935 | DCHECK(!HasSpillOperand()); |
936 | DCHECK(spill_range); |
937 | spill_range_ = spill_range; |
938 | } |
939 | |
940 | AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const { |
941 | SpillRange* spill_range = GetSpillRange(); |
942 | int index = spill_range->assigned_slot(); |
943 | return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index); |
944 | } |
945 | |
946 | void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end, |
947 | Zone* zone) { |
948 | DCHECK(start != Start() || end != End()); |
949 | DCHECK(start < end); |
950 | |
951 | TopLevelLiveRange splinter_temp(-1, representation()); |
952 | UsePosition* last_in_splinter = nullptr; |
953 | // Live ranges defined in deferred blocks stay in deferred blocks, so we |
954 | // don't need to splinter them. That means that start should always be |
955 | // after the beginning of the range. |
956 | DCHECK(start > Start()); |
957 | |
958 | if (end >= End()) { |
959 | DCHECK(start > Start()); |
960 | DetachAt(start, &splinter_temp, zone, ConnectHints); |
961 | next_ = nullptr; |
962 | } else { |
963 | DCHECK(start < End() && Start() < end); |
964 | |
965 | const int kInvalidId = std::numeric_limits<int>::max(); |
966 | |
967 | UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints); |
968 | |
969 | LiveRange end_part(kInvalidId, this->representation(), nullptr); |
970 | // The last chunk exits the deferred region, and we don't want to connect |
971 | // hints here, because the non-deferred region shouldn't be affected |
972 | // by allocation decisions on the deferred path. |
973 | last_in_splinter = |
974 | splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints); |
975 | |
976 | next_ = end_part.next_; |
977 | last_interval_->set_next(end_part.first_interval_); |
978 | // The next splinter will happen either at or after the current interval. |
979 | // We can optimize DetachAt by setting current_interval_ accordingly, |
980 | // which will then be picked up by FirstSearchIntervalForPosition. |
981 | current_interval_ = last_interval_; |
982 | last_interval_ = end_part.last_interval_; |
983 | |
984 | if (first_pos_ == nullptr) { |
985 | first_pos_ = end_part.first_pos_; |
986 | } else { |
987 | splitting_pointer_ = last; |
988 | if (last != nullptr) last->set_next(end_part.first_pos_); |
989 | } |
990 | } |
991 | |
992 | if (splinter()->IsEmpty()) { |
993 | splinter()->first_interval_ = splinter_temp.first_interval_; |
994 | splinter()->last_interval_ = splinter_temp.last_interval_; |
995 | } else { |
996 | splinter()->last_interval_->set_next(splinter_temp.first_interval_); |
997 | splinter()->last_interval_ = splinter_temp.last_interval_; |
998 | } |
999 | if (splinter()->first_pos() == nullptr) { |
1000 | splinter()->first_pos_ = splinter_temp.first_pos_; |
1001 | } else { |
1002 | splinter()->last_pos_->set_next(splinter_temp.first_pos_); |
1003 | } |
1004 | if (last_in_splinter != nullptr) { |
1005 | splinter()->last_pos_ = last_in_splinter; |
1006 | } else { |
1007 | if (splinter()->first_pos() != nullptr && |
1008 | splinter()->last_pos_ == nullptr) { |
1009 | splinter()->last_pos_ = splinter()->first_pos(); |
1010 | for (UsePosition* pos = splinter()->first_pos(); pos != nullptr; |
1011 | pos = pos->next()) { |
1012 | splinter()->last_pos_ = pos; |
1013 | } |
1014 | } |
1015 | } |
1016 | #if DEBUG |
1017 | Verify(); |
1018 | splinter()->Verify(); |
1019 | #endif |
1020 | } |
1021 | |
1022 | void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) { |
1023 | splintered_from_ = splinter_parent; |
1024 | if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) { |
1025 | SetSpillRange(splinter_parent->spill_range_); |
1026 | } |
1027 | } |
1028 | |
1029 | void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) { |
1030 | DCHECK(merged->TopLevel() == this); |
1031 | |
1032 | if (HasNoSpillType() && merged->HasSpillRange()) { |
1033 | set_spill_type(merged->spill_type()); |
1034 | DCHECK_LT(0, GetSpillRange()->live_ranges().size()); |
1035 | merged->spill_range_ = nullptr; |
1036 | merged->bits_ = |
1037 | SpillTypeField::update(merged->bits_, SpillType::kNoSpillType); |
1038 | } |
1039 | } |
1040 | |
1041 | void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) { |
1042 | DCHECK(Start() < other->Start()); |
1043 | DCHECK(other->splintered_from() == this); |
1044 | |
1045 | LiveRange* first = this; |
1046 | LiveRange* second = other; |
1047 | DCHECK(first->Start() < second->Start()); |
1048 | while (first != nullptr && second != nullptr) { |
1049 | DCHECK(first != second); |
1050 | // Make sure the ranges are in order each time we iterate. |
1051 | if (second->Start() < first->Start()) { |
1052 | LiveRange* tmp = second; |
1053 | second = first; |
1054 | first = tmp; |
1055 | continue; |
1056 | } |
1057 | |
1058 | if (first->End() <= second->Start()) { |
1059 | if (first->next() == nullptr || |
1060 | first->next()->Start() > second->Start()) { |
1061 | // First is in order before second. |
1062 | LiveRange* temp = first->next(); |
1063 | first->next_ = second; |
1064 | first = temp; |
1065 | } else { |
1066 | // First is in order before its successor (or second), so advance first. |
1067 | first = first->next(); |
1068 | } |
1069 | continue; |
1070 | } |
1071 | |
1072 | DCHECK(first->Start() < second->Start()); |
1073 | // If first and second intersect, split first. |
1074 | if (first->Start() < second->End() && second->Start() < first->End()) { |
1075 | LiveRange* temp = first->SplitAt(second->Start(), zone); |
1076 | CHECK(temp != first); |
1077 | temp->set_spilled(first->spilled()); |
1078 | if (!temp->spilled()) |
1079 | temp->set_assigned_register(first->assigned_register()); |
1080 | |
1081 | first->next_ = second; |
1082 | first = temp; |
1083 | continue; |
1084 | } |
1085 | DCHECK(first->End() <= second->Start()); |
1086 | } |
1087 | |
1088 | TopLevel()->UpdateParentForAllChildren(TopLevel()); |
1089 | TopLevel()->UpdateSpillRangePostMerge(other); |
1090 | TopLevel()->register_slot_use(other->slot_use_kind()); |
1091 | |
1092 | #if DEBUG |
1093 | Verify(); |
1094 | #endif |
1095 | } |
1096 | |
1097 | void TopLevelLiveRange::VerifyChildrenInOrder() const { |
1098 | LifetimePosition last_end = End(); |
1099 | for (const LiveRange* child = this->next(); child != nullptr; |
1100 | child = child->next()) { |
1101 | DCHECK(last_end <= child->Start()); |
1102 | last_end = child->End(); |
1103 | } |
1104 | } |
1105 | |
1106 | LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) { |
1107 | LiveRange* child = last_child_covers_; |
1108 | while (child != nullptr && child->End() <= pos) { |
1109 | child = child->next(); |
1110 | } |
1111 | last_child_covers_ = child; |
  return child == nullptr || !child->Covers(pos) ? nullptr : child;
1113 | } |
1114 | |
1115 | void TopLevelLiveRange::Verify() const { |
1116 | VerifyChildrenInOrder(); |
1117 | for (const LiveRange* child = this; child != nullptr; child = child->next()) { |
1118 | VerifyChildStructure(); |
1119 | } |
1120 | } |
1121 | |
1122 | void TopLevelLiveRange::ShortenTo(LifetimePosition start) { |
1123 | TRACE("Shorten live range %d to [%d\n" , vreg(), start.value()); |
1124 | DCHECK_NOT_NULL(first_interval_); |
1125 | DCHECK(first_interval_->start() <= start); |
1126 | DCHECK(start < first_interval_->end()); |
1127 | first_interval_->set_start(start); |
1128 | } |
1129 | |
1130 | void TopLevelLiveRange::EnsureInterval(LifetimePosition start, |
1131 | LifetimePosition end, Zone* zone) { |
1132 | TRACE("Ensure live range %d in interval [%d %d[\n" , vreg(), start.value(), |
1133 | end.value()); |
1134 | LifetimePosition new_end = end; |
1135 | while (first_interval_ != nullptr && first_interval_->start() <= end) { |
1136 | if (first_interval_->end() > end) { |
1137 | new_end = first_interval_->end(); |
1138 | } |
1139 | first_interval_ = first_interval_->next(); |
1140 | } |
1141 | |
1142 | UseInterval* new_interval = new (zone) UseInterval(start, new_end); |
1143 | new_interval->set_next(first_interval_); |
1144 | first_interval_ = new_interval; |
1145 | if (new_interval->next() == nullptr) { |
1146 | last_interval_ = new_interval; |
1147 | } |
1148 | } |
1149 | |
1150 | void TopLevelLiveRange::AddUseInterval(LifetimePosition start, |
1151 | LifetimePosition end, Zone* zone) { |
1152 | TRACE("Add to live range %d interval [%d %d[\n" , vreg(), start.value(), |
1153 | end.value()); |
1154 | if (first_interval_ == nullptr) { |
1155 | UseInterval* interval = new (zone) UseInterval(start, end); |
1156 | first_interval_ = interval; |
1157 | last_interval_ = interval; |
1158 | } else { |
1159 | if (end == first_interval_->start()) { |
1160 | first_interval_->set_start(start); |
1161 | } else if (end < first_interval_->start()) { |
1162 | UseInterval* interval = new (zone) UseInterval(start, end); |
1163 | interval->set_next(first_interval_); |
1164 | first_interval_ = interval; |
1165 | } else { |
      // The order of instruction processing (see ProcessInstructions)
      // guarantees that each new use interval either precedes, intersects
      // with, or touches the last added interval.
1169 | DCHECK(start <= first_interval_->end()); |
1170 | first_interval_->set_start(Min(start, first_interval_->start())); |
1171 | first_interval_->set_end(Max(end, first_interval_->end())); |
1172 | } |
1173 | } |
1174 | } |
1175 | |
1176 | void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) { |
1177 | LifetimePosition pos = use_pos->pos(); |
1178 | TRACE("Add to live range %d use position %d\n" , vreg(), pos.value()); |
1179 | UsePosition* prev_hint = nullptr; |
1180 | UsePosition* prev = nullptr; |
1181 | UsePosition* current = first_pos_; |
1182 | while (current != nullptr && current->pos() < pos) { |
1183 | prev_hint = current->HasHint() ? current : prev_hint; |
1184 | prev = current; |
1185 | current = current->next(); |
1186 | } |
1187 | |
1188 | if (prev == nullptr) { |
1189 | use_pos->set_next(first_pos_); |
1190 | first_pos_ = use_pos; |
1191 | } else { |
1192 | use_pos->set_next(prev->next()); |
1193 | prev->set_next(use_pos); |
1194 | } |
1195 | |
1196 | if (prev_hint == nullptr && use_pos->HasHint()) { |
1197 | current_hint_position_ = use_pos; |
1198 | } |
1199 | } |
1200 | |
1201 | static bool AreUseIntervalsIntersecting(UseInterval* interval1, |
1202 | UseInterval* interval2) { |
1203 | while (interval1 != nullptr && interval2 != nullptr) { |
1204 | if (interval1->start() < interval2->start()) { |
1205 | if (interval1->end() > interval2->start()) { |
1206 | return true; |
1207 | } |
1208 | interval1 = interval1->next(); |
1209 | } else { |
1210 | if (interval2->end() > interval1->start()) { |
1211 | return true; |
1212 | } |
1213 | interval2 = interval2->next(); |
1214 | } |
1215 | } |
1216 | return false; |
1217 | } |
1218 | |
1219 | std::ostream& operator<<(std::ostream& os, |
1220 | const PrintableLiveRange& printable_range) { |
1221 | const LiveRange* range = printable_range.range_; |
1222 | os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id() |
1223 | << " " ; |
1224 | if (range->TopLevel()->is_phi()) os << "phi " ; |
1225 | if (range->TopLevel()->is_non_loop_phi()) os << "nlphi " ; |
1226 | |
1227 | os << "{" << std::endl; |
1228 | UseInterval* interval = range->first_interval(); |
1229 | UsePosition* use_pos = range->first_pos(); |
1230 | while (use_pos != nullptr) { |
1231 | if (use_pos->HasOperand()) { |
1232 | os << *use_pos->operand() << use_pos->pos() << " " ; |
1233 | } |
1234 | use_pos = use_pos->next(); |
1235 | } |
1236 | os << std::endl; |
1237 | |
1238 | while (interval != nullptr) { |
1239 | os << '[' << interval->start() << ", " << interval->end() << ')' |
1240 | << std::endl; |
1241 | interval = interval->next(); |
1242 | } |
1243 | os << "}" ; |
1244 | return os; |
1245 | } |
1246 | |
1247 | namespace { |
1248 | void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) { |
1249 | os << " " ; |
1250 | for (auto block : blocks) { |
1251 | LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex( |
1252 | block->first_instruction_index()); |
1253 | LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex( |
1254 | block->last_instruction_index()) |
1255 | .NextFullStart(); |
1256 | int length = end_pos.value() - start_pos.value(); |
1257 | constexpr int kMaxPrefixLength = 32; |
1258 | char buffer[kMaxPrefixLength]; |
1259 | int rpo_number = block->rpo_number().ToInt(); |
    const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
1261 | int max_prefix_length = std::min(length, kMaxPrefixLength); |
    int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
                          deferred_marker);
1264 | os << buffer; |
1265 | int remaining = length - std::min(prefix, max_prefix_length) - 1; |
1266 | for (int i = 0; i < remaining; ++i) os << '-'; |
1267 | os << ']'; |
1268 | } |
1269 | os << '\n'; |
1270 | } |
1271 | } // namespace |
1272 | |
1273 | void LinearScanAllocator::PrintRangeRow(std::ostream& os, |
1274 | const TopLevelLiveRange* toplevel) { |
1275 | int position = 0; |
1276 | os << std::setw(3) << toplevel->vreg() |
1277 | << (toplevel->IsSplinter() ? "s:" : ": " ); |
1278 | |
1279 | const char* kind_string; |
  switch (toplevel->spill_type()) {
    case TopLevelLiveRange::SpillType::kSpillRange:
      kind_string = "ss";
      break;
    case TopLevelLiveRange::SpillType::kDeferredSpillRange:
      kind_string = "sd";
      break;
    case TopLevelLiveRange::SpillType::kSpillOperand:
      kind_string = "so";
      break;
    default:
      kind_string = "s?";
  }
1293 | |
1294 | for (const LiveRange* range = toplevel; range != nullptr; |
1295 | range = range->next()) { |
1296 | for (UseInterval* interval = range->first_interval(); interval != nullptr; |
1297 | interval = interval->next()) { |
1298 | LifetimePosition start = interval->start(); |
1299 | LifetimePosition end = interval->end(); |
1300 | CHECK_GE(start.value(), position); |
1301 | for (; start.value() > position; position++) { |
1302 | os << ' '; |
1303 | } |
1304 | int length = end.value() - start.value(); |
1305 | constexpr int kMaxPrefixLength = 32; |
1306 | char buffer[kMaxPrefixLength]; |
1307 | int max_prefix_length = std::min(length + 1, kMaxPrefixLength); |
1308 | int prefix; |
1309 | if (range->spilled()) { |
        prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
1311 | } else { |
1312 | const char* reg_name; |
1313 | if (range->assigned_register() == kUnassignedRegister) { |
1314 | reg_name = "???" ; |
1315 | } else { |
1316 | reg_name = RegisterName(range->assigned_register()); |
1317 | } |
        prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name);
1319 | } |
1320 | os << buffer; |
1321 | position += std::min(prefix, max_prefix_length - 1); |
1322 | CHECK_GE(end.value(), position); |
1323 | const char line_style = range->spilled() ? '-' : '='; |
1324 | for (; end.value() > position; position++) { |
1325 | os << line_style; |
1326 | } |
1327 | } |
1328 | } |
1329 | os << '\n'; |
1330 | } |
1331 | |
1332 | void LinearScanAllocator::PrintRangeOverview(std::ostream& os) { |
1333 | PrintBlockRow(os, code()->instruction_blocks()); |
1334 | for (auto const toplevel : data()->fixed_live_ranges()) { |
1335 | if (toplevel == nullptr) continue; |
1336 | PrintRangeRow(os, toplevel); |
1337 | } |
1338 | int rowcount = 0; |
1339 | for (auto toplevel : data()->live_ranges()) { |
1340 | if (!CanProcessRange(toplevel)) continue; |
1341 | if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks()); |
1342 | PrintRangeRow(os, toplevel); |
1343 | } |
1344 | } |
1345 | |
1346 | SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone) |
1347 | : live_ranges_(zone), |
1348 | assigned_slot_(kUnassignedSlot), |
1349 | byte_width_(GetByteWidth(parent->representation())) { |
1350 | // Spill ranges are created for top level, non-splintered ranges. This is so |
1351 | // that, when merging decisions are made, we consider the full extent of the |
1352 | // virtual register, and avoid clobbering it. |
1353 | DCHECK(!parent->IsSplinter()); |
1354 | UseInterval* result = nullptr; |
1355 | UseInterval* node = nullptr; |
1356 | // Copy the intervals for all ranges. |
1357 | for (LiveRange* range = parent; range != nullptr; range = range->next()) { |
1358 | UseInterval* src = range->first_interval(); |
1359 | while (src != nullptr) { |
1360 | UseInterval* new_node = new (zone) UseInterval(src->start(), src->end()); |
1361 | if (result == nullptr) { |
1362 | result = new_node; |
1363 | } else { |
1364 | node->set_next(new_node); |
1365 | } |
1366 | node = new_node; |
1367 | src = src->next(); |
1368 | } |
1369 | } |
1370 | use_interval_ = result; |
1371 | live_ranges().push_back(parent); |
1372 | end_position_ = node->end(); |
1373 | parent->SetSpillRange(this); |
1374 | } |
1375 | |
1376 | bool SpillRange::IsIntersectingWith(SpillRange* other) const { |
1377 | if (this->use_interval_ == nullptr || other->use_interval_ == nullptr || |
1378 | this->End() <= other->use_interval_->start() || |
1379 | other->End() <= this->use_interval_->start()) { |
1380 | return false; |
1381 | } |
1382 | return AreUseIntervalsIntersecting(use_interval_, other->use_interval_); |
1383 | } |
1384 | |
1385 | bool SpillRange::TryMerge(SpillRange* other) { |
1386 | if (HasSlot() || other->HasSlot()) return false; |
1387 | if (byte_width() != other->byte_width() || IsIntersectingWith(other)) |
1388 | return false; |
1389 | |
1390 | LifetimePosition max = LifetimePosition::MaxPosition(); |
1391 | if (End() < other->End() && other->End() != max) { |
1392 | end_position_ = other->End(); |
1393 | } |
1394 | other->end_position_ = max; |
1395 | |
1396 | MergeDisjointIntervals(other->use_interval_); |
1397 | other->use_interval_ = nullptr; |
1398 | |
1399 | for (TopLevelLiveRange* range : other->live_ranges()) { |
1400 | DCHECK(range->GetSpillRange() == other); |
1401 | range->SetSpillRange(this); |
1402 | } |
1403 | |
1404 | live_ranges().insert(live_ranges().end(), other->live_ranges().begin(), |
1405 | other->live_ranges().end()); |
1406 | other->live_ranges().clear(); |
1407 | |
1408 | return true; |
1409 | } |
1410 | |
1411 | void SpillRange::MergeDisjointIntervals(UseInterval* other) { |
1412 | UseInterval* tail = nullptr; |
1413 | UseInterval* current = use_interval_; |
1414 | while (other != nullptr) { |
1415 | // Make sure the 'current' list starts first |
1416 | if (current == nullptr || current->start() > other->start()) { |
1417 | std::swap(current, other); |
1418 | } |
1419 | // Check disjointness |
1420 | DCHECK(other == nullptr || current->end() <= other->start()); |
1421 | // Append the 'current' node to the result accumulator and move forward |
1422 | if (tail == nullptr) { |
1423 | use_interval_ = current; |
1424 | } else { |
1425 | tail->set_next(current); |
1426 | } |
1427 | tail = current; |
1428 | current = current->next(); |
1429 | } |
1430 | // Other list is empty => we are done |
1431 | } |
1432 | |
1433 | void SpillRange::Print() const { |
1434 | StdoutStream os; |
1435 | os << "{" << std::endl; |
1436 | for (TopLevelLiveRange* range : live_ranges()) { |
1437 | os << range->vreg() << " " ; |
1438 | } |
1439 | os << std::endl; |
1440 | |
1441 | for (UseInterval* i = interval(); i != nullptr; i = i->next()) { |
1442 | os << '[' << i->start() << ", " << i->end() << ')' << std::endl; |
1443 | } |
1444 | os << "}" << std::endl; |
1445 | } |
1446 | |
1447 | RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi, |
1448 | const InstructionBlock* block, |
1449 | Zone* zone) |
1450 | : phi_(phi), |
1451 | block_(block), |
1452 | incoming_operands_(zone), |
1453 | assigned_register_(kUnassignedRegister) { |
1454 | incoming_operands_.reserve(phi->operands().size()); |
1455 | } |
1456 | |
1457 | void RegisterAllocationData::PhiMapValue::AddOperand( |
1458 | InstructionOperand* operand) { |
1459 | incoming_operands_.push_back(operand); |
1460 | } |
1461 | |
1462 | void RegisterAllocationData::PhiMapValue::CommitAssignment( |
1463 | const InstructionOperand& assigned) { |
1464 | for (InstructionOperand* operand : incoming_operands_) { |
1465 | InstructionOperand::ReplaceWith(operand, &assigned); |
1466 | } |
1467 | } |
1468 | |
1469 | RegisterAllocationData::RegisterAllocationData( |
1470 | const RegisterConfiguration* config, Zone* zone, Frame* frame, |
1471 | InstructionSequence* code, RegisterAllocationFlags flags, |
1472 | const char* debug_name) |
1473 | : allocation_zone_(zone), |
1474 | frame_(frame), |
1475 | code_(code), |
1476 | debug_name_(debug_name), |
1477 | config_(config), |
1478 | phi_map_(allocation_zone()), |
1479 | live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()), |
1480 | live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()), |
1481 | live_ranges_(code->VirtualRegisterCount() * 2, nullptr, |
1482 | allocation_zone()), |
1483 | fixed_live_ranges_(kNumberOfFixedRangesPerRegister * |
1484 | this->config()->num_general_registers(), |
1485 | nullptr, allocation_zone()), |
1486 | fixed_float_live_ranges_(allocation_zone()), |
1487 | fixed_double_live_ranges_(kNumberOfFixedRangesPerRegister * |
1488 | this->config()->num_double_registers(), |
1489 | nullptr, allocation_zone()), |
1490 | fixed_simd128_live_ranges_(allocation_zone()), |
1491 | spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()), |
1492 | delayed_references_(allocation_zone()), |
1493 | assigned_registers_(nullptr), |
1494 | assigned_double_registers_(nullptr), |
1495 | virtual_register_count_(code->VirtualRegisterCount()), |
1496 | preassigned_slot_ranges_(zone), |
1497 | spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone), |
1498 | zone), |
1499 | flags_(flags) { |
1500 | if (!kSimpleFPAliasing) { |
1501 | fixed_float_live_ranges_.resize( |
1502 | kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(), |
1503 | nullptr); |
1504 | fixed_simd128_live_ranges_.resize( |
1505 | kNumberOfFixedRangesPerRegister * |
1506 | this->config()->num_simd128_registers(), |
1507 | nullptr); |
1508 | } |
1509 | |
1510 | assigned_registers_ = new (code_zone()) |
1511 | BitVector(this->config()->num_general_registers(), code_zone()); |
1512 | assigned_double_registers_ = new (code_zone()) |
1513 | BitVector(this->config()->num_double_registers(), code_zone()); |
1514 | fixed_register_use_ = new (code_zone()) |
1515 | BitVector(this->config()->num_general_registers(), code_zone()); |
1516 | fixed_fp_register_use_ = new (code_zone()) |
1517 | BitVector(this->config()->num_double_registers(), code_zone()); |
1518 | |
1519 | this->frame()->SetAllocatedRegisters(assigned_registers_); |
1520 | this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_); |
1521 | } |
1522 | |
1523 | MoveOperands* RegisterAllocationData::AddGapMove( |
1524 | int index, Instruction::GapPosition position, |
1525 | const InstructionOperand& from, const InstructionOperand& to) { |
1526 | Instruction* instr = code()->InstructionAt(index); |
1527 | ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone()); |
1528 | return moves->AddMove(from, to); |
1529 | } |
1530 | |
1531 | MachineRepresentation RegisterAllocationData::RepresentationFor( |
1532 | int virtual_register) { |
1533 | DCHECK_LT(virtual_register, code()->VirtualRegisterCount()); |
1534 | return code()->GetRepresentation(virtual_register); |
1535 | } |
1536 | |
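// Returns the top-level live range for the given virtual register, lazily
// creating it (and growing the live range table) on first request.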
1537 | TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) { |
1538 | if (index >= static_cast<int>(live_ranges().size())) { |
1539 | live_ranges().resize(index + 1, nullptr); |
1540 | } |
1541 | TopLevelLiveRange* result = live_ranges()[index]; |
1542 | if (result == nullptr) { |
1543 | result = NewLiveRange(index, RepresentationFor(index)); |
1544 | live_ranges()[index] = result; |
1545 | } |
1546 | return result; |
1547 | } |
1548 | |
1549 | TopLevelLiveRange* RegisterAllocationData::NewLiveRange( |
1550 | int index, MachineRepresentation rep) { |
1551 | return new (allocation_zone()) TopLevelLiveRange(index, rep); |
1552 | } |
1553 | |
1554 | int RegisterAllocationData::GetNextLiveRangeId() { |
1555 | int vreg = virtual_register_count_++; |
1556 | if (vreg >= static_cast<int>(live_ranges().size())) { |
1557 | live_ranges().resize(vreg + 1, nullptr); |
1558 | } |
1559 | return vreg; |
1560 | } |
1561 | |
1562 | TopLevelLiveRange* RegisterAllocationData::NextLiveRange( |
1563 | MachineRepresentation rep) { |
1564 | int vreg = GetNextLiveRangeId(); |
1565 | TopLevelLiveRange* ret = NewLiveRange(vreg, rep); |
1566 | return ret; |
1567 | } |
1568 | |
1569 | RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( |
1570 | const InstructionBlock* block, PhiInstruction* phi) { |
1571 | RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone()) |
1572 | RegisterAllocationData::PhiMapValue(phi, block, allocation_zone()); |
1573 | auto res = |
1574 | phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); |
1575 | DCHECK(res.second); |
1576 | USE(res); |
1577 | return map_value; |
1578 | } |
1579 | |
1580 | RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
1581 | int virtual_register) { |
1582 | auto it = phi_map_.find(virtual_register); |
1583 | DCHECK(it != phi_map_.end()); |
1584 | return it->second; |
1585 | } |
1586 | |
1587 | RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
1588 | TopLevelLiveRange* top_range) { |
1589 | return GetPhiMapValueFor(top_range->vreg()); |
1590 | } |
1591 | |
1592 | bool RegisterAllocationData::ExistsUseWithoutDefinition() { |
1593 | bool found = false; |
1594 | BitVector::Iterator iterator(live_in_sets()[0]); |
1595 | while (!iterator.Done()) { |
1596 | found = true; |
1597 | int operand_index = iterator.Current(); |
1598 | PrintF("Register allocator error: live v%d reached first block.\n" , |
1599 | operand_index); |
1600 | LiveRange* range = GetOrCreateLiveRangeFor(operand_index); |
1601 | PrintF(" (first use is at %d)\n" , range->first_pos()->pos().value()); |
1602 | if (debug_name() == nullptr) { |
1603 | PrintF("\n" ); |
1604 | } else { |
1605 | PrintF(" (function: %s)\n" , debug_name()); |
1606 | } |
1607 | iterator.Advance(); |
1608 | } |
1609 | return found; |
1610 | } |
1611 | |
// If a range is defined in a deferred block, we can expect the entire range
// to only cover positions in deferred blocks. Otherwise, a block on the
// hot path would be dominated by a deferred block, meaning it is unreachable
// without passing through the deferred block, which is contradictory.
// In particular, when such a range contributes a result back on the hot
// path, it will do so as one of the inputs of a phi. In that case, the value
// will be transferred via a move in the Gap::END of the last instruction
// of a deferred block.
1620 | bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() { |
1621 | const size_t live_ranges_size = live_ranges().size(); |
1622 | for (const TopLevelLiveRange* range : live_ranges()) { |
1623 | CHECK_EQ(live_ranges_size, |
1624 | live_ranges().size()); // TODO(neis): crbug.com/831822 |
1625 | if (range == nullptr || range->IsEmpty() || |
1626 | !code() |
1627 | ->GetInstructionBlock(range->Start().ToInstructionIndex()) |
1628 | ->IsDeferred()) { |
1629 | continue; |
1630 | } |
1631 | for (const UseInterval* i = range->first_interval(); i != nullptr; |
1632 | i = i->next()) { |
1633 | int first = i->FirstGapIndex(); |
1634 | int last = i->LastGapIndex(); |
1635 | for (int instr = first; instr <= last;) { |
1636 | const InstructionBlock* block = code()->GetInstructionBlock(instr); |
1637 | if (!block->IsDeferred()) return false; |
1638 | instr = block->last_instruction_index() + 1; |
1639 | } |
1640 | } |
1641 | } |
1642 | return true; |
1643 | } |
1644 | |
1645 | SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange( |
1646 | TopLevelLiveRange* range, SpillMode spill_mode) { |
1647 | using SpillType = TopLevelLiveRange::SpillType; |
1648 | DCHECK(!range->HasSpillOperand()); |
1649 | |
1650 | SpillRange* spill_range = range->GetAllocatedSpillRange(); |
1651 | if (spill_range == nullptr) { |
1652 | DCHECK(!range->IsSplinter()); |
1653 | spill_range = new (allocation_zone()) SpillRange(range, allocation_zone()); |
1654 | } |
1655 | if (spill_mode == SpillMode::kSpillDeferred && |
1656 | (range->spill_type() != SpillType::kSpillRange)) { |
1657 | DCHECK(is_turbo_control_flow_aware_allocation()); |
1658 | range->set_spill_type(SpillType::kDeferredSpillRange); |
1659 | } else { |
1660 | range->set_spill_type(SpillType::kSpillRange); |
1661 | } |
1662 | |
1663 | int spill_range_index = |
1664 | range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg(); |
1665 | |
1666 | spill_ranges()[spill_range_index] = spill_range; |
1667 | |
1668 | return spill_range; |
1669 | } |
1670 | |
1671 | SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange( |
1672 | TopLevelLiveRange* range) { |
1673 | DCHECK(is_turbo_preprocess_ranges()); |
1674 | DCHECK(!range->HasSpillOperand()); |
1675 | DCHECK(!range->IsSplinter()); |
1676 | SpillRange* spill_range = |
1677 | new (allocation_zone()) SpillRange(range, allocation_zone()); |
1678 | return spill_range; |
1679 | } |
1680 | |
1681 | void RegisterAllocationData::MarkFixedUse(MachineRepresentation rep, |
1682 | int index) { |
1683 | switch (rep) { |
1684 | case MachineRepresentation::kFloat32: |
1685 | case MachineRepresentation::kSimd128: |
1686 | if (kSimpleFPAliasing) { |
1687 | fixed_fp_register_use_->Add(index); |
1688 | } else { |
1689 | int alias_base_index = -1; |
1690 | int aliases = config()->GetAliases( |
1691 | rep, index, MachineRepresentation::kFloat64, &alias_base_index); |
1692 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
1693 | while (aliases--) { |
1694 | int aliased_reg = alias_base_index + aliases; |
1695 | fixed_fp_register_use_->Add(aliased_reg); |
1696 | } |
1697 | } |
1698 | break; |
1699 | case MachineRepresentation::kFloat64: |
1700 | fixed_fp_register_use_->Add(index); |
1701 | break; |
1702 | default: |
1703 | DCHECK(!IsFloatingPoint(rep)); |
1704 | fixed_register_use_->Add(index); |
1705 | break; |
1706 | } |
1707 | } |
1708 | |
1709 | bool RegisterAllocationData::HasFixedUse(MachineRepresentation rep, int index) { |
1710 | switch (rep) { |
1711 | case MachineRepresentation::kFloat32: |
1712 | case MachineRepresentation::kSimd128: |
1713 | if (kSimpleFPAliasing) { |
1714 | return fixed_fp_register_use_->Contains(index); |
1715 | } else { |
1716 | int alias_base_index = -1; |
1717 | int aliases = config()->GetAliases( |
1718 | rep, index, MachineRepresentation::kFloat64, &alias_base_index); |
1719 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
1720 | bool result = false; |
1721 | while (aliases-- && !result) { |
1722 | int aliased_reg = alias_base_index + aliases; |
1723 | result |= fixed_fp_register_use_->Contains(aliased_reg); |
1724 | } |
1725 | return result; |
1726 | } |
1727 | break; |
1728 | case MachineRepresentation::kFloat64: |
1729 | return fixed_fp_register_use_->Contains(index); |
1730 | break; |
1731 | default: |
1732 | DCHECK(!IsFloatingPoint(rep)); |
1733 | return fixed_register_use_->Contains(index); |
1734 | break; |
1735 | } |
1736 | } |
1737 | |
1738 | void RegisterAllocationData::MarkAllocated(MachineRepresentation rep, |
1739 | int index) { |
1740 | switch (rep) { |
1741 | case MachineRepresentation::kFloat32: |
1742 | case MachineRepresentation::kSimd128: |
1743 | if (kSimpleFPAliasing) { |
1744 | assigned_double_registers_->Add(index); |
1745 | } else { |
1746 | int alias_base_index = -1; |
1747 | int aliases = config()->GetAliases( |
1748 | rep, index, MachineRepresentation::kFloat64, &alias_base_index); |
1749 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
1750 | while (aliases--) { |
1751 | int aliased_reg = alias_base_index + aliases; |
1752 | assigned_double_registers_->Add(aliased_reg); |
1753 | } |
1754 | } |
1755 | break; |
1756 | case MachineRepresentation::kFloat64: |
1757 | assigned_double_registers_->Add(index); |
1758 | break; |
1759 | default: |
1760 | DCHECK(!IsFloatingPoint(rep)); |
1761 | assigned_registers_->Add(index); |
1762 | break; |
1763 | } |
1764 | } |
1765 | |
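// A position is a block boundary if it is the full gap start of the first
// instruction in its block.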
1766 | bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const { |
1767 | return pos.IsFullStart() && |
1768 | code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == |
1769 | pos.ToInstructionIndex(); |
1770 | } |
1771 | |
1772 | ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data) |
1773 | : data_(data) {} |
1774 | |
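// Rewrites an operand with a fixed policy (fixed slot, fixed general
// register, or fixed FP register) into the corresponding concrete
// AllocatedOperand. Fixed register inputs are also recorded in the fixed-use
// bit vectors, and tagged values are added to the instruction's reference
// map so the GC can find them.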
1775 | InstructionOperand* ConstraintBuilder::AllocateFixed( |
1776 | UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input) { |
1777 | TRACE("Allocating fixed reg for op %d\n" , operand->virtual_register()); |
1778 | DCHECK(operand->HasFixedPolicy()); |
1779 | InstructionOperand allocated; |
1780 | MachineRepresentation rep = InstructionSequence::DefaultRepresentation(); |
1781 | int virtual_register = operand->virtual_register(); |
1782 | if (virtual_register != InstructionOperand::kInvalidVirtualRegister) { |
1783 | rep = data()->RepresentationFor(virtual_register); |
1784 | } |
1785 | if (operand->HasFixedSlotPolicy()) { |
1786 | allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep, |
1787 | operand->fixed_slot_index()); |
1788 | } else if (operand->HasFixedRegisterPolicy()) { |
1789 | DCHECK(!IsFloatingPoint(rep)); |
1790 | DCHECK(data()->config()->IsAllocatableGeneralCode( |
1791 | operand->fixed_register_index())); |
1792 | allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep, |
1793 | operand->fixed_register_index()); |
1794 | } else if (operand->HasFixedFPRegisterPolicy()) { |
1795 | DCHECK(IsFloatingPoint(rep)); |
1796 | DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register); |
1797 | allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep, |
1798 | operand->fixed_register_index()); |
1799 | } else { |
1800 | UNREACHABLE(); |
1801 | } |
1802 | if (is_input && allocated.IsAnyRegister()) { |
1803 | data()->MarkFixedUse(rep, operand->fixed_register_index()); |
1804 | } |
1805 | InstructionOperand::ReplaceWith(operand, &allocated); |
1806 | if (is_tagged) { |
1807 | TRACE("Fixed reg is tagged at %d\n" , pos); |
1808 | Instruction* instr = code()->InstructionAt(pos); |
1809 | if (instr->HasReferenceMap()) { |
1810 | instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand)); |
1811 | } |
1812 | } |
1813 | return operand; |
1814 | } |
1815 | |
1816 | void ConstraintBuilder::MeetRegisterConstraints() { |
1817 | for (InstructionBlock* block : code()->instruction_blocks()) { |
1818 | MeetRegisterConstraints(block); |
1819 | } |
1820 | } |
1821 | |
1822 | void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) { |
1823 | int start = block->first_instruction_index(); |
1824 | int end = block->last_instruction_index(); |
1825 | DCHECK_NE(-1, start); |
1826 | for (int i = start; i <= end; ++i) { |
1827 | MeetConstraintsBefore(i); |
1828 | if (i != end) MeetConstraintsAfter(i); |
1829 | } |
  // Meet register constraints for the last instruction in the block.
1831 | MeetRegisterConstraintsForLastInstructionInBlock(block); |
1832 | } |
1833 | |
1834 | void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock( |
1835 | const InstructionBlock* block) { |
1836 | int end = block->last_instruction_index(); |
1837 | Instruction* last_instruction = code()->InstructionAt(end); |
1838 | for (size_t i = 0; i < last_instruction->OutputCount(); i++) { |
1839 | InstructionOperand* output_operand = last_instruction->OutputAt(i); |
1840 | DCHECK(!output_operand->IsConstant()); |
1841 | UnallocatedOperand* output = UnallocatedOperand::cast(output_operand); |
1842 | int output_vreg = output->virtual_register(); |
1843 | TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg); |
1844 | bool assigned = false; |
1845 | if (output->HasFixedPolicy()) { |
1846 | AllocateFixed(output, -1, false, false); |
      // This value is produced on the stack; we never need to spill it.
1848 | if (output->IsStackSlot()) { |
1849 | DCHECK(LocationOperand::cast(output)->index() < |
1850 | data()->frame()->GetSpillSlotCount()); |
1851 | range->SetSpillOperand(LocationOperand::cast(output)); |
1852 | range->SetSpillStartIndex(end); |
1853 | assigned = true; |
1854 | } |
1855 | |
1856 | for (const RpoNumber& succ : block->successors()) { |
1857 | const InstructionBlock* successor = code()->InstructionBlockAt(succ); |
1858 | DCHECK_EQ(1, successor->PredecessorCount()); |
1859 | int gap_index = successor->first_instruction_index(); |
1860 | // Create an unconstrained operand for the same virtual register |
1861 | // and insert a gap move from the fixed output to the operand. |
1862 | UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT, |
1863 | output_vreg); |
1864 | data()->AddGapMove(gap_index, Instruction::START, *output, output_copy); |
1865 | } |
1866 | } |
1867 | |
1868 | if (!assigned) { |
1869 | for (const RpoNumber& succ : block->successors()) { |
1870 | const InstructionBlock* successor = code()->InstructionBlockAt(succ); |
1871 | DCHECK_EQ(1, successor->PredecessorCount()); |
1872 | int gap_index = successor->first_instruction_index(); |
1873 | range->RecordSpillLocation(allocation_zone(), gap_index, output); |
1874 | range->SetSpillStartIndex(gap_index); |
1875 | } |
1876 | } |
1877 | } |
1878 | } |
1879 | |
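// Handles constraints that take effect in the gap after 'instr_index':
// fixed temporaries, and constant or fixed outputs. Fixed outputs are moved
// into an unconstrained copy of the virtual register at the start of the
// next gap, so that subsequent uses are not tied to the fixed location.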
1880 | void ConstraintBuilder::MeetConstraintsAfter(int instr_index) { |
1881 | Instruction* first = code()->InstructionAt(instr_index); |
1882 | // Handle fixed temporaries. |
1883 | for (size_t i = 0; i < first->TempCount(); i++) { |
1884 | UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i)); |
1885 | if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false, false); |
1886 | } |
1887 | // Handle constant/fixed output operands. |
1888 | for (size_t i = 0; i < first->OutputCount(); i++) { |
1889 | InstructionOperand* output = first->OutputAt(i); |
1890 | if (output->IsConstant()) { |
1891 | int output_vreg = ConstantOperand::cast(output)->virtual_register(); |
1892 | TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg); |
1893 | range->SetSpillStartIndex(instr_index + 1); |
1894 | range->SetSpillOperand(output); |
1895 | continue; |
1896 | } |
1897 | UnallocatedOperand* first_output = UnallocatedOperand::cast(output); |
1898 | TopLevelLiveRange* range = |
1899 | data()->GetOrCreateLiveRangeFor(first_output->virtual_register()); |
1900 | bool assigned = false; |
1901 | if (first_output->HasFixedPolicy()) { |
1902 | int output_vreg = first_output->virtual_register(); |
1903 | UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT, |
1904 | output_vreg); |
1905 | bool is_tagged = code()->IsReference(output_vreg); |
1906 | if (first_output->HasSecondaryStorage()) { |
1907 | range->MarkHasPreassignedSlot(); |
1908 | data()->preassigned_slot_ranges().push_back( |
1909 | std::make_pair(range, first_output->GetSecondaryStorage())); |
1910 | } |
1911 | AllocateFixed(first_output, instr_index, is_tagged, false); |
1912 | |
      // This value is produced on the stack; we never need to spill it.
1914 | if (first_output->IsStackSlot()) { |
1915 | DCHECK(LocationOperand::cast(first_output)->index() < |
1916 | data()->frame()->GetTotalFrameSlotCount()); |
1917 | range->SetSpillOperand(LocationOperand::cast(first_output)); |
1918 | range->SetSpillStartIndex(instr_index + 1); |
1919 | assigned = true; |
1920 | } |
1921 | data()->AddGapMove(instr_index + 1, Instruction::START, *first_output, |
1922 | output_copy); |
1923 | } |
1924 | // Make sure we add a gap move for spilling (if we have not done |
1925 | // so already). |
1926 | if (!assigned) { |
1927 | range->RecordSpillLocation(allocation_zone(), instr_index + 1, |
1928 | first_output); |
1929 | range->SetSpillStartIndex(instr_index + 1); |
1930 | } |
1931 | } |
1932 | } |
1933 | |
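// Handles constraints that take effect in the gap before 'instr_index':
// fixed inputs get a gap move from an unconstrained copy into the fixed
// location, and an output with the "same as input" policy is tied to input 0
// by renaming the input and moving the old value into it.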
1934 | void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { |
1935 | Instruction* second = code()->InstructionAt(instr_index); |
1936 | // Handle fixed input operands of second instruction. |
1937 | for (size_t i = 0; i < second->InputCount(); i++) { |
1938 | InstructionOperand* input = second->InputAt(i); |
1939 | if (input->IsImmediate() || input->IsExplicit()) { |
1940 | continue; // Ignore immediates and explicitly reserved registers. |
1941 | } |
1942 | UnallocatedOperand* cur_input = UnallocatedOperand::cast(input); |
1943 | if (cur_input->HasFixedPolicy()) { |
1944 | int input_vreg = cur_input->virtual_register(); |
1945 | UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT, |
1946 | input_vreg); |
1947 | bool is_tagged = code()->IsReference(input_vreg); |
1948 | AllocateFixed(cur_input, instr_index, is_tagged, true); |
1949 | data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); |
1950 | } |
1951 | } |
1952 | // Handle "output same as input" for second instruction. |
1953 | for (size_t i = 0; i < second->OutputCount(); i++) { |
1954 | InstructionOperand* output = second->OutputAt(i); |
1955 | if (!output->IsUnallocated()) continue; |
1956 | UnallocatedOperand* second_output = UnallocatedOperand::cast(output); |
1957 | if (!second_output->HasSameAsInputPolicy()) continue; |
1958 | DCHECK_EQ(0, i); // Only valid for first output. |
1959 | UnallocatedOperand* cur_input = |
1960 | UnallocatedOperand::cast(second->InputAt(0)); |
1961 | int output_vreg = second_output->virtual_register(); |
1962 | int input_vreg = cur_input->virtual_register(); |
1963 | UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT, |
1964 | input_vreg); |
1965 | *cur_input = |
1966 | UnallocatedOperand(*cur_input, second_output->virtual_register()); |
1967 | MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END, |
1968 | input_copy, *cur_input); |
1969 | DCHECK_NOT_NULL(gap_move); |
1970 | if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) { |
1971 | if (second->HasReferenceMap()) { |
1972 | RegisterAllocationData::DelayedReference delayed_reference = { |
1973 | second->reference_map(), &gap_move->source()}; |
1974 | data()->delayed_references().push_back(delayed_reference); |
1975 | } |
1976 | } else if (!code()->IsReference(input_vreg) && |
1977 | code()->IsReference(output_vreg)) { |
1978 | // The input is assumed to immediately have a tagged representation, |
1979 | // before the pointer map can be used. I.e. the pointer map at the |
1980 | // instruction will include the output operand (whose value at the |
1981 | // beginning of the instruction is equal to the input operand). If |
1982 | // this is not desired, then the pointer map at this instruction needs |
1983 | // to be adjusted manually. |
1984 | } |
1985 | } |
1986 | } |
1987 | |
1988 | void ConstraintBuilder::ResolvePhis() { |
1989 | // Process the blocks in reverse order. |
1990 | for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) { |
1991 | ResolvePhis(block); |
1992 | } |
1993 | } |
1994 | |
1995 | void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) { |
1996 | for (PhiInstruction* phi : block->phis()) { |
1997 | int phi_vreg = phi->virtual_register(); |
1998 | RegisterAllocationData::PhiMapValue* map_value = |
1999 | data()->InitializePhiMap(block, phi); |
2000 | InstructionOperand& output = phi->output(); |
2001 | // Map the destination operands, so the commitment phase can find them. |
2002 | for (size_t i = 0; i < phi->operands().size(); ++i) { |
2003 | InstructionBlock* cur_block = |
2004 | code()->InstructionBlockAt(block->predecessors()[i]); |
2005 | UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT, |
2006 | phi->operands()[i]); |
2007 | MoveOperands* move = data()->AddGapMove( |
2008 | cur_block->last_instruction_index(), Instruction::END, input, output); |
2009 | map_value->AddOperand(&move->destination()); |
2010 | DCHECK(!code() |
2011 | ->InstructionAt(cur_block->last_instruction_index()) |
2012 | ->HasReferenceMap()); |
2013 | } |
2014 | TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg); |
2015 | int gap_index = block->first_instruction_index(); |
2016 | live_range->RecordSpillLocation(allocation_zone(), gap_index, &output); |
2017 | live_range->SetSpillStartIndex(gap_index); |
2018 | // We use the phi-ness of some nodes in some later heuristics. |
2019 | live_range->set_is_phi(true); |
2020 | live_range->set_is_non_loop_phi(!block->IsLoopHeader()); |
2021 | } |
2022 | } |
2023 | |
2024 | LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data, |
2025 | Zone* local_zone) |
2026 | : data_(data), phi_hints_(local_zone) {} |
2027 | |
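// Computes the live-out set of 'block' as the union of the live-in sets of
// all forward (rpo-later) successors, plus the phi inputs that flow along
// each successor edge. The result is memoized in data->live_out_sets().
// Backward (loop) edges are deliberately skipped here; loop liveness is
// fixed up separately when the loop header is processed.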
2028 | BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block, |
2029 | RegisterAllocationData* data) { |
2030 | size_t block_index = block->rpo_number().ToSize(); |
2031 | BitVector* live_out = data->live_out_sets()[block_index]; |
2032 | if (live_out == nullptr) { |
    // Compute live-out for the given block, excluding backward successor
    // edges.
2035 | Zone* zone = data->allocation_zone(); |
2036 | const InstructionSequence* code = data->code(); |
2037 | |
2038 | live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone); |
2039 | |
2040 | // Process all successor blocks. |
2041 | for (const RpoNumber& succ : block->successors()) { |
2042 | // Add values live on entry to the successor. |
2043 | if (succ <= block->rpo_number()) continue; |
2044 | BitVector* live_in = data->live_in_sets()[succ.ToSize()]; |
2045 | if (live_in != nullptr) live_out->Union(*live_in); |
2046 | |
2047 | // All phi input operands corresponding to this successor edge are live |
2048 | // out from this block. |
2049 | const InstructionBlock* successor = code->InstructionBlockAt(succ); |
2050 | size_t index = successor->PredecessorIndexOf(block->rpo_number()); |
2051 | DCHECK(index < successor->PredecessorCount()); |
2052 | for (PhiInstruction* phi : successor->phis()) { |
2053 | live_out->Add(phi->operands()[index]); |
2054 | } |
2055 | } |
2056 | data->live_out_sets()[block_index] = live_out; |
2057 | } |
2058 | return live_out; |
2059 | } |
2060 | |
2061 | void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, |
2062 | BitVector* live_out) { |
2063 | // Add an interval that includes the entire block to the live range for |
2064 | // each live_out value. |
2065 | LifetimePosition start = LifetimePosition::GapFromInstructionIndex( |
2066 | block->first_instruction_index()); |
2067 | LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex( |
2068 | block->last_instruction_index()) |
2069 | .NextStart(); |
2070 | BitVector::Iterator iterator(live_out); |
2071 | while (!iterator.Done()) { |
2072 | int operand_index = iterator.Current(); |
2073 | TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); |
2074 | range->AddUseInterval(start, end, allocation_zone()); |
2075 | iterator.Advance(); |
2076 | } |
2077 | } |
2078 | |
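// Fixed live ranges are identified by negative numbers. The IDs computed
// here give each FP representation its own segment of the negative ID space
// below the general registers' segment: float64 first, then float32, then
// simd128, with kNumberOfFixedRangesPerRegister slots per register.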
2079 | int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) { |
2080 | int result = -index - 1; |
2081 | switch (rep) { |
2082 | case MachineRepresentation::kSimd128: |
2083 | result -= |
2084 | kNumberOfFixedRangesPerRegister * config()->num_float_registers(); |
2085 | V8_FALLTHROUGH; |
2086 | case MachineRepresentation::kFloat32: |
2087 | result -= |
2088 | kNumberOfFixedRangesPerRegister * config()->num_double_registers(); |
2089 | V8_FALLTHROUGH; |
2090 | case MachineRepresentation::kFloat64: |
2091 | result -= |
2092 | kNumberOfFixedRangesPerRegister * config()->num_general_registers(); |
2093 | break; |
2094 | default: |
2095 | UNREACHABLE(); |
2096 | break; |
2097 | } |
2098 | return result; |
2099 | } |
2100 | |
2101 | TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index, |
2102 | SpillMode spill_mode) { |
2103 | int offset = spill_mode == SpillMode::kSpillAtDefinition |
2104 | ? 0 |
2105 | : config()->num_general_registers(); |
2106 | DCHECK(index < config()->num_general_registers()); |
2107 | TopLevelLiveRange* result = data()->fixed_live_ranges()[offset + index]; |
2108 | if (result == nullptr) { |
2109 | MachineRepresentation rep = InstructionSequence::DefaultRepresentation(); |
2110 | result = data()->NewLiveRange(FixedLiveRangeID(offset + index), rep); |
2111 | DCHECK(result->IsFixed()); |
2112 | result->set_assigned_register(index); |
2113 | data()->MarkAllocated(rep, index); |
2114 | if (spill_mode == SpillMode::kSpillDeferred) { |
2115 | result->set_deferred_fixed(); |
2116 | } |
2117 | data()->fixed_live_ranges()[offset + index] = result; |
2118 | } |
2119 | return result; |
2120 | } |
2121 | |
2122 | TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor( |
2123 | int index, MachineRepresentation rep, SpillMode spill_mode) { |
2124 | int num_regs = config()->num_double_registers(); |
2125 | ZoneVector<TopLevelLiveRange*>* live_ranges = |
2126 | &data()->fixed_double_live_ranges(); |
2127 | if (!kSimpleFPAliasing) { |
2128 | switch (rep) { |
2129 | case MachineRepresentation::kFloat32: |
2130 | num_regs = config()->num_float_registers(); |
2131 | live_ranges = &data()->fixed_float_live_ranges(); |
2132 | break; |
2133 | case MachineRepresentation::kSimd128: |
2134 | num_regs = config()->num_simd128_registers(); |
2135 | live_ranges = &data()->fixed_simd128_live_ranges(); |
2136 | break; |
2137 | default: |
2138 | break; |
2139 | } |
2140 | } |
2141 | |
2142 | int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs; |
2143 | |
2144 | DCHECK(index < num_regs); |
2145 | USE(num_regs); |
2146 | TopLevelLiveRange* result = (*live_ranges)[offset + index]; |
2147 | if (result == nullptr) { |
2148 | result = data()->NewLiveRange(FixedFPLiveRangeID(offset + index, rep), rep); |
2149 | DCHECK(result->IsFixed()); |
2150 | result->set_assigned_register(index); |
2151 | data()->MarkAllocated(rep, index); |
2152 | if (spill_mode == SpillMode::kSpillDeferred) { |
2153 | result->set_deferred_fixed(); |
2154 | } |
2155 | (*live_ranges)[offset + index] = result; |
2156 | } |
2157 | return result; |
2158 | } |
2159 | |
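// Maps an operand to its live range: unallocated and constant operands are
// looked up by virtual register, while pre-assigned registers get the fixed
// live range for that register. Other operands (e.g. stack slots) have no
// associated live range, and nullptr is returned.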
2160 | TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand, |
2161 | SpillMode spill_mode) { |
2162 | if (operand->IsUnallocated()) { |
2163 | return data()->GetOrCreateLiveRangeFor( |
2164 | UnallocatedOperand::cast(operand)->virtual_register()); |
2165 | } else if (operand->IsConstant()) { |
2166 | return data()->GetOrCreateLiveRangeFor( |
2167 | ConstantOperand::cast(operand)->virtual_register()); |
2168 | } else if (operand->IsRegister()) { |
2169 | return FixedLiveRangeFor( |
2170 | LocationOperand::cast(operand)->GetRegister().code(), spill_mode); |
2171 | } else if (operand->IsFPRegister()) { |
2172 | LocationOperand* op = LocationOperand::cast(operand); |
2173 | return FixedFPLiveRangeFor(op->register_code(), op->representation(), |
2174 | spill_mode); |
2175 | } else { |
2176 | return nullptr; |
2177 | } |
2178 | } |
2179 | |
2180 | UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos, |
2181 | InstructionOperand* operand, |
2182 | void* hint, |
2183 | UsePositionHintType hint_type) { |
2184 | return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type); |
2185 | } |
2186 | |
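// Records a definition of the operand's value at 'position'. Since blocks
// are processed backwards, the range normally already has an interval that
// was opened by a later use; that interval is shortened to start at the
// definition. A definition without any use instead gets a minimal one-gap
// interval.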
2187 | UsePosition* LiveRangeBuilder::Define(LifetimePosition position, |
2188 | InstructionOperand* operand, void* hint, |
2189 | UsePositionHintType hint_type, |
2190 | SpillMode spill_mode) { |
2191 | TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode); |
2192 | if (range == nullptr) return nullptr; |
2193 | |
2194 | if (range->IsEmpty() || range->Start() > position) { |
    // This can happen if there is a definition without a use.
2196 | range->AddUseInterval(position, position.NextStart(), allocation_zone()); |
2197 | range->AddUsePosition(NewUsePosition(position.NextStart())); |
2198 | } else { |
2199 | range->ShortenTo(position); |
2200 | } |
2201 | if (!operand->IsUnallocated()) return nullptr; |
2202 | UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); |
2203 | UsePosition* use_pos = |
2204 | NewUsePosition(position, unalloc_operand, hint, hint_type); |
2205 | range->AddUsePosition(use_pos); |
2206 | return use_pos; |
2207 | } |
2208 | |
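// Records a use of the operand's value at 'position'. The value is assumed
// live from the start of the block up to the use, so the interval
// [block_start, position) is added; a definition inside the block, which is
// processed later in the backwards walk, will shorten it. A UsePosition is
// recorded only for unallocated operands.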
2209 | UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start, |
2210 | LifetimePosition position, |
2211 | InstructionOperand* operand, void* hint, |
2212 | UsePositionHintType hint_type, |
2213 | SpillMode spill_mode) { |
2214 | TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode); |
2215 | if (range == nullptr) return nullptr; |
2216 | UsePosition* use_pos = nullptr; |
2217 | if (operand->IsUnallocated()) { |
2218 | UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); |
2219 | use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type); |
2220 | range->AddUsePosition(use_pos); |
2221 | } |
2222 | range->AddUseInterval(block_start, position, allocation_zone()); |
2223 | return use_pos; |
2224 | } |
2225 | |
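// Walks the block's instructions in reverse order, maintaining 'live' as the
// set of virtual registers live at the current position: outputs kill their
// values, inputs revive them, and use intervals and use positions are
// created accordingly. Instructions that clobber registers (e.g. calls) pin
// the corresponding fixed live ranges, and the parallel moves in the
// instruction gaps are processed as definitions and uses of their
// destinations and sources.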
2226 | void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, |
2227 | BitVector* live) { |
2228 | int block_start = block->first_instruction_index(); |
2229 | LifetimePosition block_start_position = |
2230 | LifetimePosition::GapFromInstructionIndex(block_start); |
2231 | bool fixed_float_live_ranges = false; |
2232 | bool fixed_simd128_live_ranges = false; |
2233 | if (!kSimpleFPAliasing) { |
2234 | int mask = data()->code()->representation_mask(); |
2235 | fixed_float_live_ranges = (mask & kFloat32Bit) != 0; |
2236 | fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0; |
2237 | } |
2238 | SpillMode spill_mode = SpillModeForBlock(block); |
2239 | |
2240 | for (int index = block->last_instruction_index(); index >= block_start; |
2241 | index--) { |
2242 | LifetimePosition curr_position = |
2243 | LifetimePosition::InstructionFromInstructionIndex(index); |
2244 | Instruction* instr = code()->InstructionAt(index); |
2245 | DCHECK_NOT_NULL(instr); |
2246 | DCHECK(curr_position.IsInstructionPosition()); |
2247 | // Process output, inputs, and temps of this instruction. |
2248 | for (size_t i = 0; i < instr->OutputCount(); i++) { |
2249 | InstructionOperand* output = instr->OutputAt(i); |
2250 | if (output->IsUnallocated()) { |
2251 | // Unsupported. |
2252 | DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy()); |
2253 | int out_vreg = UnallocatedOperand::cast(output)->virtual_register(); |
2254 | live->Remove(out_vreg); |
2255 | } else if (output->IsConstant()) { |
2256 | int out_vreg = ConstantOperand::cast(output)->virtual_register(); |
2257 | live->Remove(out_vreg); |
2258 | } |
2259 | if (block->IsHandler() && index == block_start && output->IsAllocated() && |
2260 | output->IsRegister() && |
2261 | AllocatedOperand::cast(output)->GetRegister() == |
2262 | v8::internal::kReturnRegister0) { |
2263 | // The register defined here is blocked from gap start - it is the |
2264 | // exception value. |
2265 | // TODO(mtrofin): should we explore an explicit opcode for |
2266 | // the first instruction in the handler? |
2267 | Define(LifetimePosition::GapFromInstructionIndex(index), output, |
2268 | spill_mode); |
2269 | } else { |
2270 | Define(curr_position, output, spill_mode); |
2271 | } |
2272 | } |
2273 | |
2274 | if (instr->ClobbersRegisters()) { |
2275 | for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) { |
2276 | // Create a UseInterval at this instruction for all fixed registers, |
2277 | // (including the instruction outputs). Adding another UseInterval here |
2278 | // is OK because AddUseInterval will just merge it with the existing |
2279 | // one at the end of the range. |
2280 | int code = config()->GetAllocatableGeneralCode(i); |
2281 | TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode); |
2282 | range->AddUseInterval(curr_position, curr_position.End(), |
2283 | allocation_zone()); |
2284 | } |
2285 | } |
2286 | |
2287 | if (instr->ClobbersDoubleRegisters()) { |
2288 | for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) { |
2289 | // Add a UseInterval for all DoubleRegisters. See comment above for |
2290 | // general registers. |
2291 | int code = config()->GetAllocatableDoubleCode(i); |
2292 | TopLevelLiveRange* range = FixedFPLiveRangeFor( |
2293 | code, MachineRepresentation::kFloat64, spill_mode); |
2294 | range->AddUseInterval(curr_position, curr_position.End(), |
2295 | allocation_zone()); |
2296 | } |
2297 | // Clobber fixed float registers on archs with non-simple aliasing. |
2298 | if (!kSimpleFPAliasing) { |
2299 | if (fixed_float_live_ranges) { |
2300 | for (int i = 0; i < config()->num_allocatable_float_registers(); |
2301 | ++i) { |
2302 | // Add a UseInterval for all FloatRegisters. See comment above for |
2303 | // general registers. |
2304 | int code = config()->GetAllocatableFloatCode(i); |
2305 | TopLevelLiveRange* range = FixedFPLiveRangeFor( |
2306 | code, MachineRepresentation::kFloat32, spill_mode); |
2307 | range->AddUseInterval(curr_position, curr_position.End(), |
2308 | allocation_zone()); |
2309 | } |
2310 | } |
2311 | if (fixed_simd128_live_ranges) { |
2312 | for (int i = 0; i < config()->num_allocatable_simd128_registers(); |
2313 | ++i) { |
2314 | int code = config()->GetAllocatableSimd128Code(i); |
2315 | TopLevelLiveRange* range = FixedFPLiveRangeFor( |
2316 | code, MachineRepresentation::kSimd128, spill_mode); |
2317 | range->AddUseInterval(curr_position, curr_position.End(), |
2318 | allocation_zone()); |
2319 | } |
2320 | } |
2321 | } |
2322 | } |
2323 | |
2324 | for (size_t i = 0; i < instr->InputCount(); i++) { |
2325 | InstructionOperand* input = instr->InputAt(i); |
2326 | if (input->IsImmediate() || input->IsExplicit()) { |
2327 | continue; // Ignore immediates and explicitly reserved registers. |
2328 | } |
2329 | LifetimePosition use_pos; |
2330 | if (input->IsUnallocated() && |
2331 | UnallocatedOperand::cast(input)->IsUsedAtStart()) { |
2332 | use_pos = curr_position; |
2333 | } else { |
2334 | use_pos = curr_position.End(); |
2335 | } |
2336 | |
2337 | if (input->IsUnallocated()) { |
2338 | UnallocatedOperand* unalloc = UnallocatedOperand::cast(input); |
2339 | int vreg = unalloc->virtual_register(); |
2340 | live->Add(vreg); |
2341 | if (unalloc->HasSlotPolicy()) { |
2342 | if (data()->is_turbo_control_flow_aware_allocation()) { |
2343 | data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use( |
2344 | block->IsDeferred() |
2345 | ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse |
2346 | : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse); |
2347 | } else { |
2348 | data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use( |
2349 | TopLevelLiveRange::SlotUseKind::kGeneralSlotUse); |
2350 | } |
2351 | } |
2352 | } |
2353 | Use(block_start_position, use_pos, input, spill_mode); |
2354 | } |
2355 | |
2356 | for (size_t i = 0; i < instr->TempCount(); i++) { |
2357 | InstructionOperand* temp = instr->TempAt(i); |
2358 | // Unsupported. |
2359 | DCHECK_IMPLIES(temp->IsUnallocated(), |
2360 | !UnallocatedOperand::cast(temp)->HasSlotPolicy()); |
2361 | if (instr->ClobbersTemps()) { |
2362 | if (temp->IsRegister()) continue; |
2363 | if (temp->IsUnallocated()) { |
2364 | UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp); |
2365 | if (temp_unalloc->HasFixedPolicy()) { |
2366 | continue; |
2367 | } |
2368 | } |
2369 | } |
2370 | Use(block_start_position, curr_position.End(), temp, spill_mode); |
2371 | Define(curr_position, temp, spill_mode); |
2372 | } |
2373 | |
2374 | // Process the moves of the instruction's gaps, making their sources live. |
2375 | const Instruction::GapPosition kPositions[] = {Instruction::END, |
2376 | Instruction::START}; |
2377 | curr_position = curr_position.PrevStart(); |
2378 | DCHECK(curr_position.IsGapPosition()); |
2379 | for (const Instruction::GapPosition& position : kPositions) { |
2380 | ParallelMove* move = instr->GetParallelMove(position); |
2381 | if (move == nullptr) continue; |
2382 | if (position == Instruction::END) { |
2383 | curr_position = curr_position.End(); |
2384 | } else { |
2385 | curr_position = curr_position.Start(); |
2386 | } |
2387 | for (MoveOperands* cur : *move) { |
2388 | InstructionOperand& from = cur->source(); |
2389 | InstructionOperand& to = cur->destination(); |
2390 | void* hint = &to; |
2391 | UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to); |
2392 | UsePosition* to_use = nullptr; |
2393 | int phi_vreg = -1; |
2394 | if (to.IsUnallocated()) { |
2395 | int to_vreg = UnallocatedOperand::cast(to).virtual_register(); |
2396 | TopLevelLiveRange* to_range = |
2397 | data()->GetOrCreateLiveRangeFor(to_vreg); |
2398 | if (to_range->is_phi()) { |
2399 | phi_vreg = to_vreg; |
2400 | if (to_range->is_non_loop_phi()) { |
2401 | hint = to_range->current_hint_position(); |
2402 | hint_type = hint == nullptr ? UsePositionHintType::kNone |
2403 | : UsePositionHintType::kUsePos; |
2404 | } else { |
2405 | hint_type = UsePositionHintType::kPhi; |
2406 | hint = data()->GetPhiMapValueFor(to_vreg); |
2407 | } |
2408 | } else { |
2409 | if (live->Contains(to_vreg)) { |
2410 | to_use = |
2411 | Define(curr_position, &to, &from, |
2412 | UsePosition::HintTypeForOperand(from), spill_mode); |
2413 | live->Remove(to_vreg); |
2414 | } else { |
2415 | cur->Eliminate(); |
2416 | continue; |
2417 | } |
2418 | } |
2419 | } else { |
2420 | Define(curr_position, &to, spill_mode); |
2421 | } |
2422 | UsePosition* from_use = Use(block_start_position, curr_position, &from, |
2423 | hint, hint_type, spill_mode); |
2424 | // Mark range live. |
2425 | if (from.IsUnallocated()) { |
2426 | live->Add(UnallocatedOperand::cast(from).virtual_register()); |
2427 | } |
2428 | // Resolve use position hints just created. |
2429 | if (to_use != nullptr && from_use != nullptr) { |
2430 | to_use->ResolveHint(from_use); |
2431 | from_use->ResolveHint(to_use); |
2432 | } |
2433 | DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved()); |
2434 | DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved()); |
2435 | // Potentially resolve phi hint. |
2436 | if (phi_vreg != -1) ResolvePhiHint(&from, from_use); |
2437 | } |
2438 | } |
2439 | } |
2440 | } |
2441 | |
2442 | void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, |
2443 | BitVector* live) { |
2444 | for (PhiInstruction* phi : block->phis()) { |
2445 | // The live range interval already ends at the first instruction of the |
2446 | // block. |
2447 | int phi_vreg = phi->virtual_register(); |
2448 | live->Remove(phi_vreg); |
2449 | // Select a hint from a predecessor block that precedes this block in the |
2450 | // rpo order. In order of priority: |
2451 | // - Avoid hints from deferred blocks. |
2452 | // - Prefer hints from allocated (or explicit) operands. |
2453 | // - Prefer hints from empty blocks (containing just parallel moves and a |
2454 | // jump). In these cases, if we can elide the moves, the jump threader |
2455 | // is likely to be able to elide the jump. |
2456 | // The enforcement of hinting in rpo order is required because hint |
2457 | // resolution that happens later in the compiler pipeline visits |
2458 | // instructions in reverse rpo order, relying on the fact that phis are |
2459 | // encountered before their hints. |
2460 | InstructionOperand* hint = nullptr; |
2461 | int hint_preference = 0; |
2462 | |
2463 | // The cost of hinting increases with the number of predecessors. At the |
2464 | // same time, the typical benefit decreases, since this hinting only |
2465 | // optimises the execution path through one predecessor. A limit of 2 is |
2466 | // sufficient to hit the common if/else pattern. |
2467 | int predecessor_limit = 2; |
2468 | |
2469 | for (RpoNumber predecessor : block->predecessors()) { |
2470 | const InstructionBlock* predecessor_block = |
2471 | code()->InstructionBlockAt(predecessor); |
2472 | DCHECK_EQ(predecessor_block->rpo_number(), predecessor); |
2473 | |
2474 | // Only take hints from earlier rpo numbers. |
2475 | if (predecessor >= block->rpo_number()) continue; |
2476 | |
2477 | // Look up the predecessor instruction. |
2478 | const Instruction* predecessor_instr = |
2479 | GetLastInstruction(code(), predecessor_block); |
2480 | InstructionOperand* predecessor_hint = nullptr; |
2481 | // Phis are assigned in the END position of the last instruction in each |
2482 | // predecessor block. |
2483 | for (MoveOperands* move : |
2484 | *predecessor_instr->GetParallelMove(Instruction::END)) { |
2485 | InstructionOperand& to = move->destination(); |
2486 | if (to.IsUnallocated() && |
2487 | UnallocatedOperand::cast(to).virtual_register() == phi_vreg) { |
2488 | predecessor_hint = &move->source(); |
2489 | break; |
2490 | } |
2491 | } |
2492 | DCHECK_NOT_NULL(predecessor_hint); |
2493 | |
2494 | // For each predecessor, generate a score according to the priorities |
2495 | // described above, and pick the best one. Flags in higher-order bits have |
2496 | // a higher priority than those in lower-order bits. |
2497 | int predecessor_hint_preference = 0; |
2498 | const int kNotDeferredBlockPreference = (1 << 2); |
2499 | const int kMoveIsAllocatedPreference = (1 << 1); |
2500 | const int kBlockIsEmptyPreference = (1 << 0); |
2501 | |
2502 | // - Avoid hints from deferred blocks. |
2503 | if (!predecessor_block->IsDeferred()) { |
2504 | predecessor_hint_preference |= kNotDeferredBlockPreference; |
2505 | } |
2506 | |
2507 | // - Prefer hints from allocated (or explicit) operands. |
2508 | // |
2509 | // Already-allocated or explicit operands are typically assigned using |
2510 | // the parallel moves on the last instruction. For example: |
2511 | // |
2512 | // gap (v101 = [x0|R|w32]) (v100 = v101) |
2513 | // ArchJmp |
2514 | // ... |
2515 | // phi: v100 = v101 v102 |
2516 | // |
2517 | // We have already found the END move, so look for a matching START move |
2518 | // from an allocated (or explicit) operand. |
2519 | // |
2520 | // Note that we cannot simply look up data()->live_ranges()[vreg] here |
2521 | // because the live ranges are still being built when this function is |
2522 | // called. |
2523 | // TODO(v8): Find a way to separate hinting from live range analysis in |
2524 | // BuildLiveRanges so that we can use the O(1) live-range look-up. |
2525 | auto moves = predecessor_instr->GetParallelMove(Instruction::START); |
2526 | if (moves != nullptr) { |
2527 | for (MoveOperands* move : *moves) { |
2528 | InstructionOperand& to = move->destination(); |
2529 | if (predecessor_hint->Equals(to)) { |
2530 | if (move->source().IsAllocated() || move->source().IsExplicit()) { |
2531 | predecessor_hint_preference |= kMoveIsAllocatedPreference; |
2532 | } |
2533 | break; |
2534 | } |
2535 | } |
2536 | } |
2537 | |
2538 | // - Prefer hints from empty blocks. |
2539 | if (predecessor_block->last_instruction_index() == |
2540 | predecessor_block->first_instruction_index()) { |
2541 | predecessor_hint_preference |= kBlockIsEmptyPreference; |
2542 | } |
2543 | |
2544 | if ((hint == nullptr) || |
2545 | (predecessor_hint_preference > hint_preference)) { |
2546 | // Take the hint from this predecessor. |
2547 | hint = predecessor_hint; |
2548 | hint_preference = predecessor_hint_preference; |
2549 | } |
2550 | |
2551 | if (--predecessor_limit <= 0) break; |
2552 | } |
2553 | DCHECK_NOT_NULL(hint); |
2554 | |
2555 | LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex( |
2556 | block->first_instruction_index()); |
2557 | UsePosition* use_pos = Define(block_start, &phi->output(), hint, |
2558 | UsePosition::HintTypeForOperand(*hint), |
2559 | SpillModeForBlock(block)); |
2560 | MapPhiHint(hint, use_pos); |
2561 | } |
2562 | } |
2563 | |
void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
                                         BitVector* live) {
2566 | DCHECK(block->IsLoopHeader()); |
2567 | // Add a live range stretching from the first loop instruction to the last |
2568 | // for each value live on entry to the header. |
2569 | BitVector::Iterator iterator(live); |
2570 | LifetimePosition start = LifetimePosition::GapFromInstructionIndex( |
2571 | block->first_instruction_index()); |
2572 | LifetimePosition end = LifetimePosition::GapFromInstructionIndex( |
2573 | code()->LastLoopInstructionIndex(block)) |
2574 | .NextFullStart(); |
2575 | while (!iterator.Done()) { |
2576 | int operand_index = iterator.Current(); |
2577 | TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); |
2578 | range->EnsureInterval(start, end, allocation_zone()); |
2579 | iterator.Advance(); |
2580 | } |
2581 | // Insert all values into the live in sets of all blocks in the loop. |
2582 | for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt(); |
2583 | ++i) { |
2584 | live_in_sets()[i]->Union(*live); |
2585 | } |
2586 | } |
2587 | |
2588 | void LiveRangeBuilder::BuildLiveRanges() { |
2589 | // Process the blocks in reverse order. |
2590 | for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0; |
2591 | --block_id) { |
2592 | InstructionBlock* block = |
2593 | code()->InstructionBlockAt(RpoNumber::FromInt(block_id)); |
2594 | BitVector* live = ComputeLiveOut(block, data()); |
2595 | // Initially consider all live_out values live for the entire block. We |
2596 | // will shorten these intervals if necessary. |
2597 | AddInitialIntervals(block, live); |
2598 | // Process the instructions in reverse order, generating and killing |
2599 | // live values. |
2600 | ProcessInstructions(block, live); |
2601 | // All phi output operands are killed by this block. |
2602 | ProcessPhis(block, live); |
    // Now live is the live-in set for this block, excluding values live out
    // on backward successor edges.
2605 | if (block->IsLoopHeader()) ProcessLoopHeader(block, live); |
2606 | live_in_sets()[block_id] = live; |
2607 | } |
2608 | // Postprocess the ranges. |
2609 | const size_t live_ranges_size = data()->live_ranges().size(); |
2610 | for (TopLevelLiveRange* range : data()->live_ranges()) { |
2611 | CHECK_EQ(live_ranges_size, |
2612 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
2613 | if (range == nullptr) continue; |
    // Give slots to all ranges with a non-fixed slot use.
2615 | if (range->has_slot_use() && range->HasNoSpillType()) { |
2616 | SpillMode spill_mode = |
2617 | range->slot_use_kind() == |
2618 | TopLevelLiveRange::SlotUseKind::kDeferredSlotUse |
2619 | ? SpillMode::kSpillDeferred |
2620 | : SpillMode::kSpillAtDefinition; |
2621 | data()->AssignSpillRangeToLiveRange(range, spill_mode); |
2622 | } |
2623 | // TODO(bmeurer): This is a horrible hack to make sure that for constant |
2624 | // live ranges, every use requires the constant to be in a register. |
2625 | // Without this hack, all uses with "any" policy would get the constant |
2626 | // operand assigned. |
2627 | if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) { |
2628 | for (UsePosition* pos = range->first_pos(); pos != nullptr; |
2629 | pos = pos->next()) { |
2630 | if (pos->type() == UsePositionType::kRequiresSlot || |
2631 | pos->type() == UsePositionType::kRegisterOrSlotOrConstant) { |
2632 | continue; |
2633 | } |
2634 | UsePositionType new_type = UsePositionType::kRegisterOrSlot; |
2635 | // Can't mark phis as needing a register. |
2636 | if (!pos->pos().IsGapPosition()) { |
2637 | new_type = UsePositionType::kRequiresRegister; |
2638 | } |
2639 | pos->set_type(new_type, true); |
2640 | } |
2641 | } |
2642 | } |
2643 | for (auto preassigned : data()->preassigned_slot_ranges()) { |
2644 | TopLevelLiveRange* range = preassigned.first; |
2645 | int slot_id = preassigned.second; |
2646 | SpillRange* spill = range->HasSpillRange() |
2647 | ? range->GetSpillRange() |
2648 | : data()->AssignSpillRangeToLiveRange( |
2649 | range, SpillMode::kSpillAtDefinition); |
2650 | spill->set_assigned_slot(slot_id); |
2651 | } |
2652 | #ifdef DEBUG |
2653 | Verify(); |
2654 | #endif |
2655 | } |
2656 | |
2657 | void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand, |
2658 | UsePosition* use_pos) { |
2659 | DCHECK(!use_pos->IsResolved()); |
2660 | auto res = phi_hints_.insert(std::make_pair(operand, use_pos)); |
2661 | DCHECK(res.second); |
2662 | USE(res); |
2663 | } |
2664 | |
2665 | void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand, |
2666 | UsePosition* use_pos) { |
2667 | auto it = phi_hints_.find(operand); |
2668 | if (it == phi_hints_.end()) return; |
2669 | DCHECK(!it->second->IsResolved()); |
2670 | it->second->ResolveHint(use_pos); |
2671 | } |
2672 | |
2673 | void LiveRangeBuilder::Verify() const { |
2674 | for (auto& hint : phi_hints_) { |
2675 | CHECK(hint.second->IsResolved()); |
2676 | } |
2677 | for (const TopLevelLiveRange* current : data()->live_ranges()) { |
2678 | if (current != nullptr && !current->IsEmpty()) { |
2679 | // New LiveRanges should not be split. |
2680 | CHECK_NULL(current->next()); |
2681 | // General integrity check. |
2682 | current->Verify(); |
2683 | const UseInterval* first = current->first_interval(); |
2684 | if (first->next() == nullptr) continue; |
2685 | |
2686 | // Consecutive intervals should not end and start in the same block, |
2687 | // otherwise the intervals should have been joined, because the |
2688 | // variable is live throughout that block. |
2689 | CHECK(NextIntervalStartsInDifferentBlocks(first)); |
2690 | |
2691 | for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) { |
        // Except for the first interval, the other intervals must start at
        // a block boundary, otherwise data wouldn't flow to them.
2694 | CHECK(IntervalStartsAtBlockBoundary(i)); |
2695 | // The last instruction of the predecessors of the block the interval |
2696 | // starts must be covered by the range. |
2697 | CHECK(IntervalPredecessorsCoveredByRange(i, current)); |
2698 | if (i->next() != nullptr) { |
2699 | // Check the consecutive intervals property, except for the last |
2700 | // interval, where it doesn't apply. |
2701 | CHECK(NextIntervalStartsInDifferentBlocks(i)); |
2702 | } |
2703 | } |
2704 | } |
2705 | } |
2706 | } |
2707 | |
2708 | bool LiveRangeBuilder::IntervalStartsAtBlockBoundary( |
2709 | const UseInterval* interval) const { |
2710 | LifetimePosition start = interval->start(); |
2711 | if (!start.IsFullStart()) return false; |
2712 | int instruction_index = start.ToInstructionIndex(); |
2713 | const InstructionBlock* block = |
2714 | data()->code()->GetInstructionBlock(instruction_index); |
2715 | return block->first_instruction_index() == instruction_index; |
2716 | } |
2717 | |
2718 | bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange( |
2719 | const UseInterval* interval, const TopLevelLiveRange* range) const { |
2720 | LifetimePosition start = interval->start(); |
2721 | int instruction_index = start.ToInstructionIndex(); |
2722 | const InstructionBlock* block = |
2723 | data()->code()->GetInstructionBlock(instruction_index); |
2724 | for (RpoNumber pred_index : block->predecessors()) { |
2725 | const InstructionBlock* predecessor = |
2726 | data()->code()->InstructionBlockAt(pred_index); |
2727 | LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex( |
2728 | predecessor->last_instruction_index()); |
2729 | last_pos = last_pos.NextStart().End(); |
2730 | if (!range->Covers(last_pos)) return false; |
2731 | } |
2732 | return true; |
2733 | } |
2734 | |
2735 | bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks( |
2736 | const UseInterval* interval) const { |
2737 | DCHECK_NOT_NULL(interval->next()); |
2738 | LifetimePosition end = interval->end(); |
2739 | LifetimePosition next_start = interval->next()->start(); |
  // Since end is not covered, but the previous position is, move back a
  // position.
2742 | end = end.IsStart() ? end.PrevStart().End() : end.Start(); |
2743 | int last_covered_index = end.ToInstructionIndex(); |
2744 | const InstructionBlock* block = |
2745 | data()->code()->GetInstructionBlock(last_covered_index); |
2746 | const InstructionBlock* next_block = |
2747 | data()->code()->GetInstructionBlock(next_start.ToInstructionIndex()); |
2748 | return block->rpo_number() < next_block->rpo_number(); |
2749 | } |
2750 | |
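// Groups each phi's output live range with its input live ranges into a
// LiveRangeBundle whenever their uses do not overlap. Ranges in one bundle
// can later share a spill slot (see MergeSpillRanges), which in turn can let
// the moves that feed the phi be elided when the ranges end up in the same
// location.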
2751 | void BundleBuilder::BuildBundles() { |
2752 | TRACE("Build bundles\n" ); |
2753 | // Process the blocks in reverse order. |
2754 | for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0; |
2755 | --block_id) { |
2756 | InstructionBlock* block = |
2757 | code()->InstructionBlockAt(RpoNumber::FromInt(block_id)); |
2758 | TRACE("Block B%d\n" , block_id); |
2759 | for (auto phi : block->phis()) { |
2760 | LiveRange* out_range = |
2761 | data()->GetOrCreateLiveRangeFor(phi->virtual_register()); |
2762 | LiveRangeBundle* out = out_range->get_bundle(); |
2763 | if (out == nullptr) { |
2764 | out = new (data()->allocation_zone()) |
2765 | LiveRangeBundle(data()->allocation_zone(), next_bundle_id_++); |
2766 | out->TryAddRange(out_range); |
2767 | } |
2768 | TRACE("Processing phi for v%d with %d:%d\n" , phi->virtual_register(), |
2769 | out_range->TopLevel()->vreg(), out_range->relative_id()); |
2770 | for (auto input : phi->operands()) { |
2771 | LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input); |
2772 | TRACE("Input value v%d with range %d:%d\n" , input, |
2773 | input_range->TopLevel()->vreg(), input_range->relative_id()); |
2774 | LiveRangeBundle* input_bundle = input_range->get_bundle(); |
2775 | if (input_bundle != nullptr) { |
2776 | TRACE("Merge\n" ); |
2777 | if (out->TryMerge(input_bundle)) |
2778 | TRACE("Merged %d and %d to %d\n" , phi->virtual_register(), input, |
2779 | out->id()); |
2780 | } else { |
2781 | TRACE("Add\n" ); |
2782 | if (out->TryAddRange(input_range)) |
2783 | TRACE("Added %d and %d to %d\n" , phi->virtual_register(), input, |
2784 | out->id()); |
2785 | } |
2786 | } |
2787 | } |
2788 | TRACE("Done block B%d\n" , block_id); |
2789 | } |
2790 | } |
2791 | |
2792 | bool LiveRangeBundle::TryAddRange(LiveRange* range) { |
2793 | DCHECK_NULL(range->get_bundle()); |
2794 | // We may only add a new live range if its use intervals do not |
2795 | // overlap with existing intervals in the bundle. |
2796 | if (UsesOverlap(range->first_interval())) return false; |
2797 | ranges_.insert(range); |
2798 | range->set_bundle(this); |
2799 | InsertUses(range->first_interval()); |
2800 | return true; |
2801 | } |

bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
2803 | if (other == this) return true; |
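  // The uses in each bundle are sorted by start position. Sweep both lists in
  // parallel, advancing whichever interval lies entirely before the other;
  // any pair where neither precedes the other overlaps, and merging fails.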
2804 | |
2805 | auto iter1 = uses_.begin(); |
2806 | auto iter2 = other->uses_.begin(); |
2807 | |
2808 | while (iter1 != uses_.end() && iter2 != other->uses_.end()) { |
2809 | if (iter1->start > iter2->end) { |
2810 | ++iter2; |
2811 | } else if (iter2->start > iter1->end) { |
2812 | ++iter1; |
2813 | } else { |
2814 | TRACE("No merge %d:%d %d:%d\n" , iter1->start, iter1->end, iter2->start, |
2815 | iter2->end); |
2816 | return false; |
2817 | } |
2818 | } |
2819 | // Uses are disjoint, merging is possible. |
2820 | for (auto it = other->ranges_.begin(); it != other->ranges_.end(); ++it) { |
2821 | (*it)->set_bundle(this); |
2822 | InsertUses((*it)->first_interval()); |
2823 | } |
2824 | ranges_.insert(other->ranges_.begin(), other->ranges_.end()); |
2825 | other->ranges_.clear(); |
2826 | |
2827 | return true; |
2828 | } |
2829 | |
2830 | void LiveRangeBundle::MergeSpillRanges() { |
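  // Merge the spill ranges of all bundle members into the first one found,
  // so that the members can share a single spill slot.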
2831 | SpillRange* target = nullptr; |
2832 | for (auto range : ranges_) { |
2833 | if (range->TopLevel()->HasSpillRange()) { |
2834 | SpillRange* current = range->TopLevel()->GetSpillRange(); |
2835 | if (target == nullptr) { |
2836 | target = current; |
2837 | } else if (target != current) { |
2838 | target->TryMerge(current); |
2839 | } |
2840 | } |
2841 | } |
2842 | } |
2843 | |
2844 | RegisterAllocator::RegisterAllocator(RegisterAllocationData* data, |
2845 | RegisterKind kind) |
2846 | : data_(data), |
2847 | mode_(kind), |
2848 | num_registers_(GetRegisterCount(data->config(), kind)), |
2849 | num_allocatable_registers_( |
2850 | GetAllocatableRegisterCount(data->config(), kind)), |
2851 | allocatable_register_codes_( |
2852 | GetAllocatableRegisterCodes(data->config(), kind)), |
2853 | check_fp_aliasing_(false) { |
2854 | if (!kSimpleFPAliasing && kind == FP_REGISTERS) { |
2855 | check_fp_aliasing_ = (data->code()->representation_mask() & |
2856 | (kFloat32Bit | kSimd128Bit)) != 0; |
2857 | } |
2858 | } |
2859 | |
2860 | LifetimePosition RegisterAllocator::GetSplitPositionForInstruction( |
2861 | const LiveRange* range, int instruction_index) { |
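  // The only valid split position derived from an instruction is the gap
  // before it, and only if that gap lies strictly inside the range.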
2862 | LifetimePosition ret = LifetimePosition::Invalid(); |
2863 | |
2864 | ret = LifetimePosition::GapFromInstructionIndex(instruction_index); |
2865 | if (range->Start() >= ret || ret >= range->End()) { |
2866 | return LifetimePosition::Invalid(); |
2867 | } |
2868 | return ret; |
2869 | } |
2870 | |
2871 | void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
2872 | size_t initial_range_count = data()->live_ranges().size(); |
2873 | for (size_t i = 0; i < initial_range_count; ++i) { |
2874 | CHECK_EQ(initial_range_count, |
2875 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
2876 | TopLevelLiveRange* range = data()->live_ranges()[i]; |
2877 | if (!CanProcessRange(range)) continue; |
2878 | // Only assume defined by memory operand if we are guaranteed to spill it or |
2879 | // it has a spill operand. |
2880 | if (range->HasNoSpillType() || |
2881 | (range->HasSpillRange() && !range->has_non_deferred_slot_use())) { |
2882 | continue; |
2883 | } |
2884 | LifetimePosition start = range->Start(); |
2885 | TRACE("Live range %d:%d is defined by a spill operand.\n" , |
2886 | range->TopLevel()->vreg(), range->relative_id()); |
2887 | LifetimePosition next_pos = start; |
2888 | if (next_pos.IsGapPosition()) { |
2889 | next_pos = next_pos.NextStart(); |
2890 | } |
2891 | |
2892 | // With splinters, we can be more strict and skip over positions |
2893 | // not strictly needing registers. |
2894 | UsePosition* pos = |
2895 | range->IsSplinter() |
2896 | ? range->NextRegisterPosition(next_pos) |
2897 | : range->NextUsePositionRegisterIsBeneficial(next_pos); |
2898 | // If the range already has a spill operand and it doesn't need a |
2899 | // register immediately, split it and spill the first part of the range. |
2900 | if (pos == nullptr) { |
2901 | Spill(range, SpillMode::kSpillAtDefinition); |
2902 | } else if (pos->pos() > range->Start().NextStart()) { |
2903 | // Do not spill live range eagerly if use position that can benefit from |
2904 | // the register is too close to the start of live range. |
2905 | LifetimePosition split_pos = GetSplitPositionForInstruction( |
2906 | range, pos->pos().ToInstructionIndex()); |
2907 | // There is no place to split, so we can't split and spill. |
2908 | if (!split_pos.IsValid()) continue; |
2909 | |
2910 | split_pos = |
2911 | FindOptimalSplitPos(range->Start().NextFullStart(), split_pos); |
2912 | |
2913 | SplitRangeAt(range, split_pos); |
2914 | Spill(range, SpillMode::kSpillAtDefinition); |
2915 | } |
2916 | } |
2917 | } |
2918 | |
2919 | LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, |
2920 | LifetimePosition pos) { |
2921 | DCHECK(!range->TopLevel()->IsFixed()); |
2922 | TRACE("Splitting live range %d:%d at %d\n" , range->TopLevel()->vreg(), |
2923 | range->relative_id(), pos.value()); |
2924 | |
2925 | if (pos <= range->Start()) return range; |
2926 | |
  // We can't properly connect live ranges if splitting occurred at the end
  // of a block.
2929 | DCHECK(pos.IsStart() || pos.IsGapPosition() || |
2930 | (GetInstructionBlock(code(), pos)->last_instruction_index() != |
2931 | pos.ToInstructionIndex())); |
2932 | |
2933 | LiveRange* result = range->SplitAt(pos, allocation_zone()); |
2934 | return result; |
2935 | } |
2936 | |
2937 | LiveRange* RegisterAllocator::SplitBetween(LiveRange* range, |
2938 | LifetimePosition start, |
2939 | LifetimePosition end) { |
2940 | DCHECK(!range->TopLevel()->IsFixed()); |
2941 | TRACE("Splitting live range %d:%d in position between [%d, %d]\n" , |
2942 | range->TopLevel()->vreg(), range->relative_id(), start.value(), |
2943 | end.value()); |
2944 | |
2945 | LifetimePosition split_pos = FindOptimalSplitPos(start, end); |
2946 | DCHECK(split_pos >= start); |
2947 | return SplitRangeAt(range, split_pos); |
2948 | } |
2949 | |
2950 | LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, |
2951 | LifetimePosition end) { |
2952 | int start_instr = start.ToInstructionIndex(); |
2953 | int end_instr = end.ToInstructionIndex(); |
2954 | DCHECK_LE(start_instr, end_instr); |
2955 | |
2956 | // We have no choice |
2957 | if (start_instr == end_instr) return end; |
2958 | |
2959 | const InstructionBlock* start_block = GetInstructionBlock(code(), start); |
2960 | const InstructionBlock* end_block = GetInstructionBlock(code(), end); |
2961 | |
2962 | if (end_block == start_block) { |
2963 | // The interval is split in the same basic block. Split at the latest |
2964 | // possible position. |
2965 | return end; |
2966 | } |
2967 | |
2968 | const InstructionBlock* block = end_block; |
2969 | // Find header of outermost loop. |
2970 | do { |
2971 | const InstructionBlock* loop = GetContainingLoop(code(), block); |
2972 | if (loop == nullptr || |
2973 | loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) { |
2974 | // No more loops or loop starts before the lifetime start. |
2975 | break; |
2976 | } |
2977 | block = loop; |
2978 | } while (true); |
2979 | |
2980 | // We did not find any suitable outer loop. Split at the latest possible |
2981 | // position unless end_block is a loop header itself. |
2982 | if (block == end_block && !end_block->IsLoopHeader()) return end; |
2983 | |
2984 | return LifetimePosition::GapFromInstructionIndex( |
2985 | block->first_instruction_index()); |
2986 | } |
2987 | |
2988 | LifetimePosition RegisterAllocator::FindOptimalSpillingPos( |
2989 | LiveRange* range, LifetimePosition pos) { |
2990 | const InstructionBlock* block = GetInstructionBlock(code(), pos.Start()); |
  const InstructionBlock* loop_header =
2992 | block->IsLoopHeader() ? block : GetContainingLoop(code(), block); |
2993 | |
2994 | if (loop_header == nullptr) return pos; |
2995 | |
2996 | const UsePosition* prev_use = |
2997 | range->PreviousUsePositionRegisterIsBeneficial(pos); |
2998 | |
2999 | while (loop_header != nullptr) { |
    // We are going to spill the live range inside the loop.
    // If possible, try to move the spilling position backwards to the loop
    // header, as this reduces the number of memory moves on the back edge.
3003 | LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex( |
3004 | loop_header->first_instruction_index()); |
3005 | |
3006 | if (range->Covers(loop_start)) { |
3007 | if (prev_use == nullptr || prev_use->pos() < loop_start) { |
        // No register-beneficial use inside the loop before the position.
3009 | pos = loop_start; |
3010 | } |
3011 | } |
3012 | |
3013 | // Try hoisting out to an outer loop. |
3014 | loop_header = GetContainingLoop(code(), loop_header); |
3015 | } |
3016 | |
3017 | return pos; |
3018 | } |
3019 | |
3020 | void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) { |
3021 | DCHECK(!range->spilled()); |
3022 | DCHECK(spill_mode == SpillMode::kSpillAtDefinition || |
3023 | GetInstructionBlock(code(), range->Start())->IsDeferred()); |
3024 | TopLevelLiveRange* first = range->TopLevel(); |
3025 | TRACE("Spilling live range %d:%d mode %d\n" , first->vreg(), |
3026 | range->relative_id(), spill_mode); |
3027 | |
3028 | TRACE("Starting spill type is %d\n" , static_cast<int>(first->spill_type())); |
3029 | if (first->HasNoSpillType()) { |
3030 | TRACE("New spill range needed" ); |
3031 | data()->AssignSpillRangeToLiveRange(first, spill_mode); |
3032 | } |
  // Upgrade the spill mode, in case this was only spilled in deferred code so
  // far.
3035 | if ((spill_mode == SpillMode::kSpillAtDefinition) && |
3036 | (first->spill_type() == |
3037 | TopLevelLiveRange::SpillType::kDeferredSpillRange)) { |
3038 | TRACE("Upgrading\n" ); |
3039 | first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange); |
3040 | } |
3041 | TRACE("Final spill type is %d\n" , static_cast<int>(first->spill_type())); |
3042 | range->Spill(); |
3043 | } |
3044 | |
3045 | const char* RegisterAllocator::RegisterName(int register_code) const { |
3046 | return mode() == GENERAL_REGISTERS |
3047 | ? i::RegisterName(Register::from_code(register_code)) |
3048 | : i::RegisterName(DoubleRegister::from_code(register_code)); |
3049 | } |
3050 | |
3051 | LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, |
3052 | RegisterKind kind, Zone* local_zone) |
3053 | : RegisterAllocator(data, kind), |
3054 | unhandled_live_ranges_(local_zone), |
3055 | active_live_ranges_(local_zone), |
3056 | inactive_live_ranges_(local_zone), |
3057 | next_active_ranges_change_(LifetimePosition::Invalid()), |
3058 | next_inactive_ranges_change_(LifetimePosition::Invalid()) { |
3059 | active_live_ranges().reserve(8); |
3060 | inactive_live_ranges().reserve(8); |
3061 | } |
3062 | |
3063 | void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) { |
3064 | if (range->next() != nullptr && range->next()->ShouldRecombine()) { |
3065 | LiveRange* to_remove = range->next(); |
3066 | TRACE("Recombining %d:%d with %d\n" , range->TopLevel()->vreg(), |
3067 | range->relative_id(), to_remove->relative_id()); |
3068 | |
3069 | // Remove the range from unhandled, as attaching it will change its |
3070 | // state and hence ordering in the unhandled set. |
3071 | auto removed_cnt = unhandled_live_ranges().erase(to_remove); |
3072 | DCHECK_EQ(removed_cnt, 1); |
3073 | USE(removed_cnt); |
3074 | |
3075 | range->AttachToNext(); |
3076 | } else if (range->next() != nullptr) { |
3077 | TRACE("No recombine for %d:%d to %d\n" , range->TopLevel()->vreg(), |
3078 | range->relative_id(), range->next()->relative_id()); |
3079 | } |
3080 | } |
3081 | |
3082 | void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live, |
3083 | LifetimePosition position, |
3084 | SpillMode spill_mode) { |
3085 | for (auto it = active_live_ranges().begin(); |
3086 | it != active_live_ranges().end();) { |
3087 | LiveRange* active_range = *it; |
3088 | TopLevelLiveRange* toplevel = (*it)->TopLevel(); |
3089 | auto found = to_be_live.find({toplevel, kUnassignedRegister}); |
3090 | if (found == to_be_live.end()) { |
3091 | // Is not contained in {to_be_live}, spill it. |
3092 | // Fixed registers are exempt from this. They might have been |
3093 | // added from inactive at the block boundary but we know that |
3094 | // they cannot conflict as they are built before register |
3095 | // allocation starts. It would be algorithmically fine to split |
      // them and reschedule, but the code does not support this.
3097 | if (toplevel->IsFixed()) { |
3098 | TRACE("Keeping reactivated fixed range for %s\n" , |
3099 | RegisterName(toplevel->assigned_register())); |
3100 | ++it; |
3101 | } else { |
3102 | // When spilling a previously spilled/reloaded range, we add back the |
3103 | // tail that we might have split off when we reloaded/spilled it |
3104 | // previously. Otherwise we might keep generating small split-offs. |
3105 | MaybeUndoPreviousSplit(active_range); |
3106 | TRACE("Putting back %d:%d\n" , toplevel->vreg(), |
3107 | active_range->relative_id()); |
3108 | LiveRange* split = SplitRangeAt(active_range, position); |
3109 | DCHECK_NE(split, active_range); |
3110 | |
3111 | // Make sure we revisit this range once it has a use that requires |
3112 | // a register. |
3113 | UsePosition* next_use = split->NextRegisterPosition(position); |
3114 | if (next_use != nullptr) { |
3115 | // Move to the start of the gap before use so that we have a space |
3116 | // to perform the potential reload. Otherwise, do not spill but add |
3117 | // to unhandled for reallocation. |
3118 | LifetimePosition revisit_at = next_use->pos().FullStart(); |
3119 | TRACE("Next use at %d\n" , revisit_at.value()); |
3120 | if (!data()->IsBlockBoundary(revisit_at)) { |
3121 | // Leave some space so we have enough gap room. |
3122 | revisit_at = revisit_at.PrevStart().FullStart(); |
3123 | } |
          // If this range became live right at the block boundary that we are
3125 | // currently processing, we do not need to split it. Instead move it |
3126 | // to unhandled right away. |
3127 | if (position < revisit_at) { |
3128 | LiveRange* third_part = SplitRangeAt(split, revisit_at); |
3129 | DCHECK_NE(split, third_part); |
3130 | Spill(split, spill_mode); |
3131 | TRACE("Marking %d:%d to recombine\n" , toplevel->vreg(), |
3132 | third_part->relative_id()); |
3133 | third_part->SetRecombine(); |
3134 | AddToUnhandled(third_part); |
3135 | } else { |
3136 | AddToUnhandled(split); |
3137 | } |
3138 | } else { |
3139 | Spill(split, spill_mode); |
3140 | } |
3141 | it = ActiveToHandled(it); |
3142 | } |
3143 | } else { |
3144 | // This range is contained in {to_be_live}, so we can keep it. |
3145 | int expected_register = (*found).expected_register; |
3146 | to_be_live.erase(found); |
3147 | if (expected_register == active_range->assigned_register()) { |
        // Was live and in the correct register, simply pass through.
        TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
3150 | active_range->relative_id(), |
3151 | RegisterName(active_range->assigned_register())); |
3152 | ++it; |
3153 | } else { |
        // Was live but in the wrong register. Split and schedule for
        // allocation.
        TRACE("Scheduling %d:%d\n", toplevel->vreg(),
3157 | active_range->relative_id()); |
3158 | LiveRange* split = SplitRangeAt(active_range, position); |
3159 | split->set_controlflow_hint(expected_register); |
3160 | AddToUnhandled(split); |
3161 | it = ActiveToHandled(it); |
3162 | } |
3163 | } |
3164 | } |
3165 | } |
3166 | |
3167 | LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range, |
3168 | int reg) { |
  // We know the register is currently free but it might be in
  // use by a currently inactive range. So we might not be able
  // to reload for the full distance. In such a case, split here.
  // TODO(herhut):
  // It might be better if we could use the normal unhandled queue and
  // give reloading registers precedence. That way we would compute the
  // intersection for the entire future.
3176 | LifetimePosition new_end = range->End(); |
3177 | for (const auto inactive : inactive_live_ranges()) { |
3178 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
3179 | if (inactive->assigned_register() != reg) continue; |
3180 | } else { |
3181 | bool conflict = inactive->assigned_register() == reg; |
3182 | if (!conflict) { |
3183 | int alias_base_index = -1; |
3184 | int aliases = data()->config()->GetAliases(range->representation(), reg, |
3185 | inactive->representation(), |
3186 | &alias_base_index); |
3187 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
3188 | while (aliases-- && !conflict) { |
3189 | int aliased_reg = alias_base_index + aliases; |
3190 | if (aliased_reg == reg) { |
3191 | conflict = true; |
3192 | } |
3193 | } |
3194 | } |
3195 | if (!conflict) continue; |
3196 | } |
3197 | for (auto interval = inactive->first_interval(); interval != nullptr; |
3198 | interval = interval->next()) { |
3199 | if (interval->start() > new_end) break; |
3200 | if (interval->end() <= range->Start()) continue; |
3201 | if (new_end > interval->start()) new_end = interval->start(); |
3202 | } |
3203 | } |
3204 | if (new_end != range->End()) { |
3205 | TRACE("Found new end for %d:%d at %d\n" , range->TopLevel()->vreg(), |
3206 | range->relative_id(), new_end.value()); |
3207 | LiveRange* tail = SplitRangeAt(range, new_end); |
3208 | AddToUnhandled(tail); |
3209 | } |
3210 | SetLiveRangeAssignedRegister(range, reg); |
3211 | return range; |
3212 | } |
3213 | |
3214 | void LinearScanAllocator::ReloadLiveRanges(RangeWithRegisterSet& to_be_live, |
3215 | LifetimePosition position) { |
3216 | // Assumption: All ranges in {to_be_live} are currently spilled and there are |
3217 | // no conflicting registers in the active ranges. |
3218 | // The former is ensured by SpillNotLiveRanges, the latter is by construction |
3219 | // of the to_be_live set. |
3220 | for (RangeWithRegister range_with_register : to_be_live) { |
3221 | TopLevelLiveRange* range = range_with_register.range; |
3222 | int reg = range_with_register.expected_register; |
3223 | LiveRange* to_resurrect = range->GetChildCovers(position); |
3224 | if (to_resurrect == nullptr) { |
      // While the range was live until the end of the predecessor block, it is
      // not live in this block. Either there is a lifetime gap or the range
      // died.
      TRACE("No candidate for %d at %d\n", range->vreg(), position.value());
3229 | } else { |
3230 | // We might be resurrecting a range that we spilled until its next use |
3231 | // before. In such cases, we have to unsplit it before processing as |
3232 | // otherwise we might get register changes from one range to the other |
3233 | // in the middle of blocks. |
3234 | // If there is a gap between this range and the next, we can just keep |
3235 | // it as a register change won't hurt. |
3236 | MaybeUndoPreviousSplit(to_resurrect); |
3237 | if (to_resurrect->Start() == position) { |
3238 | // This range already starts at this block. It might have been spilled, |
3239 | // so we have to unspill it. Otherwise, it is already in the unhandled |
3240 | // queue waiting for processing. |
3241 | DCHECK(!to_resurrect->HasRegisterAssigned()); |
3242 | TRACE("Reload %d:%d starting at %d itself\n" , range->vreg(), |
3243 | to_resurrect->relative_id(), position.value()); |
3244 | if (to_resurrect->spilled()) { |
3245 | to_resurrect->Unspill(); |
3246 | to_resurrect->set_controlflow_hint(reg); |
3247 | AddToUnhandled(to_resurrect); |
3248 | } else { |
          // Assign the preassigned register if we know it. Otherwise, there is
          // nothing to do, as the range is already in unhandled.
3251 | if (reg != kUnassignedRegister) { |
3252 | auto erased_cnt = unhandled_live_ranges().erase(to_resurrect); |
3253 | DCHECK_EQ(erased_cnt, 1); |
3254 | USE(erased_cnt); |
3255 | // We know that there is no conflict with active ranges, so just |
3256 | // assign the register to the range. |
3257 | to_resurrect = AssignRegisterOnReload(to_resurrect, reg); |
3258 | AddToActive(to_resurrect); |
3259 | } |
3260 | } |
3261 | } else { |
3262 | // This range was spilled before. We have to split it and schedule the |
3263 | // second part for allocation (or assign the register if we know). |
3264 | DCHECK(to_resurrect->spilled()); |
3265 | LiveRange* split = SplitRangeAt(to_resurrect, position); |
3266 | TRACE("Reload %d:%d starting at %d as %d\n" , range->vreg(), |
3267 | to_resurrect->relative_id(), split->Start().value(), |
3268 | split->relative_id()); |
3269 | DCHECK_NE(split, to_resurrect); |
3270 | if (reg != kUnassignedRegister) { |
3271 | // We know that there is no conflict with active ranges, so just |
3272 | // assign the register to the range. |
3273 | split = AssignRegisterOnReload(split, reg); |
3274 | AddToActive(split); |
3275 | } else { |
3276 | // Let normal register assignment find a suitable register. |
3277 | split->set_controlflow_hint(reg); |
3278 | AddToUnhandled(split); |
3279 | } |
3280 | } |
3281 | } |
3282 | } |
3283 | } |
3284 | |
3285 | RpoNumber LinearScanAllocator::ChooseOneOfTwoPredecessorStates( |
3286 | InstructionBlock* current_block, LifetimePosition boundary) { |
3287 | using SmallRangeVector = |
3288 | base::SmallVector<TopLevelLiveRange*, |
3289 | RegisterConfiguration::kMaxRegisters>; |
3290 | // Pick the state that would generate the least spill/reloads. |
3291 | // Compute vectors of ranges with imminent use for both sides. |
3292 | // As GetChildCovers is cached, it is cheaper to repeatedly |
  // call it rather than compute a shared set first.
3294 | auto& left = data()->GetSpillState(current_block->predecessors()[0]); |
3295 | auto& right = data()->GetSpillState(current_block->predecessors()[1]); |
3296 | SmallRangeVector left_used; |
3297 | for (const auto item : left) { |
3298 | LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary); |
3299 | if (at_next_block != nullptr && |
3300 | at_next_block->NextUsePositionRegisterIsBeneficial(boundary) != |
3301 | nullptr) { |
3302 | left_used.emplace_back(item->TopLevel()); |
3303 | } |
3304 | } |
3305 | SmallRangeVector right_used; |
3306 | for (const auto item : right) { |
3307 | LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary); |
3308 | if (at_next_block != nullptr && |
3309 | at_next_block->NextUsePositionRegisterIsBeneficial(boundary) != |
3310 | nullptr) { |
3311 | right_used.emplace_back(item->TopLevel()); |
3312 | } |
3313 | } |
3314 | if (left_used.empty() && right_used.empty()) { |
3315 | // There are no beneficial register uses. Look at any use at |
3316 | // all. We do not account for all uses, like flowing into a phi. |
3317 | // So we just look at ranges still being live. |
3318 | TRACE("Looking at only uses\n" ); |
3319 | for (const auto item : left) { |
3320 | LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary); |
3321 | if (at_next_block != nullptr && |
3322 | at_next_block->NextUsePosition(boundary) != nullptr) { |
3323 | left_used.emplace_back(item->TopLevel()); |
3324 | } |
3325 | } |
3326 | for (const auto item : right) { |
3327 | LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary); |
3328 | if (at_next_block != nullptr && |
3329 | at_next_block->NextUsePosition(boundary) != nullptr) { |
3330 | right_used.emplace_back(item->TopLevel()); |
3331 | } |
3332 | } |
3333 | } |
  // Now left_used and right_used contain the ranges that matter. Pick the
  // side with more of them.
3336 | TRACE("Vote went %zu vs %zu\n" , left_used.size(), right_used.size()); |
3337 | return left_used.size() > right_used.size() |
3338 | ? current_block->predecessors()[0] |
3339 | : current_block->predecessors()[1]; |
3340 | } |
3341 | |
3342 | void LinearScanAllocator::ComputeStateFromManyPredecessors( |
3343 | InstructionBlock* current_block, RangeWithRegisterSet* to_be_live) { |
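  // A Vote tallies, per live range, in how many of the considered
  // predecessors the range was live in a register at the end of the block,
  // and which registers it occupied there.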
3344 | struct Vote { |
3345 | size_t count; |
3346 | int used_registers[RegisterConfiguration::kMaxRegisters]; |
3347 | }; |
3348 | struct TopLevelLiveRangeComparator { |
3349 | bool operator()(const TopLevelLiveRange* lhs, |
3350 | const TopLevelLiveRange* rhs) const { |
3351 | return lhs->vreg() < rhs->vreg(); |
3352 | } |
3353 | }; |
3354 | ZoneMap<TopLevelLiveRange*, Vote, TopLevelLiveRangeComparator> counts( |
3355 | data()->allocation_zone()); |
3356 | int deferred_blocks = 0; |
3357 | for (RpoNumber pred : current_block->predecessors()) { |
3358 | if (!ConsiderBlockForControlFlow(current_block, pred)) { |
3359 | // Back edges of a loop count as deferred here too. |
3360 | deferred_blocks++; |
3361 | continue; |
3362 | } |
3363 | const auto& pred_state = data()->GetSpillState(pred); |
3364 | for (LiveRange* range : pred_state) { |
      // A backwards-working spill heuristic might have taken the register
      // away again, so the range we stored might have lost its register.
      // Ignore those.
3367 | if (!range->HasRegisterAssigned()) continue; |
3368 | TopLevelLiveRange* toplevel = range->TopLevel(); |
3369 | auto previous = counts.find(toplevel); |
3370 | if (previous == counts.end()) { |
3371 | auto result = counts.emplace(std::make_pair(toplevel, Vote{1, {0}})); |
3372 | CHECK(result.second); |
3373 | result.first->second.used_registers[range->assigned_register()]++; |
3374 | } else { |
3375 | previous->second.count++; |
3376 | previous->second.used_registers[range->assigned_register()]++; |
3377 | } |
3378 | } |
3379 | } |
3380 | |
3381 | // Choose the live ranges from the majority. |
3382 | const size_t majority = |
3383 | (current_block->PredecessorCount() + 2 - deferred_blocks) / 2; |
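  // With integer division this is floor(considered / 2) + 1, where considered
  // is the number of predecessors not skipped above, i.e. a strict majority
  // is required.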
3384 | bool taken_registers[RegisterConfiguration::kMaxRegisters] = {0}; |
3385 | auto assign_to_live = [this, counts, majority]( |
3386 | std::function<bool(TopLevelLiveRange*)> filter, |
3387 | RangeWithRegisterSet* to_be_live, |
3388 | bool* taken_registers) { |
3389 | for (const auto& val : counts) { |
3390 | if (!filter(val.first)) continue; |
3391 | if (val.second.count >= majority) { |
3392 | int register_max = 0; |
3393 | int reg = kUnassignedRegister; |
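        // Pick the register this range used in most predecessors; on a tie,
        // prefer a register that has not yet been taken by another range.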
3394 | for (int idx = 0; idx < RegisterConfiguration::kMaxRegisters; idx++) { |
3395 | int uses = val.second.used_registers[idx]; |
3396 | if (uses == 0) continue; |
3397 | if (uses > register_max) { |
3398 | reg = idx; |
3399 | register_max = val.second.used_registers[idx]; |
3400 | } else if (taken_registers[reg] && uses == register_max) { |
3401 | reg = idx; |
3402 | } |
3403 | } |
3404 | if (taken_registers[reg]) { |
3405 | reg = kUnassignedRegister; |
3406 | } else { |
3407 | taken_registers[reg] = true; |
3408 | } |
3409 | to_be_live->emplace(val.first, reg); |
3410 | TRACE("Reset %d as live due vote %zu in %s\n" , |
3411 | val.first->TopLevel()->vreg(), val.second.count, |
3412 | reg == kUnassignedRegister ? "unassigned" : RegisterName(reg)); |
3413 | } |
3414 | } |
3415 | }; |
3416 | // First round, process fixed registers, as these have precedence. |
3417 | // There is only one fixed range per register, so we cannot have |
3418 | // conflicts. |
3419 | assign_to_live([](TopLevelLiveRange* r) { return r->IsFixed(); }, to_be_live, |
3420 | taken_registers); |
3421 | // Second round, process the rest. |
3422 | assign_to_live([](TopLevelLiveRange* r) { return !r->IsFixed(); }, to_be_live, |
3423 | taken_registers); |
3424 | } |
3425 | |
3426 | bool LinearScanAllocator::ConsiderBlockForControlFlow( |
3427 | InstructionBlock* current_block, RpoNumber predecessor) { |
3428 | // We ignore predecessors on back edges when looking for control flow effects, |
3429 | // as those lie in the future of allocation and we have no data yet. Also, |
  // deferred blocks are ignored on deferred to non-deferred boundaries, as we
  // do not want them to influence allocation of non-deferred code.
3432 | return (predecessor < current_block->rpo_number()) && |
3433 | (current_block->IsDeferred() || |
3434 | !code()->InstructionBlockAt(predecessor)->IsDeferred()); |
3435 | } |
3436 | |
3437 | void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, |
3438 | InstructionBlock* block) { |
3439 | if (spill_mode == SpillMode::kSpillDeferred) { |
3440 | LifetimePosition max = LifetimePosition::InstructionFromInstructionIndex( |
3441 | LastDeferredInstructionIndex(block)); |
3442 | // Adds range back to inactive, resolving resulting conflicts. |
3443 | auto add_to_inactive = [this, max](LiveRange* range) { |
3444 | AddToInactive(range); |
3445 | // Splits other if it conflicts with range. Other is placed in unhandled |
3446 | // for later reallocation. |
3447 | auto split_conflicting = [this, max](LiveRange* range, LiveRange* other, |
3448 | std::function<void(LiveRange*)> |
3449 | update_caches) { |
3450 | if (other->TopLevel()->IsFixed()) return; |
3451 | int reg = range->assigned_register(); |
3452 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
3453 | if (other->assigned_register() != reg) { |
3454 | return; |
3455 | } |
3456 | } else { |
3457 | if (!data()->config()->AreAliases(range->representation(), reg, |
3458 | other->representation(), |
3459 | other->assigned_register())) { |
3460 | return; |
3461 | } |
3462 | } |
3463 | // The inactive range might conflict, so check whether we need to |
3464 | // split and spill. We can look for the first intersection, as there |
3465 | // cannot be any intersections in the past, as those would have been a |
3466 | // conflict then. |
3467 | LifetimePosition next_start = range->FirstIntersection(other); |
3468 | if (!next_start.IsValid() || (next_start > max)) { |
3469 | // There is no conflict or the conflict is outside of the current |
3470 | // stretch of deferred code. In either case we can ignore the |
3471 | // inactive range. |
3472 | return; |
3473 | } |
        // They overlap, so we need to split the conflicting range and
        // reschedule it for allocation.
3476 | TRACE("Resolving conflict of %d with deferred fixed for register %s\n" , |
3477 | other->TopLevel()->vreg(), |
3478 | RegisterName(other->assigned_register())); |
3479 | LiveRange* split_off = |
3480 | other->SplitAt(next_start, data()->allocation_zone()); |
3481 | DCHECK_NE(split_off, other); |
3482 | AddToUnhandled(split_off); |
3483 | update_caches(other); |
3484 | }; |
      // Now check for conflicts in active and inactive ranges. We might have
      // conflicts in inactive, as we only perform this check on deferred/
      // non-deferred changes rather than on every block boundary, while
      // inactive live ranges might become live on any block boundary.
3489 | for (auto active : active_live_ranges()) { |
3490 | split_conflicting(range, active, [this](LiveRange* updated) { |
3491 | next_active_ranges_change_ = |
3492 | Min(updated->End(), next_active_ranges_change_); |
3493 | }); |
3494 | } |
3495 | for (auto inactive : inactive_live_ranges()) { |
3496 | split_conflicting(range, inactive, [this](LiveRange* updated) { |
3497 | next_inactive_ranges_change_ = |
3498 | Min(updated->End(), next_inactive_ranges_change_); |
3499 | }); |
3500 | } |
3501 | }; |
3502 | if (mode() == GENERAL_REGISTERS) { |
3503 | for (TopLevelLiveRange* current : data()->fixed_live_ranges()) { |
3504 | if (current != nullptr) { |
3505 | if (current->IsDeferredFixed()) { |
3506 | add_to_inactive(current); |
3507 | } |
3508 | } |
3509 | } |
3510 | } else { |
3511 | for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) { |
3512 | if (current != nullptr) { |
3513 | if (current->IsDeferredFixed()) { |
3514 | add_to_inactive(current); |
3515 | } |
3516 | } |
3517 | } |
3518 | if (!kSimpleFPAliasing && check_fp_aliasing()) { |
3519 | for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) { |
3520 | if (current != nullptr) { |
3521 | if (current->IsDeferredFixed()) { |
3522 | add_to_inactive(current); |
3523 | } |
3524 | } |
3525 | } |
3526 | for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) { |
3527 | if (current != nullptr) { |
3528 | if (current->IsDeferredFixed()) { |
3529 | add_to_inactive(current); |
3530 | } |
3531 | } |
3532 | } |
3533 | } |
3534 | } |
3535 | } else { |
    // Remove all deferred fixed ranges from inactive.
3537 | for (auto it = inactive_live_ranges().begin(); |
3538 | it != inactive_live_ranges().end();) { |
3539 | if ((*it)->TopLevel()->IsDeferredFixed()) { |
3540 | it = inactive_live_ranges().erase(it); |
3541 | } else { |
3542 | ++it; |
3543 | } |
3544 | } |
3545 | } |
3546 | } |
3547 | |
3548 | bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred( |
3549 | const InstructionBlock* block) { |
3550 | if (block->IsDeferred()) return true; |
3551 | if (block->PredecessorCount() == 0) return true; |
3552 | bool pred_is_deferred = false; |
3553 | for (auto pred : block->predecessors()) { |
3554 | if (pred.IsNext(block->rpo_number())) { |
3555 | pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred(); |
3556 | break; |
3557 | } |
3558 | } |
3559 | return !pred_is_deferred; |
3560 | } |
3561 | |
3562 | bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) { |
3563 | for (auto pred : block->predecessors()) { |
3564 | InstructionBlock* pred_block = code()->InstructionBlockAt(pred); |
3565 | if (!pred_block->IsDeferred()) return true; |
3566 | } |
3567 | return false; |
3568 | } |
3569 | |
3570 | void LinearScanAllocator::AllocateRegisters() { |
3571 | DCHECK(unhandled_live_ranges().empty()); |
3572 | DCHECK(active_live_ranges().empty()); |
3573 | DCHECK(inactive_live_ranges().empty()); |
3574 | |
3575 | SplitAndSpillRangesDefinedByMemoryOperand(); |
3576 | data()->ResetSpillState(); |
3577 | |
3578 | if (FLAG_trace_alloc) { |
3579 | PrintRangeOverview(std::cout); |
3580 | } |
3581 | |
3582 | const size_t live_ranges_size = data()->live_ranges().size(); |
3583 | for (TopLevelLiveRange* range : data()->live_ranges()) { |
3584 | CHECK_EQ(live_ranges_size, |
3585 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
3586 | if (!CanProcessRange(range)) continue; |
3587 | for (LiveRange* to_add = range; to_add != nullptr; |
3588 | to_add = to_add->next()) { |
3589 | if (!to_add->spilled()) { |
3590 | AddToUnhandled(to_add); |
3591 | } |
3592 | } |
3593 | } |
3594 | |
3595 | if (mode() == GENERAL_REGISTERS) { |
3596 | for (TopLevelLiveRange* current : data()->fixed_live_ranges()) { |
3597 | if (current != nullptr) { |
3598 | if (current->IsDeferredFixed()) continue; |
3599 | AddToInactive(current); |
3600 | } |
3601 | } |
3602 | } else { |
3603 | for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) { |
3604 | if (current != nullptr) { |
3605 | if (current->IsDeferredFixed()) continue; |
3606 | AddToInactive(current); |
3607 | } |
3608 | } |
3609 | if (!kSimpleFPAliasing && check_fp_aliasing()) { |
3610 | for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) { |
3611 | if (current != nullptr) { |
3612 | if (current->IsDeferredFixed()) continue; |
3613 | AddToInactive(current); |
3614 | } |
3615 | } |
3616 | for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) { |
3617 | if (current != nullptr) { |
3618 | if (current->IsDeferredFixed()) continue; |
3619 | AddToInactive(current); |
3620 | } |
3621 | } |
3622 | } |
3623 | } |
3624 | |
3625 | RpoNumber last_block = RpoNumber::FromInt(0); |
3626 | RpoNumber max_blocks = |
3627 | RpoNumber::FromInt(code()->InstructionBlockCount() - 1); |
3628 | LifetimePosition next_block_boundary = |
3629 | LifetimePosition::InstructionFromInstructionIndex( |
3630 | data() |
3631 | ->code() |
3632 | ->InstructionBlockAt(last_block) |
3633 | ->last_instruction_index()) |
3634 | .NextFullStart(); |
3635 | SpillMode spill_mode = SpillMode::kSpillAtDefinition; |
3636 | |
3637 | // Process all ranges. We also need to ensure that we have seen all block |
3638 | // boundaries. Linear scan might have assigned and spilled ranges before |
3639 | // reaching the last block and hence we would ignore control flow effects for |
3640 | // those. Not only does this produce a potentially bad assignment, it also |
3641 | // breaks with the invariant that we undo spills that happen in deferred code |
3642 | // when crossing a deferred/non-deferred boundary. |
3643 | while (!unhandled_live_ranges().empty() || |
3644 | (data()->is_turbo_control_flow_aware_allocation() && |
3645 | last_block < max_blocks)) { |
3646 | LiveRange* current = unhandled_live_ranges().empty() |
3647 | ? nullptr |
3648 | : *unhandled_live_ranges().begin(); |
3649 | LifetimePosition position = |
3650 | current ? current->Start() : next_block_boundary; |
3651 | #ifdef DEBUG |
3652 | allocation_finger_ = position; |
3653 | #endif |
3654 | if (data()->is_turbo_control_flow_aware_allocation()) { |
3655 | // Splintering is not supported. |
3656 | CHECK(!data()->is_turbo_preprocess_ranges()); |
3657 | // Check whether we just moved across a block boundary. This will trigger |
3658 | // for the first range that is past the current boundary. |
3659 | if (position >= next_block_boundary) { |
3660 | TRACE("Processing boundary at %d leaving %d\n" , |
3661 | next_block_boundary.value(), last_block.ToInt()); |
3662 | |
3663 | // Forward state to before block boundary |
3664 | LifetimePosition end_of_block = next_block_boundary.PrevStart().End(); |
3665 | ForwardStateTo(end_of_block); |
3666 | |
3667 | // Remember this state. |
3668 | InstructionBlock* current_block = data()->code()->GetInstructionBlock( |
3669 | next_block_boundary.ToInstructionIndex()); |
3670 | |
        // Store the current spill state (as the state at the end of the
        // block). For simplicity, we store the active ranges, i.e., the live
        // ranges that are not spilled.
3674 | data()->RememberSpillState(last_block, active_live_ranges()); |
3675 | |
3676 | // Only reset the state if this was not a direct fallthrough. Otherwise |
3677 | // control flow resolution will get confused (it does not expect changes |
        // across fallthrough edges).
3679 | bool fallthrough = (current_block->PredecessorCount() == 1) && |
3680 | current_block->predecessors()[0].IsNext( |
3681 | current_block->rpo_number()); |
3682 | |
3683 | // When crossing a deferred/non-deferred boundary, we have to load or |
3684 | // remove the deferred fixed ranges from inactive. |
3685 | if ((spill_mode == SpillMode::kSpillDeferred) != |
3686 | current_block->IsDeferred()) { |
3687 | // Update spill mode. |
3688 | spill_mode = current_block->IsDeferred() |
3689 | ? SpillMode::kSpillDeferred |
3690 | : SpillMode::kSpillAtDefinition; |
3691 | |
3692 | ForwardStateTo(next_block_boundary); |
3693 | |
3694 | #ifdef DEBUG |
3695 | // Allow allocation at current position. |
3696 | allocation_finger_ = next_block_boundary; |
3697 | #endif |
3698 | UpdateDeferredFixedRanges(spill_mode, current_block); |
3699 | } |
3700 | |
3701 | // Allocation relies on the fact that each non-deferred block has at |
3702 | // least one non-deferred predecessor. Check this invariant here. |
3703 | DCHECK_IMPLIES(!current_block->IsDeferred(), |
3704 | HasNonDeferredPredecessor(current_block)); |
3705 | |
3706 | if (!fallthrough) { |
3707 | #ifdef DEBUG |
3708 | // Allow allocation at current position. |
3709 | allocation_finger_ = next_block_boundary; |
3710 | #endif |
3711 | |
3712 | // We are currently at next_block_boundary - 1. Move the state to the |
3713 | // actual block boundary position. In particular, we have to |
3714 | // reactivate inactive ranges so that they get rescheduled for |
3715 | // allocation if they were not live at the predecessors. |
3716 | ForwardStateTo(next_block_boundary); |
3717 | |
3718 | RangeWithRegisterSet to_be_live(data()->allocation_zone()); |
3719 | |
3720 | // If we end up deciding to use the state of the immediate |
3721 | // predecessor, it is better not to perform a change. It would lead to |
3722 | // the same outcome anyway. |
3723 | // This may never happen on boundaries between deferred and |
3724 | // non-deferred code, as we rely on explicit respill to ensure we |
3725 | // spill at definition. |
3726 | bool no_change_required = false; |
3727 | |
3728 | auto pick_state_from = [this, current_block]( |
3729 | RpoNumber pred, |
3730 | RangeWithRegisterSet* to_be_live) -> bool { |
3731 | TRACE("Using information from B%d\n" , pred.ToInt()); |
3732 | // If this is a fall-through that is not across a deferred |
3733 | // boundary, there is nothing to do. |
3734 | bool is_noop = pred.IsNext(current_block->rpo_number()); |
3735 | if (!is_noop) { |
3736 | auto& spill_state = data()->GetSpillState(pred); |
3737 | TRACE("Not a fallthrough. Adding %zu elements...\n" , |
3738 | spill_state.size()); |
3739 | for (const auto range : spill_state) { |
3740 | // Filter out ranges that had their register stolen by backwards |
3741 | // working spill heuristics. These have been spilled after the |
3742 | // fact, so ignore them. |
3743 | if (!range->HasRegisterAssigned()) continue; |
3744 | to_be_live->emplace(range); |
3745 | } |
3746 | } |
3747 | return is_noop; |
3748 | }; |
3749 | |
3750 | // Multiple cases here: |
3751 | // 1) We have a single predecessor => this is a control flow split, so |
3752 | // just restore the predecessor state. |
          // 2) We have two predecessors => this is a conditional, so decide
          //    what to do based on forward uses, trying to benefit the same
          //    branch if in doubt (make one path fast).
3756 | // 3) We have many predecessors => this is a switch. Compute union |
3757 | // based on majority, break ties by looking forward. |
3758 | if (current_block->PredecessorCount() == 1) { |
3759 | TRACE("Single predecessor for B%d\n" , |
3760 | current_block->rpo_number().ToInt()); |
3761 | no_change_required = |
3762 | pick_state_from(current_block->predecessors()[0], &to_be_live); |
3763 | } else if (current_block->PredecessorCount() == 2) { |
3764 | TRACE("Two predecessors for B%d\n" , |
3765 | current_block->rpo_number().ToInt()); |
3766 | // If one of the branches does not contribute any information, |
3767 | // e.g. because it is deferred or a back edge, we can short cut |
3768 | // here right away. |
3769 | RpoNumber chosen_predecessor = RpoNumber::Invalid(); |
3770 | if (!ConsiderBlockForControlFlow( |
3771 | current_block, current_block->predecessors()[0])) { |
3772 | chosen_predecessor = current_block->predecessors()[1]; |
3773 | } else if (!ConsiderBlockForControlFlow( |
3774 | current_block, current_block->predecessors()[1])) { |
3775 | chosen_predecessor = current_block->predecessors()[0]; |
3776 | } else { |
3777 | chosen_predecessor = ChooseOneOfTwoPredecessorStates( |
3778 | current_block, next_block_boundary); |
3779 | } |
3780 | no_change_required = |
3781 | pick_state_from(chosen_predecessor, &to_be_live); |
3782 | |
3783 | } else { |
3784 | // Merge at the end of, e.g., a switch. |
3785 | ComputeStateFromManyPredecessors(current_block, &to_be_live); |
3786 | } |
3787 | |
3788 | if (!no_change_required) { |
3789 | SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode); |
3790 | ReloadLiveRanges(to_be_live, next_block_boundary); |
3791 | } |
3792 | |
3793 | // TODO(herhut) Check removal. |
3794 | // Now forward to current position |
3795 | ForwardStateTo(next_block_boundary); |
3796 | } |
3797 | // Update block information |
3798 | last_block = current_block->rpo_number(); |
3799 | next_block_boundary = LifetimePosition::InstructionFromInstructionIndex( |
3800 | current_block->last_instruction_index()) |
3801 | .NextFullStart(); |
3802 | |
3803 | // We might have created new unhandled live ranges, so cycle around the |
3804 | // loop to make sure we pick the top most range in unhandled for |
3805 | // processing. |
3806 | continue; |
3807 | } |
3808 | } |
3809 | |
3810 | DCHECK_NOT_NULL(current); |
3811 | |
3812 | TRACE("Processing interval %d:%d start=%d\n" , current->TopLevel()->vreg(), |
3813 | current->relative_id(), position.value()); |
3814 | |
3815 | // Now we can erase current, as we are sure to process it. |
3816 | unhandled_live_ranges().erase(unhandled_live_ranges().begin()); |
3817 | |
3818 | if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel())) |
3819 | continue; |
3820 | |
3821 | ForwardStateTo(position); |
3822 | |
3823 | DCHECK(!current->HasRegisterAssigned() && !current->spilled()); |
3824 | |
3825 | ProcessCurrentRange(current, spill_mode); |
3826 | } |
3827 | |
3828 | if (FLAG_trace_alloc) { |
3829 | PrintRangeOverview(std::cout); |
3830 | } |
3831 | } |
3832 | |
3833 | bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) { |
3834 | DCHECK(!data()->is_turbo_control_flow_aware_allocation()); |
3835 | DCHECK(range->TopLevel()->IsSplinter()); |
3836 | // If we can spill the whole range, great. Otherwise, split above the |
3837 | // first use needing a register and spill the top part. |
3838 | const UsePosition* next_reg = range->NextRegisterPosition(range->Start()); |
3839 | if (next_reg == nullptr) { |
3840 | Spill(range, SpillMode::kSpillAtDefinition); |
3841 | return true; |
3842 | } else if (range->FirstHintPosition() == nullptr) { |
3843 | // If there was no hint, but we have a use position requiring a |
3844 | // register, apply the hot path heuristics. |
3845 | return false; |
3846 | } else if (next_reg->pos().PrevStart() > range->Start()) { |
3847 | LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart()); |
3848 | AddToUnhandled(tail); |
3849 | Spill(range, SpillMode::kSpillAtDefinition); |
3850 | return true; |
3851 | } |
3852 | return false; |
3853 | } |
3854 | |
3855 | void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, |
3856 | int reg) { |
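  // Record the assignment everywhere it is consulted later: the allocation
  // data, the range itself and its use hints, its bundle, and, for phi
  // ranges, the phi map.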
3857 | data()->MarkAllocated(range->representation(), reg); |
3858 | range->set_assigned_register(reg); |
3859 | range->SetUseHints(reg); |
3860 | range->UpdateBundleRegister(reg); |
3861 | if (range->IsTopLevel() && range->TopLevel()->is_phi()) { |
3862 | data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg); |
3863 | } |
3864 | } |
3865 | |
3866 | void LinearScanAllocator::AddToActive(LiveRange* range) { |
3867 | TRACE("Add live range %d:%d in %s to active\n" , range->TopLevel()->vreg(), |
3868 | range->relative_id(), RegisterName(range->assigned_register())); |
3869 | active_live_ranges().push_back(range); |
3870 | next_active_ranges_change_ = |
3871 | std::min(next_active_ranges_change_, range->NextEndAfter(range->Start())); |
3872 | } |
3873 | |
3874 | void LinearScanAllocator::AddToInactive(LiveRange* range) { |
3875 | TRACE("Add live range %d:%d to inactive\n" , range->TopLevel()->vreg(), |
3876 | range->relative_id()); |
3877 | inactive_live_ranges().push_back(range); |
3878 | next_inactive_ranges_change_ = std::min( |
3879 | next_inactive_ranges_change_, range->NextStartAfter(range->Start())); |
3880 | } |
3881 | |
3882 | void LinearScanAllocator::AddToUnhandled(LiveRange* range) { |
3883 | if (range == nullptr || range->IsEmpty()) return; |
3884 | DCHECK(!range->HasRegisterAssigned() && !range->spilled()); |
3885 | DCHECK(allocation_finger_ <= range->Start()); |
3886 | |
3887 | TRACE("Add live range %d:%d to unhandled\n" , range->TopLevel()->vreg(), |
3888 | range->relative_id()); |
3889 | unhandled_live_ranges().insert(range); |
3890 | } |
3891 | |
3892 | ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled( |
3893 | const ZoneVector<LiveRange*>::iterator it) { |
3894 | TRACE("Moving live range %d:%d from active to handled\n" , |
3895 | (*it)->TopLevel()->vreg(), (*it)->relative_id()); |
3896 | return active_live_ranges().erase(it); |
3897 | } |
3898 | |
3899 | ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive( |
3900 | const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) { |
3901 | LiveRange* range = *it; |
3902 | inactive_live_ranges().push_back(range); |
3903 | TRACE("Moving live range %d:%d from active to inactive\n" , |
3904 | (range)->TopLevel()->vreg(), range->relative_id()); |
3905 | next_inactive_ranges_change_ = |
3906 | std::min(next_inactive_ranges_change_, range->NextStartAfter(position)); |
3907 | return active_live_ranges().erase(it); |
3908 | } |
3909 | |
3910 | ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToHandled( |
3911 | ZoneVector<LiveRange*>::iterator it) { |
3912 | TRACE("Moving live range %d:%d from inactive to handled\n" , |
3913 | (*it)->TopLevel()->vreg(), (*it)->relative_id()); |
3914 | return inactive_live_ranges().erase(it); |
3915 | } |
3916 | |
3917 | ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToActive( |
3918 | ZoneVector<LiveRange*>::iterator it, LifetimePosition position) { |
3919 | LiveRange* range = *it; |
3920 | active_live_ranges().push_back(range); |
3921 | TRACE("Moving live range %d:%d from inactive to active\n" , |
3922 | range->TopLevel()->vreg(), range->relative_id()); |
3923 | next_active_ranges_change_ = |
3924 | std::min(next_active_ranges_change_, range->NextEndAfter(position)); |
3925 | return inactive_live_ranges().erase(it); |
3926 | } |
3927 | |
3928 | void LinearScanAllocator::ForwardStateTo(LifetimePosition position) { |
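  // Move active ranges that ended before |position| to handled and those not
  // covering |position| to inactive; symmetrically, move inactive ranges that
  // ended to handled and those covering |position| to active.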
3929 | if (position >= next_active_ranges_change_) { |
3930 | next_active_ranges_change_ = LifetimePosition::MaxPosition(); |
3931 | for (auto it = active_live_ranges().begin(); |
3932 | it != active_live_ranges().end();) { |
3933 | LiveRange* cur_active = *it; |
3934 | if (cur_active->End() <= position) { |
3935 | it = ActiveToHandled(it); |
3936 | } else if (!cur_active->Covers(position)) { |
3937 | it = ActiveToInactive(it, position); |
3938 | } else { |
3939 | next_active_ranges_change_ = std::min( |
3940 | next_active_ranges_change_, cur_active->NextEndAfter(position)); |
3941 | ++it; |
3942 | } |
3943 | } |
3944 | } |
3945 | |
3946 | if (position >= next_inactive_ranges_change_) { |
3947 | next_inactive_ranges_change_ = LifetimePosition::MaxPosition(); |
3948 | for (auto it = inactive_live_ranges().begin(); |
3949 | it != inactive_live_ranges().end();) { |
3950 | LiveRange* cur_inactive = *it; |
3951 | if (cur_inactive->End() <= position) { |
3952 | it = InactiveToHandled(it); |
3953 | } else if (cur_inactive->Covers(position)) { |
3954 | it = InactiveToActive(it, position); |
3955 | } else { |
3956 | next_inactive_ranges_change_ = |
3957 | std::min(next_inactive_ranges_change_, |
3958 | cur_inactive->NextStartAfter(position)); |
3959 | ++it; |
3960 | } |
3961 | } |
3962 | } |
3963 | } |
3964 | |
3965 | int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) { |
3966 | DCHECK(start->IsDeferred()); |
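  // Walk forward through the run of consecutive deferred blocks and return
  // the last instruction index of the final one.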
3967 | RpoNumber last_block = |
3968 | RpoNumber::FromInt(code()->InstructionBlockCount() - 1); |
3969 | while ((start->rpo_number() < last_block)) { |
3970 | InstructionBlock* next = |
3971 | code()->InstructionBlockAt(start->rpo_number().Next()); |
3972 | if (!next->IsDeferred()) break; |
3973 | start = next; |
3974 | } |
3975 | return start->last_instruction_index(); |
3976 | } |
3977 | |
3978 | void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep, |
3979 | int* num_regs, int* num_codes, |
3980 | const int** codes) const { |
3981 | DCHECK(!kSimpleFPAliasing); |
3982 | if (rep == MachineRepresentation::kFloat32) { |
3983 | *num_regs = data()->config()->num_float_registers(); |
3984 | *num_codes = data()->config()->num_allocatable_float_registers(); |
3985 | *codes = data()->config()->allocatable_float_codes(); |
3986 | } else if (rep == MachineRepresentation::kSimd128) { |
3987 | *num_regs = data()->config()->num_simd128_registers(); |
3988 | *num_codes = data()->config()->num_allocatable_simd128_registers(); |
3989 | *codes = data()->config()->allocatable_simd128_codes(); |
3990 | } else { |
3991 | UNREACHABLE(); |
3992 | } |
3993 | } |
3994 | |
3995 | void LinearScanAllocator::FindFreeRegistersForRange( |
3996 | LiveRange* range, Vector<LifetimePosition> positions) { |
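  // On return, positions[reg] holds the position up to which |reg| is free:
  // registers held by active ranges are blocked immediately, while registers
  // held by inactive ranges are blocked from their first intersection with
  // |range|, if any.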
3997 | int num_regs = num_registers(); |
3998 | int num_codes = num_allocatable_registers(); |
3999 | const int* codes = allocatable_register_codes(); |
4000 | MachineRepresentation rep = range->representation(); |
4001 | if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 || |
4002 | rep == MachineRepresentation::kSimd128)) |
4003 | GetFPRegisterSet(rep, &num_regs, &num_codes, &codes); |
4004 | DCHECK_GE(positions.length(), num_regs); |
4005 | |
4006 | for (int i = 0; i < num_regs; ++i) { |
4007 | positions[i] = LifetimePosition::MaxPosition(); |
4008 | } |
4009 | |
4010 | for (LiveRange* cur_active : active_live_ranges()) { |
4011 | int cur_reg = cur_active->assigned_register(); |
4012 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
4013 | positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0); |
4014 | TRACE("Register %s is free until pos %d (1) due to %d\n" , |
4015 | RegisterName(cur_reg), |
4016 | LifetimePosition::GapFromInstructionIndex(0).value(), |
4017 | cur_active->TopLevel()->vreg()); |
4018 | } else { |
4019 | int alias_base_index = -1; |
4020 | int aliases = data()->config()->GetAliases( |
4021 | cur_active->representation(), cur_reg, rep, &alias_base_index); |
4022 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
4023 | while (aliases--) { |
4024 | int aliased_reg = alias_base_index + aliases; |
4025 | positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0); |
4026 | } |
4027 | } |
4028 | } |
4029 | |
4030 | for (LiveRange* cur_inactive : inactive_live_ranges()) { |
4031 | DCHECK(cur_inactive->End() > range->Start()); |
4032 | int cur_reg = cur_inactive->assigned_register(); |
    // No need to carry out intersections when this register won't be
    // interesting to this range anyway.
4035 | // TODO(mtrofin): extend to aliased ranges, too. |
4036 | if ((kSimpleFPAliasing || !check_fp_aliasing()) && |
4037 | positions[cur_reg] < range->Start()) { |
4038 | continue; |
4039 | } |
4040 | |
4041 | LifetimePosition next_intersection = cur_inactive->FirstIntersection(range); |
4042 | if (!next_intersection.IsValid()) continue; |
4043 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
4044 | positions[cur_reg] = Min(positions[cur_reg], next_intersection); |
4045 | TRACE("Register %s is free until pos %d (2)\n" , RegisterName(cur_reg), |
4046 | Min(positions[cur_reg], next_intersection).value()); |
4047 | } else { |
4048 | int alias_base_index = -1; |
4049 | int aliases = data()->config()->GetAliases( |
4050 | cur_inactive->representation(), cur_reg, rep, &alias_base_index); |
4051 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
4052 | while (aliases--) { |
4053 | int aliased_reg = alias_base_index + aliases; |
4054 | positions[aliased_reg] = Min(positions[aliased_reg], next_intersection); |
4055 | } |
4056 | } |
4057 | } |
4058 | } |
4059 | |
4060 | // High-level register allocation summary: |
4061 | // |
// For regular, or hot (i.e. not splinter) ranges, we first attempt to
// allocate the preferred (hint) register. If that is not possible,
// we find a register that's free, and allocate that. If that's not possible,
4065 | // we search for a register to steal from a range that was allocated. The |
4066 | // goal is to optimize for throughput by avoiding register-to-memory |
4067 | // moves, which are expensive. |
4068 | // |
4069 | // For splinters, the goal is to minimize the number of moves. First we try |
4070 | // to allocate the preferred register (more discussion follows). Failing that, |
// we bail out and spill as far as we can, unless the first use is at start,
// in which case we apply the same behavior as we do for regular ranges.
4073 | // If there is no hint, we apply the hot-path behavior. |
4074 | // |
4075 | // For the splinter, the hint register may come from: |
4076 | // |
4077 | // - the hot path (we set it at splintering time with SetHint). In this case, if |
4078 | // we cannot offer the hint register, spilling is better because it's at most |
4079 | // 1 move, while trying to find and offer another register is at least 1 move. |
4080 | // |
4081 | // - a constraint. If we cannot offer that register, it's because there is some |
4082 | // interference. So offering the hint register up to the interference would |
4083 | // result |
4084 | // in a move at the interference, plus a move to satisfy the constraint. This is |
4085 | // also the number of moves if we spill, with the potential of the range being |
4086 | // already spilled and thus saving a move (the spill). |
4087 | // Note that this can only be an input constraint, if it were an output one, |
4088 | // the range wouldn't be a splinter because it means it'd be defined in a |
4089 | // deferred |
4090 | // block, and we don't mark those as splinters (they live in deferred blocks |
4091 | // only). |
4092 | // |
4093 | // - a phi. The same analysis as in the case of the input constraint applies. |
4094 | // |
4095 | void LinearScanAllocator::ProcessCurrentRange(LiveRange* current, |
4096 | SpillMode spill_mode) { |
4097 | EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters> |
4098 | free_until_pos; |
4099 | FindFreeRegistersForRange(current, free_until_pos); |
4100 | if (!TryAllocatePreferredReg(current, free_until_pos)) { |
4101 | if (current->TopLevel()->IsSplinter()) { |
4102 | DCHECK(!data()->is_turbo_control_flow_aware_allocation()); |
4103 | if (TrySplitAndSpillSplinter(current)) return; |
4104 | } |
4105 | if (!TryAllocateFreeReg(current, free_until_pos)) { |
4106 | AllocateBlockedReg(current, spill_mode); |
4107 | } |
4108 | } |
4109 | if (current->HasRegisterAssigned()) { |
4110 | AddToActive(current); |
4111 | } |
4112 | } |
4113 | |
4114 | bool LinearScanAllocator::TryAllocatePreferredReg( |
4115 | LiveRange* current, const Vector<LifetimePosition>& free_until_pos) { |
4116 | int hint_register; |
4117 | if (current->RegisterFromControlFlow(&hint_register) || |
4118 | current->FirstHintPosition(&hint_register) != nullptr || |
4119 | current->RegisterFromBundle(&hint_register)) { |
4120 | TRACE( |
4121 | "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n" , |
4122 | RegisterName(hint_register), free_until_pos[hint_register].value(), |
4123 | current->TopLevel()->vreg(), current->relative_id(), |
4124 | current->End().value()); |
4125 | |
4126 | // The desired register is free until the end of the current live range. |
4127 | if (free_until_pos[hint_register] >= current->End()) { |
4128 | TRACE("Assigning preferred reg %s to live range %d:%d\n" , |
4129 | RegisterName(hint_register), current->TopLevel()->vreg(), |
4130 | current->relative_id()); |
4131 | SetLiveRangeAssignedRegister(current, hint_register); |
4132 | return true; |
4133 | } |
4134 | } |
4135 | return false; |
4136 | } |
4137 | |
4138 | int LinearScanAllocator::PickRegisterThatIsAvailableLongest( |
4139 | LiveRange* current, int hint_reg, |
4140 | const Vector<LifetimePosition>& free_until_pos) { |
4141 | int num_regs = 0; // used only for the call to GetFPRegisterSet. |
4142 | int num_codes = num_allocatable_registers(); |
4143 | const int* codes = allocatable_register_codes(); |
4144 | MachineRepresentation rep = current->representation(); |
4145 | if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 || |
4146 | rep == MachineRepresentation::kSimd128)) { |
4147 | GetFPRegisterSet(rep, &num_regs, &num_codes, &codes); |
4148 | } |
4149 | |
4150 | DCHECK_GE(free_until_pos.length(), num_codes); |
4151 | |
4152 | // Find the register which stays free for the longest time. Check for |
4153 | // the hinted register first, as we might want to use that one. Only |
4154 | // count full instructions for free ranges, as an instruction's internal |
4155 | // positions do not help but might shadow a hinted register. This is |
  // typically the case for function calls, where all registers are
  // clobbered after the call except for the argument registers, which are
4158 | // set before the call. Hence, the argument registers always get ignored, |
4159 | // as their available time is shorter. |
4160 | int reg = (hint_reg == kUnassignedRegister) ? codes[0] : hint_reg; |
4161 | int current_free = -1; |
4162 | for (int i = 0; i < num_codes; ++i) { |
4163 | int code = codes[i]; |
    // Prefer registers that have no fixed uses to avoid blocking later hints.
    // We use the first register that has no fixed uses to ensure we use
    // byte-addressable registers on ia32 first.
    int candidate_free = free_until_pos[code].ToInstructionIndex();
    TRACE("Register %s is free until %d\n", RegisterName(code), candidate_free);
4169 | if ((candidate_free > current_free) || |
4170 | (candidate_free == current_free && reg != hint_reg && |
4171 | (data()->HasFixedUse(current->representation(), reg) && |
4172 | !data()->HasFixedUse(current->representation(), code)))) { |
4173 | reg = code; |
4174 | current_free = candidate_free; |
4175 | } |
4176 | } |
4177 | |
4178 | return reg; |
4179 | } |
4180 | |
4181 | bool LinearScanAllocator::TryAllocateFreeReg( |
4182 | LiveRange* current, const Vector<LifetimePosition>& free_until_pos) { |
4183 | // Compute register hint, if such exists. |
4184 | int hint_reg = kUnassignedRegister; |
4185 | current->RegisterFromControlFlow(&hint_reg) || |
4186 | current->FirstHintPosition(&hint_reg) != nullptr || |
4187 | current->RegisterFromBundle(&hint_reg); |
4188 | |
4189 | int reg = |
4190 | PickRegisterThatIsAvailableLongest(current, hint_reg, free_until_pos); |
4191 | |
4192 | LifetimePosition pos = free_until_pos[reg]; |
4193 | |
4194 | if (pos <= current->Start()) { |
4195 | // All registers are blocked. |
4196 | return false; |
4197 | } |
4198 | |
4199 | if (pos < current->End()) { |
4200 | // Register reg is available at the range start but becomes blocked before |
4201 | // the range end. Split current at position where it becomes blocked. |
4202 | LiveRange* tail = SplitRangeAt(current, pos); |
4203 | AddToUnhandled(tail); |
4204 | |
4205 | // Try to allocate preferred register once more. |
4206 | if (TryAllocatePreferredReg(current, free_until_pos)) return true; |
4207 | } |
4208 | |
4209 | // Register reg is available at the range start and is free until the range |
4210 | // end. |
4211 | DCHECK(pos >= current->End()); |
4212 | TRACE("Assigning free reg %s to live range %d:%d\n" , RegisterName(reg), |
4213 | current->TopLevel()->vreg(), current->relative_id()); |
4214 | SetLiveRangeAssignedRegister(current, reg); |
4215 | |
4216 | return true; |
4217 | } |
4218 | |
4219 | void LinearScanAllocator::AllocateBlockedReg(LiveRange* current, |
4220 | SpillMode spill_mode) { |
4221 | UsePosition* register_use = current->NextRegisterPosition(current->Start()); |
4222 | if (register_use == nullptr) { |
4223 | // There is no use in the current live range that requires a register. |
4224 | // We can just spill it. |
4225 | Spill(current, spill_mode); |
4226 | return; |
4227 | } |
4228 | |
4229 | MachineRepresentation rep = current->representation(); |
4230 | |
  // use_pos keeps track of the next position at which each register/alias is
  // used; block_pos keeps track of the position from which each
  // register/alias is blocked.
4234 | EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters> |
4235 | use_pos(LifetimePosition::MaxPosition()); |
4236 | EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters> |
4237 | block_pos(LifetimePosition::MaxPosition()); |
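
  // Sketch of the invariants established below (registers invented): a
  // register held by a fixed or unspillable range is pinned in both arrays,
  // so it can neither be used nor stolen; for a spillable active range only
  // use_pos drops, to its holder's next beneficial register use:
  //
  //   block_pos[r0] = use_pos[r0] = GapFromInstructionIndex(0);  // fixed
  //   use_pos[r1] = holder->NextLifetimePositionRegisterIsBeneficial(
  //       current->Start());                                     // spillable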
4238 | |
4239 | for (LiveRange* range : active_live_ranges()) { |
4240 | int cur_reg = range->assigned_register(); |
4241 | bool is_fixed_or_cant_spill = |
4242 | range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start()); |
4243 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
4244 | if (is_fixed_or_cant_spill) { |
4245 | block_pos[cur_reg] = use_pos[cur_reg] = |
4246 | LifetimePosition::GapFromInstructionIndex(0); |
4247 | } else { |
4248 | DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0), |
4249 | block_pos[cur_reg]); |
4250 | use_pos[cur_reg] = |
4251 | range->NextLifetimePositionRegisterIsBeneficial(current->Start()); |
4252 | } |
4253 | } else { |
4254 | int alias_base_index = -1; |
4255 | int aliases = data()->config()->GetAliases( |
4256 | range->representation(), cur_reg, rep, &alias_base_index); |
4257 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
4258 | while (aliases--) { |
4259 | int aliased_reg = alias_base_index + aliases; |
4260 | if (is_fixed_or_cant_spill) { |
4261 | block_pos[aliased_reg] = use_pos[aliased_reg] = |
4262 | LifetimePosition::GapFromInstructionIndex(0); |
4263 | } else { |
4264 | use_pos[aliased_reg] = |
4265 | Min(block_pos[aliased_reg], |
4266 | range->NextLifetimePositionRegisterIsBeneficial( |
4267 | current->Start())); |
4268 | } |
4269 | } |
4270 | } |
4271 | } |
4272 | |
4273 | for (LiveRange* range : inactive_live_ranges()) { |
4274 | DCHECK(range->End() > current->Start()); |
4275 | int cur_reg = range->assigned_register(); |
4276 | bool is_fixed = range->TopLevel()->IsFixed(); |
4277 | |
    // Don't perform costly intersections if they are guaranteed not to
    // update block_pos or use_pos.
4280 | // TODO(mtrofin): extend to aliased ranges, too. |
4281 | if ((kSimpleFPAliasing || !check_fp_aliasing())) { |
4282 | if (is_fixed) { |
4283 | if (block_pos[cur_reg] < range->Start()) continue; |
4284 | } else { |
4285 | if (use_pos[cur_reg] < range->Start()) continue; |
4286 | } |
4287 | } |
4288 | |
4289 | LifetimePosition next_intersection = range->FirstIntersection(current); |
4290 | if (!next_intersection.IsValid()) continue; |
4291 | |
4292 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
4293 | if (is_fixed) { |
4294 | block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); |
4295 | use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); |
4296 | } else { |
4297 | use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); |
4298 | } |
4299 | } else { |
4300 | int alias_base_index = -1; |
4301 | int aliases = data()->config()->GetAliases( |
4302 | range->representation(), cur_reg, rep, &alias_base_index); |
4303 | DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
4304 | while (aliases--) { |
4305 | int aliased_reg = alias_base_index + aliases; |
4306 | if (is_fixed) { |
4307 | block_pos[aliased_reg] = |
4308 | Min(block_pos[aliased_reg], next_intersection); |
4309 | use_pos[aliased_reg] = |
4310 | Min(block_pos[aliased_reg], use_pos[aliased_reg]); |
4311 | } else { |
4312 | use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection); |
4313 | } |
4314 | } |
4315 | } |
4316 | } |
4317 | |
4318 | // Compute register hint if it exists. |
4319 | int hint_reg = kUnassignedRegister; |
4320 | current->RegisterFromControlFlow(&hint_reg) || |
4321 | register_use->HintRegister(&hint_reg) || |
4322 | current->RegisterFromBundle(&hint_reg); |
4323 | int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos); |
4324 | |
4325 | if (use_pos[reg] < register_use->pos()) { |
4326 | // If there is a gap position before the next register use, we can |
4327 | // spill until there. The gap position will then fit the fill move. |
4328 | if (LifetimePosition::ExistsGapPositionBetween(current->Start(), |
4329 | register_use->pos())) { |
4330 | SpillBetween(current, current->Start(), register_use->pos(), spill_mode); |
4331 | return; |
4332 | } |
4333 | } |
4334 | |
  // When in deferred spilling mode, avoid stealing registers beyond the
  // current deferred region. This is required because we might otherwise
  // spill an inactive range whose start lies outside deferred code, and it
  // would then not be reloaded.
4338 | LifetimePosition new_end = current->End(); |
4339 | if (spill_mode == SpillMode::kSpillDeferred) { |
4340 | InstructionBlock* deferred_block = |
4341 | code()->GetInstructionBlock(current->Start().ToInstructionIndex()); |
4342 | new_end = Min(new_end, LifetimePosition::GapFromInstructionIndex( |
4343 | LastDeferredInstructionIndex(deferred_block))); |
4344 | } |
4345 | |
4346 | // We couldn't spill until the next register use. Split before the register |
4347 | // is blocked, if applicable. |
4348 | if (block_pos[reg] < new_end) { |
4349 | // Register becomes blocked before the current range end. Split before that |
4350 | // position. |
4351 | new_end = block_pos[reg].Start(); |
4352 | } |
4353 | |
  // If there is no register available at all, we can only spill this range.
  // This happens, for instance, on entry to deferred code where registers
  // might become blocked yet we aim to reload ranges.
4357 | if (new_end == current->Start()) { |
4358 | SpillBetween(current, new_end, register_use->pos(), spill_mode); |
4359 | return; |
4360 | } |
4361 | |
4362 | // Split at the new end if we found one. |
4363 | if (new_end != current->End()) { |
4364 | LiveRange* tail = SplitBetween(current, current->Start(), new_end); |
4365 | AddToUnhandled(tail); |
4366 | } |
4367 | |
4368 | // Register reg is not blocked for the whole range. |
4369 | DCHECK(block_pos[reg] >= current->End()); |
4370 | TRACE("Assigning blocked reg %s to live range %d:%d\n" , RegisterName(reg), |
4371 | current->TopLevel()->vreg(), current->relative_id()); |
4372 | SetLiveRangeAssignedRegister(current, reg); |
4373 | |
4374 | // This register was not free. Thus we need to find and spill |
4375 | // parts of active and inactive live regions that use the same register |
4376 | // at the same lifetime positions as current. |
4377 | SplitAndSpillIntersecting(current, spill_mode); |
4378 | } |
4379 | |
4380 | void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current, |
4381 | SpillMode spill_mode) { |
4382 | DCHECK(current->HasRegisterAssigned()); |
4383 | int reg = current->assigned_register(); |
4384 | LifetimePosition split_pos = current->Start(); |
4385 | for (auto it = active_live_ranges().begin(); |
4386 | it != active_live_ranges().end();) { |
4387 | LiveRange* range = *it; |
4388 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
4389 | if (range->assigned_register() != reg) { |
4390 | ++it; |
4391 | continue; |
4392 | } |
4393 | } else { |
4394 | if (!data()->config()->AreAliases(current->representation(), reg, |
4395 | range->representation(), |
4396 | range->assigned_register())) { |
4397 | ++it; |
4398 | continue; |
4399 | } |
4400 | } |
4401 | |
4402 | UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
4403 | // TODO(herhut): Be more clever here as long as we do not move split_pos |
4404 | // out of deferred code. |
4405 | LifetimePosition spill_pos = spill_mode == SpillMode::kSpillDeferred |
4406 | ? split_pos |
4407 | : FindOptimalSpillingPos(range, split_pos); |
4408 | if (next_pos == nullptr) { |
4409 | SpillAfter(range, spill_pos, spill_mode); |
4410 | } else { |
      // When spilling between spill_pos and next_pos, ensure that the range
      // remains spilled at least until the start of the current live range.
      // This guarantees that we will not introduce new unhandled ranges that
      // start before the current range, as this would violate allocation
      // invariants and lead to an inconsistent state of active and inactive
      // live-ranges: ranges are allocated in order of their start positions,
      // and they are retired from active/inactive when the start of the
      // current live-range becomes larger than their end.
4419 | DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(), |
4420 | next_pos->pos())); |
4421 | SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(), |
4422 | spill_mode); |
4423 | } |
4424 | it = ActiveToHandled(it); |
4425 | } |
4426 | |
4427 | for (auto it = inactive_live_ranges().begin(); |
4428 | it != inactive_live_ranges().end();) { |
4429 | LiveRange* range = *it; |
4430 | DCHECK(range->End() > current->Start()); |
4431 | if (range->TopLevel()->IsFixed()) { |
4432 | ++it; |
4433 | continue; |
4434 | } |
4435 | if (kSimpleFPAliasing || !check_fp_aliasing()) { |
4436 | if (range->assigned_register() != reg) { |
4437 | ++it; |
4438 | continue; |
4439 | } |
4440 | } else { |
4441 | if (!data()->config()->AreAliases(current->representation(), reg, |
4442 | range->representation(), |
4443 | range->assigned_register())) { |
4444 | ++it; |
4445 | continue; |
4446 | } |
4447 | } |
4448 | |
4449 | LifetimePosition next_intersection = range->FirstIntersection(current); |
4450 | if (next_intersection.IsValid()) { |
4451 | UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
4452 | if (next_pos == nullptr) { |
4453 | SpillAfter(range, split_pos, spill_mode); |
4454 | } else { |
4455 | next_intersection = Min(next_intersection, next_pos->pos()); |
4456 | SpillBetween(range, split_pos, next_intersection, spill_mode); |
4457 | } |
4458 | it = InactiveToHandled(it); |
4459 | } else { |
4460 | ++it; |
4461 | } |
4462 | } |
4463 | } |
4464 | |
4465 | bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) { |
4466 | if (!range->is_phi()) return false; |
4467 | |
4468 | DCHECK(!range->HasSpillOperand()); |
4469 | // Check how many operands belong to the same bundle as the output. |
4470 | LiveRangeBundle* out_bundle = range->get_bundle(); |
4471 | RegisterAllocationData::PhiMapValue* phi_map_value = |
4472 | data()->GetPhiMapValueFor(range); |
4473 | const PhiInstruction* phi = phi_map_value->phi(); |
4474 | const InstructionBlock* block = phi_map_value->block(); |
4475 | // Count the number of spilled operands. |
4476 | size_t spilled_count = 0; |
4477 | for (size_t i = 0; i < phi->operands().size(); i++) { |
4478 | int op = phi->operands()[i]; |
4479 | LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op); |
4480 | if (!op_range->TopLevel()->HasSpillRange()) continue; |
4481 | const InstructionBlock* pred = |
4482 | code()->InstructionBlockAt(block->predecessors()[i]); |
4483 | LifetimePosition pred_end = |
4484 | LifetimePosition::InstructionFromInstructionIndex( |
4485 | pred->last_instruction_index()); |
4486 | while (op_range != nullptr && !op_range->CanCover(pred_end)) { |
4487 | op_range = op_range->next(); |
4488 | } |
4489 | if (op_range != nullptr && op_range->spilled() && |
4490 | op_range->get_bundle() == out_bundle) { |
4491 | spilled_count++; |
4492 | } |
4493 | } |
4494 | |
  // Only continue if more than half of the operands are spilled to the same
  // slot (because they are part of the same bundle).
4497 | if (spilled_count * 2 <= phi->operands().size()) { |
4498 | return false; |
4499 | } |
4500 | |
  // If the range does not need a register soon, spill it to the merged
  // spill range.
4503 | LifetimePosition next_pos = range->Start(); |
4504 | if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); |
4505 | UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
4506 | if (pos == nullptr) { |
4507 | Spill(range, SpillMode::kSpillAtDefinition); |
4508 | return true; |
4509 | } else if (pos->pos() > range->Start().NextStart()) { |
4510 | SpillBetween(range, range->Start(), pos->pos(), |
4511 | SpillMode::kSpillAtDefinition); |
4512 | return true; |
4513 | } |
4514 | return false; |
4515 | } |
4516 | |
4517 | void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos, |
4518 | SpillMode spill_mode) { |
4519 | LiveRange* second_part = SplitRangeAt(range, pos); |
4520 | Spill(second_part, spill_mode); |
4521 | } |
4522 | |
4523 | void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start, |
4524 | LifetimePosition end, |
4525 | SpillMode spill_mode) { |
4526 | SpillBetweenUntil(range, start, start, end, spill_mode); |
4527 | } |
4528 | |
4529 | void LinearScanAllocator::SpillBetweenUntil(LiveRange* range, |
4530 | LifetimePosition start, |
4531 | LifetimePosition until, |
4532 | LifetimePosition end, |
4533 | SpillMode spill_mode) { |
4534 | CHECK(start < end); |
4535 | LiveRange* second_part = SplitRangeAt(range, start); |
4536 | |
4537 | if (second_part->Start() < end) { |
    // The split result intersects with [start, end[.
    // Split it at a position between ]start+1, end[, spill the middle part,
    // and put the rest back into the unhandled queue.
4541 | |
4542 | // Make sure that the third part always starts after the start of the |
4543 | // second part, as that likely is the current position of the register |
4544 | // allocator and we cannot add ranges to unhandled that start before |
4545 | // the current position. |
4546 | LifetimePosition split_start = Max(second_part->Start().End(), until); |
4547 | |
    // If end is an actual use (which it typically is), we have to split
    // so that there is a gap before it, giving us space for the move that
    // fills the value into its position.
    // However, if we have no choice, split right where asked.
4552 | LifetimePosition third_part_end = Max(split_start, end.PrevStart().End()); |
    // Instead of splitting right after or even before the block boundary,
    // split on the boundary to avoid extra moves.
4555 | if (data()->IsBlockBoundary(end.Start())) { |
4556 | third_part_end = Max(split_start, end.Start()); |
4557 | } |
4558 | |
4559 | LiveRange* third_part = |
4560 | SplitBetween(second_part, split_start, third_part_end); |
4561 | |
4562 | AddToUnhandled(third_part); |
    // Even though we checked start < end above, third_part may coincide with
    // second_part, as we fiddle with the end location. However, we are
    // guaranteed to be after or at until, so this is fine.
4566 | if (third_part != second_part) { |
4567 | Spill(second_part, spill_mode); |
4568 | } |
4569 | } else { |
4570 | // The split result does not intersect with [start, end[. |
    // Nothing to spill. Just add it to unhandled as a whole.
4572 | AddToUnhandled(second_part); |
4573 | } |
4574 | } |
4575 | |
4576 | SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) |
4577 | : data_(data) {} |
4578 | |
4579 | void SpillSlotLocator::LocateSpillSlots() { |
4580 | const InstructionSequence* code = data()->code(); |
4581 | const size_t live_ranges_size = data()->live_ranges().size(); |
4582 | for (TopLevelLiveRange* range : data()->live_ranges()) { |
4583 | CHECK_EQ(live_ranges_size, |
4584 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
4585 | if (range == nullptr || range->IsEmpty()) continue; |
4586 | // We care only about ranges which spill in the frame. |
4587 | if (!range->HasSpillRange() || |
4588 | range->IsSpilledOnlyInDeferredBlocks(data())) { |
4589 | continue; |
4590 | } |
4591 | TopLevelLiveRange::SpillMoveInsertionList* spills = |
4592 | range->GetSpillMoveInsertionLocations(data()); |
4593 | DCHECK_NOT_NULL(spills); |
4594 | for (; spills != nullptr; spills = spills->next) { |
4595 | code->GetInstructionBlock(spills->gap_index)->mark_needs_frame(); |
4596 | } |
4597 | } |
4598 | } |
4599 | |
4600 | OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} |
4601 | |
4602 | void OperandAssigner::DecideSpillingMode() { |
4603 | if (data()->is_turbo_control_flow_aware_allocation()) { |
4604 | for (auto range : data()->live_ranges()) { |
4605 | int max_blocks = data()->code()->InstructionBlockCount(); |
4606 | if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) { |
        // If the range is spilled only in deferred blocks and starts in
        // a non-deferred block, we transition its representation here so
        // that the LiveRangeConnector processes it correctly. If, however,
        // it starts in a deferred block, we upgrade it to spill at
        // definition, as that definition is in a deferred block anyway.
        // While this is an optimization, the code in LiveRangeConnector
        // relies on it!
4614 | if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) { |
4615 | TRACE("Live range %d is spilled and alive in deferred code only\n" , |
4616 | range->vreg()); |
4617 | range->TransitionRangeToSpillAtDefinition(); |
4618 | } else { |
          TRACE(
              "Live range %d is spilled in deferred code only but alive "
              "outside\n",
              range->vreg());
4622 | DCHECK(data()->is_turbo_control_flow_aware_allocation()); |
4623 | range->TransitionRangeToDeferredSpill(data()->allocation_zone(), |
4624 | max_blocks); |
4625 | } |
4626 | } |
4627 | } |
4628 | } |
4629 | } |
4630 | |
4631 | void OperandAssigner::AssignSpillSlots() { |
4632 | for (auto range : data()->live_ranges()) { |
4633 | if (range != nullptr && range->get_bundle() != nullptr) { |
4634 | range->get_bundle()->MergeSpillRanges(); |
4635 | } |
4636 | } |
4637 | ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges(); |
  // Merge disjoint spill ranges.
4639 | for (size_t i = 0; i < spill_ranges.size(); ++i) { |
4640 | SpillRange* range = spill_ranges[i]; |
4641 | if (range == nullptr) continue; |
4642 | if (range->IsEmpty()) continue; |
4643 | for (size_t j = i + 1; j < spill_ranges.size(); ++j) { |
4644 | SpillRange* other = spill_ranges[j]; |
4645 | if (other != nullptr && !other->IsEmpty()) { |
4646 | range->TryMerge(other); |
4647 | } |
4648 | } |
4649 | } |
4650 | // Allocate slots for the merged spill ranges. |
4651 | for (SpillRange* range : spill_ranges) { |
4652 | if (range == nullptr || range->IsEmpty()) continue; |
4653 | // Allocate a new operand referring to the spill slot. |
4654 | if (!range->HasSlot()) { |
4655 | int index = data()->frame()->AllocateSpillSlot(range->byte_width()); |
4656 | range->set_assigned_slot(index); |
4657 | } |
4658 | } |
4659 | } |
4660 | |
4661 | void OperandAssigner::CommitAssignment() { |
4662 | const size_t live_ranges_size = data()->live_ranges().size(); |
4663 | for (TopLevelLiveRange* top_range : data()->live_ranges()) { |
4664 | CHECK_EQ(live_ranges_size, |
4665 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
4666 | if (top_range == nullptr || top_range->IsEmpty()) continue; |
4667 | InstructionOperand spill_operand; |
4668 | if (top_range->HasSpillOperand()) { |
4669 | spill_operand = *top_range->TopLevel()->GetSpillOperand(); |
4670 | } else if (top_range->TopLevel()->HasSpillRange()) { |
4671 | spill_operand = top_range->TopLevel()->GetSpillRangeOperand(); |
4672 | } |
4673 | if (top_range->is_phi()) { |
4674 | data()->GetPhiMapValueFor(top_range)->CommitAssignment( |
4675 | top_range->GetAssignedOperand()); |
4676 | } |
4677 | for (LiveRange* range = top_range; range != nullptr; |
4678 | range = range->next()) { |
4679 | InstructionOperand assigned = range->GetAssignedOperand(); |
4680 | DCHECK(!assigned.IsUnallocated()); |
4681 | range->ConvertUsesToOperand(assigned, spill_operand); |
4682 | } |
4683 | |
4684 | if (!spill_operand.IsInvalid()) { |
4685 | // If this top level range has a child spilled in a deferred block, we use |
4686 | // the range and control flow connection mechanism instead of spilling at |
4687 | // definition. Refer to the ConnectLiveRanges and ResolveControlFlow |
4688 | // phases. Normally, when we spill at definition, we do not insert a |
4689 | // connecting move when a successor child range is spilled - because the |
4690 | // spilled range picks up its value from the slot which was assigned at |
4691 | // definition. For ranges that are determined to spill only in deferred |
4692 | // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks |
      // where a spill operand is expected, and then finalize by inserting the
      // spills in the deferred blocks' dominators.
4695 | if (!top_range->IsSpilledOnlyInDeferredBlocks(data())) { |
4696 | // Spill at definition if the range isn't spilled only in deferred |
4697 | // blocks. |
4698 | top_range->CommitSpillMoves( |
4699 | data(), spill_operand, |
4700 | top_range->has_slot_use() || top_range->spilled()); |
4701 | } |
4702 | } |
4703 | } |
4704 | } |
4705 | |
4706 | ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data) |
4707 | : data_(data) {} |
4708 | |
4709 | bool ReferenceMapPopulator::SafePointsAreInOrder() const { |
4710 | int safe_point = 0; |
4711 | for (ReferenceMap* map : *data()->code()->reference_maps()) { |
4712 | if (safe_point > map->instruction_position()) return false; |
4713 | safe_point = map->instruction_position(); |
4714 | } |
4715 | return true; |
4716 | } |
4717 | |
4718 | void ReferenceMapPopulator::PopulateReferenceMaps() { |
4719 | DCHECK(SafePointsAreInOrder()); |
4720 | // Map all delayed references. |
4721 | for (RegisterAllocationData::DelayedReference& delayed_reference : |
4722 | data()->delayed_references()) { |
4723 | delayed_reference.map->RecordReference( |
4724 | AllocatedOperand::cast(*delayed_reference.operand)); |
4725 | } |
4726 | // Iterate over all safe point positions and record a pointer |
4727 | // for all spilled live ranges at this point. |
4728 | int last_range_start = 0; |
4729 | const ReferenceMapDeque* reference_maps = data()->code()->reference_maps(); |
4730 | ReferenceMapDeque::const_iterator first_it = reference_maps->begin(); |
4731 | const size_t live_ranges_size = data()->live_ranges().size(); |
4732 | for (TopLevelLiveRange* range : data()->live_ranges()) { |
4733 | CHECK_EQ(live_ranges_size, |
4734 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
4735 | if (range == nullptr) continue; |
4736 | // Skip non-reference values. |
4737 | if (!data()->code()->IsReference(range->vreg())) continue; |
4738 | // Skip empty live ranges. |
4739 | if (range->IsEmpty()) continue; |
4740 | if (range->has_preassigned_slot()) continue; |
4741 | |
4742 | // Find the extent of the range and its children. |
4743 | int start = range->Start().ToInstructionIndex(); |
4744 | int end = 0; |
4745 | for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) { |
4746 | LifetimePosition this_end = cur->End(); |
4747 | if (this_end.ToInstructionIndex() > end) |
4748 | end = this_end.ToInstructionIndex(); |
4749 | DCHECK(cur->Start().ToInstructionIndex() >= start); |
4750 | } |
4751 | |
4752 | // Most of the ranges are in order, but not all. Keep an eye on when they |
4753 | // step backwards and reset the first_it so we don't miss any safe points. |
4754 | if (start < last_range_start) first_it = reference_maps->begin(); |
4755 | last_range_start = start; |
4756 | |
4757 | // Step across all the safe points that are before the start of this range, |
4758 | // recording how far we step in order to save doing this for the next range. |
4759 | for (; first_it != reference_maps->end(); ++first_it) { |
4760 | ReferenceMap* map = *first_it; |
4761 | if (map->instruction_position() >= start) break; |
4762 | } |
4763 | |
4764 | InstructionOperand spill_operand; |
4765 | if (((range->HasSpillOperand() && |
4766 | !range->GetSpillOperand()->IsConstant()) || |
4767 | range->HasSpillRange())) { |
4768 | if (range->HasSpillOperand()) { |
4769 | spill_operand = *range->GetSpillOperand(); |
4770 | } else { |
4771 | spill_operand = range->GetSpillRangeOperand(); |
4772 | } |
4773 | DCHECK(spill_operand.IsStackSlot()); |
4774 | DCHECK(CanBeTaggedOrCompressedPointer( |
4775 | AllocatedOperand::cast(spill_operand).representation())); |
4776 | } |
4777 | |
4778 | LiveRange* cur = range; |
4779 | // Step through the safe points to see whether they are in the range. |
4780 | for (auto it = first_it; it != reference_maps->end(); ++it) { |
4781 | ReferenceMap* map = *it; |
4782 | int safe_point = map->instruction_position(); |
4783 | |
4784 | // The safe points are sorted so we can stop searching here. |
4785 | if (safe_point - 1 > end) break; |
4786 | |
4787 | // Advance to the next active range that covers the current |
4788 | // safe point position. |
4789 | LifetimePosition safe_point_pos = |
4790 | LifetimePosition::InstructionFromInstructionIndex(safe_point); |
4791 | |
4792 | // Search for the child range (cur) that covers safe_point_pos. If we |
4793 | // don't find it before the children pass safe_point_pos, keep cur at |
4794 | // the last child, because the next safe_point_pos may be covered by cur. |
4795 | // This may happen if cur has more than one interval, and the current |
4796 | // safe_point_pos is in between intervals. |
4797 | // For that reason, cur may be at most the last child. |
4798 | DCHECK_NOT_NULL(cur); |
4799 | DCHECK(safe_point_pos >= cur->Start() || range == cur); |
4800 | bool found = false; |
4801 | while (!found) { |
4802 | if (cur->Covers(safe_point_pos)) { |
4803 | found = true; |
4804 | } else { |
4805 | LiveRange* next = cur->next(); |
4806 | if (next == nullptr || next->Start() > safe_point_pos) { |
4807 | break; |
4808 | } |
4809 | cur = next; |
4810 | } |
4811 | } |
4812 | |
4813 | if (!found) { |
4814 | continue; |
4815 | } |
4816 | |
4817 | // Check if the live range is spilled and the safe point is after |
4818 | // the spill position. |
4819 | int spill_index = range->IsSpilledOnlyInDeferredBlocks(data()) |
4820 | ? cur->Start().ToInstructionIndex() |
4821 | : range->spill_start_index(); |
4822 | |
4823 | if (!spill_operand.IsInvalid() && safe_point >= spill_index) { |
4824 | TRACE("Pointer for range %d (spilled at %d) at safe point %d\n" , |
4825 | range->vreg(), spill_index, safe_point); |
4826 | map->RecordReference(AllocatedOperand::cast(spill_operand)); |
4827 | } |
4828 | |
4829 | if (!cur->spilled()) { |
4830 | TRACE( |
4831 | "Pointer in register for range %d:%d (start at %d) " |
4832 | "at safe point %d\n" , |
4833 | range->vreg(), cur->relative_id(), cur->Start().value(), |
4834 | safe_point); |
4835 | InstructionOperand operand = cur->GetAssignedOperand(); |
4836 | DCHECK(!operand.IsStackSlot()); |
4837 | DCHECK(CanBeTaggedOrCompressedPointer( |
4838 | AllocatedOperand::cast(operand).representation())); |
4839 | map->RecordReference(AllocatedOperand::cast(operand)); |
4840 | } |
4841 | } |
4842 | } |
4843 | } |
4844 | |
4845 | LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data) |
4846 | : data_(data) {} |
4847 | |
4848 | bool LiveRangeConnector::CanEagerlyResolveControlFlow( |
4849 | const InstructionBlock* block) const { |
4850 | if (block->PredecessorCount() != 1) return false; |
4851 | return block->predecessors()[0].IsNext(block->rpo_number()); |
4852 | } |
4853 | |
4854 | void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) { |
4855 | // Lazily linearize live ranges in memory for fast lookup. |
4856 | LiveRangeFinder finder(data(), local_zone); |
4857 | ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets(); |
4858 | for (const InstructionBlock* block : code()->instruction_blocks()) { |
4859 | if (CanEagerlyResolveControlFlow(block)) continue; |
4860 | BitVector* live = live_in_sets[block->rpo_number().ToInt()]; |
4861 | BitVector::Iterator iterator(live); |
4862 | while (!iterator.Done()) { |
4863 | int vreg = iterator.Current(); |
4864 | LiveRangeBoundArray* array = finder.ArrayFor(vreg); |
4865 | for (const RpoNumber& pred : block->predecessors()) { |
4866 | FindResult result; |
4867 | const InstructionBlock* pred_block = code()->InstructionBlockAt(pred); |
4868 | if (!array->FindConnectableSubranges(block, pred_block, &result)) { |
4869 | continue; |
4870 | } |
4871 | InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand(); |
4872 | InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand(); |
4873 | if (pred_op.Equals(cur_op)) continue; |
4874 | if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) { |
4875 | // We're doing a reload. |
4876 | // We don't need to, if: |
4877 | // 1) there's no register use in this block, and |
4878 | // 2) the range ends before the block does, and |
4879 | // 3) we don't have a successor, or the successor is spilled. |
4880 | LifetimePosition block_start = |
4881 | LifetimePosition::GapFromInstructionIndex(block->code_start()); |
4882 | LifetimePosition block_end = |
4883 | LifetimePosition::GapFromInstructionIndex(block->code_end()); |
4884 | const LiveRange* current = result.cur_cover_; |
4885 | // TODO(herhut): This is not the successor if we have control flow! |
4886 | const LiveRange* successor = current->next(); |
4887 | if (current->End() < block_end && |
4888 | (successor == nullptr || successor->spilled())) { |
            // Verify point 1: no register use. We can go to the end of the
            // range, since it's all within the block.
4891 | |
4892 | bool uses_reg = false; |
4893 | for (const UsePosition* use = current->NextUsePosition(block_start); |
4894 | use != nullptr; use = use->next()) { |
4895 | if (use->operand()->IsAnyRegister()) { |
4896 | uses_reg = true; |
4897 | break; |
4898 | } |
4899 | } |
4900 | if (!uses_reg) continue; |
4901 | } |
4902 | if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks(data()) && |
4903 | pred_block->IsDeferred()) { |
4904 | // The spill location should be defined in pred_block, so add |
4905 | // pred_block to the list of blocks requiring a spill operand. |
4906 | TRACE("Adding B%d to list of spill blocks for %d\n" , |
4907 | pred_block->rpo_number().ToInt(), |
4908 | current->TopLevel()->vreg()); |
4909 | current->TopLevel() |
4910 | ->GetListOfBlocksRequiringSpillOperands(data()) |
4911 | ->Add(pred_block->rpo_number().ToInt()); |
4912 | } |
4913 | } |
4914 | int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op); |
4915 | USE(move_loc); |
4916 | DCHECK_IMPLIES( |
4917 | result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks( |
4918 | data()) && |
4919 | !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()), |
4920 | code()->GetInstructionBlock(move_loc)->IsDeferred()); |
4921 | } |
4922 | iterator.Advance(); |
4923 | } |
4924 | } |
4925 | |
4926 | // At this stage, we collected blocks needing a spill operand from |
4927 | // ConnectRanges and from ResolveControlFlow. Time to commit the spills for |
4928 | // deferred blocks. |
4929 | const size_t live_ranges_size = data()->live_ranges().size(); |
4930 | for (TopLevelLiveRange* top : data()->live_ranges()) { |
4931 | CHECK_EQ(live_ranges_size, |
4932 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
4933 | if (top == nullptr || top->IsEmpty() || |
4934 | !top->IsSpilledOnlyInDeferredBlocks(data())) |
4935 | continue; |
4936 | CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone); |
4937 | } |
4938 | } |
4939 | |
4940 | int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block, |
4941 | const InstructionOperand& cur_op, |
4942 | const InstructionBlock* pred, |
4943 | const InstructionOperand& pred_op) { |
4944 | DCHECK(!pred_op.Equals(cur_op)); |
4945 | int gap_index; |
4946 | Instruction::GapPosition position; |
4947 | if (block->PredecessorCount() == 1) { |
4948 | gap_index = block->first_instruction_index(); |
4949 | position = Instruction::START; |
4950 | } else { |
4951 | DCHECK_EQ(1, pred->SuccessorCount()); |
4952 | DCHECK(!code() |
4953 | ->InstructionAt(pred->last_instruction_index()) |
4954 | ->HasReferenceMap()); |
4955 | gap_index = pred->last_instruction_index(); |
4956 | position = Instruction::END; |
4957 | } |
4958 | data()->AddGapMove(gap_index, position, pred_op, cur_op); |
4959 | return gap_index; |
4960 | } |
4961 | |
4962 | void LiveRangeConnector::ConnectRanges(Zone* local_zone) { |
4963 | DelayedInsertionMap delayed_insertion_map(local_zone); |
4964 | const size_t live_ranges_size = data()->live_ranges().size(); |
4965 | for (TopLevelLiveRange* top_range : data()->live_ranges()) { |
4966 | CHECK_EQ(live_ranges_size, |
4967 | data()->live_ranges().size()); // TODO(neis): crbug.com/831822 |
4968 | if (top_range == nullptr) continue; |
4969 | bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks(data()); |
4970 | LiveRange* first_range = top_range; |
4971 | for (LiveRange *second_range = first_range->next(); second_range != nullptr; |
4972 | first_range = second_range, second_range = second_range->next()) { |
4973 | LifetimePosition pos = second_range->Start(); |
4974 | // Add gap move if the two live ranges touch and there is no block |
4975 | // boundary. |
4976 | if (second_range->spilled()) continue; |
4977 | if (first_range->End() != pos) continue; |
4978 | if (data()->IsBlockBoundary(pos) && |
4979 | !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) { |
4980 | continue; |
4981 | } |
4982 | InstructionOperand prev_operand = first_range->GetAssignedOperand(); |
4983 | InstructionOperand cur_operand = second_range->GetAssignedOperand(); |
4984 | if (prev_operand.Equals(cur_operand)) continue; |
4985 | bool delay_insertion = false; |
4986 | Instruction::GapPosition gap_pos; |
4987 | int gap_index = pos.ToInstructionIndex(); |
4988 | if (connect_spilled && !prev_operand.IsAnyRegister() && |
4989 | cur_operand.IsAnyRegister()) { |
4990 | const InstructionBlock* block = code()->GetInstructionBlock(gap_index); |
4991 | DCHECK(block->IsDeferred()); |
        // We are performing a reload in this block, which means the spill
        // operand must be defined here.
4994 | top_range->GetListOfBlocksRequiringSpillOperands(data())->Add( |
4995 | block->rpo_number().ToInt()); |
4996 | } |
4997 | |
4998 | if (pos.IsGapPosition()) { |
4999 | gap_pos = pos.IsStart() ? Instruction::START : Instruction::END; |
5000 | } else { |
5001 | if (pos.IsStart()) { |
5002 | delay_insertion = true; |
5003 | } else { |
5004 | gap_index++; |
5005 | } |
5006 | gap_pos = delay_insertion ? Instruction::END : Instruction::START; |
5007 | } |
      // Reloads or spills for ranges that are spilled only in deferred
      // blocks must happen only in deferred blocks.
5010 | DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() && |
5011 | cur_operand.IsAnyRegister()), |
5012 | code()->GetInstructionBlock(gap_index)->IsDeferred()); |
5013 | |
5014 | ParallelMove* move = |
5015 | code()->InstructionAt(gap_index)->GetOrCreateParallelMove( |
5016 | gap_pos, code_zone()); |
5017 | if (!delay_insertion) { |
5018 | move->AddMove(prev_operand, cur_operand); |
5019 | } else { |
5020 | delayed_insertion_map.insert( |
5021 | std::make_pair(std::make_pair(move, prev_operand), cur_operand)); |
5022 | } |
5023 | } |
5024 | } |
5025 | if (delayed_insertion_map.empty()) return; |
5026 | // Insert all the moves which should occur after the stored move. |
5027 | ZoneVector<MoveOperands*> to_insert(local_zone); |
5028 | ZoneVector<MoveOperands*> to_eliminate(local_zone); |
5029 | to_insert.reserve(4); |
5030 | to_eliminate.reserve(4); |
5031 | ParallelMove* moves = delayed_insertion_map.begin()->first.first; |
5032 | for (auto it = delayed_insertion_map.begin();; ++it) { |
5033 | bool done = it == delayed_insertion_map.end(); |
5034 | if (done || it->first.first != moves) { |
5035 | // Commit the MoveOperands for current ParallelMove. |
5036 | for (MoveOperands* move : to_eliminate) { |
5037 | move->Eliminate(); |
5038 | } |
5039 | for (MoveOperands* move : to_insert) { |
5040 | moves->push_back(move); |
5041 | } |
5042 | if (done) break; |
5043 | // Reset state. |
5044 | to_eliminate.clear(); |
5045 | to_insert.clear(); |
5046 | moves = it->first.first; |
5047 | } |
5048 | // Gather all MoveOperands for a single ParallelMove. |
5049 | MoveOperands* move = |
5050 | new (code_zone()) MoveOperands(it->first.second, it->second); |
5051 | moves->PrepareInsertAfter(move, &to_eliminate); |
5052 | to_insert.push_back(move); |
5053 | } |
5054 | } |
5055 | |
5056 | void LiveRangeConnector::CommitSpillsInDeferredBlocks( |
5057 | TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) { |
5058 | DCHECK(range->IsSpilledOnlyInDeferredBlocks(data())); |
5059 | DCHECK(!range->spilled()); |
5060 | |
5061 | InstructionSequence* code = data()->code(); |
5062 | InstructionOperand spill_operand = range->GetSpillRangeOperand(); |
5063 | |
5064 | TRACE("Live Range %d will be spilled only in deferred blocks.\n" , |
5065 | range->vreg()); |
5066 | // If we have ranges that aren't spilled but require the operand on the stack, |
5067 | // make sure we insert the spill. |
5068 | for (const LiveRange* child = range; child != nullptr; |
5069 | child = child->next()) { |
5070 | for (const UsePosition* pos = child->first_pos(); pos != nullptr; |
5071 | pos = pos->next()) { |
5072 | if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled()) |
5073 | continue; |
5074 | range->AddBlockRequiringSpillOperand( |
5075 | code->GetInstructionBlock(pos->pos().ToInstructionIndex()) |
5076 | ->rpo_number(), |
5077 | data()); |
5078 | } |
5079 | } |
5080 | |
5081 | ZoneQueue<int> worklist(temp_zone); |
5082 | |
5083 | for (BitVector::Iterator iterator( |
5084 | range->GetListOfBlocksRequiringSpillOperands(data())); |
5085 | !iterator.Done(); iterator.Advance()) { |
5086 | worklist.push(iterator.Current()); |
5087 | } |
5088 | |
5089 | ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone); |
5090 | // Seek the deferred blocks that dominate locations requiring spill operands, |
5091 | // and spill there. We only need to spill at the start of such blocks. |
5092 | BitVector done_blocks( |
5093 | range->GetListOfBlocksRequiringSpillOperands(data())->length(), |
5094 | temp_zone); |
5095 | while (!worklist.empty()) { |
5096 | int block_id = worklist.front(); |
5097 | worklist.pop(); |
5098 | if (done_blocks.Contains(block_id)) continue; |
5099 | done_blocks.Add(block_id); |
5100 | InstructionBlock* spill_block = |
5101 | code->InstructionBlockAt(RpoNumber::FromInt(block_id)); |
5102 | |
5103 | for (const RpoNumber& pred : spill_block->predecessors()) { |
5104 | const InstructionBlock* pred_block = code->InstructionBlockAt(pred); |
5105 | |
5106 | if (pred_block->IsDeferred()) { |
5107 | worklist.push(pred_block->rpo_number().ToInt()); |
5108 | } else { |
5109 | LifetimePosition pred_end = |
5110 | LifetimePosition::InstructionFromInstructionIndex( |
5111 | pred_block->last_instruction_index()); |
5112 | |
5113 | LiveRangeBound* bound = array->Find(pred_end); |
5114 | |
5115 | InstructionOperand pred_op = bound->range_->GetAssignedOperand(); |
5116 | |
5117 | RpoNumber spill_block_number = spill_block->rpo_number(); |
5118 | if (done_moves.find(std::make_pair( |
5119 | spill_block_number, range->vreg())) == done_moves.end()) { |
5120 | TRACE("Spilling deferred spill for range %d at B%d\n" , range->vreg(), |
5121 | spill_block_number.ToInt()); |
5122 | data()->AddGapMove(spill_block->first_instruction_index(), |
5123 | Instruction::GapPosition::START, pred_op, |
5124 | spill_operand); |
5125 | done_moves.insert(std::make_pair(spill_block_number, range->vreg())); |
5126 | spill_block->mark_needs_frame(); |
5127 | } |
5128 | } |
5129 | } |
5130 | } |
5131 | } |
5132 | |
5133 | #undef TRACE |
5134 | |
5135 | } // namespace compiler |
5136 | } // namespace internal |
5137 | } // namespace v8 |
5138 | |