1 | // Copyright 2016 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/compiler/bytecode-analysis.h" |
6 | |
7 | #include "src/interpreter/bytecode-array-iterator.h" |
8 | #include "src/interpreter/bytecode-array-random-iterator.h" |
9 | #include "src/objects-inl.h" |
10 | #include "src/ostreams.h" |
11 | |
12 | namespace v8 { |
13 | namespace internal { |
14 | namespace compiler { |
15 | |
16 | using interpreter::Bytecode; |
17 | using interpreter::Bytecodes; |
18 | using interpreter::OperandType; |
19 | |
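// Tracks which parameters and registers are assigned within a loop body. The
// underlying bit vector is laid out as [parameters..., locals...], which is
// why parameter and local lookups are offset by parameter_count_.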
20 | BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count, |
21 | int register_count, Zone* zone) |
22 | : parameter_count_(parameter_count), |
23 | bit_vector_(new (zone) |
24 | BitVector(parameter_count + register_count, zone)) {} |
25 | |
26 | void BytecodeLoopAssignments::Add(interpreter::Register r) { |
27 | if (r.is_parameter()) { |
28 | bit_vector_->Add(r.ToParameterIndex(parameter_count_)); |
29 | } else { |
30 | bit_vector_->Add(parameter_count_ + r.index()); |
31 | } |
32 | } |
33 | |
34 | void BytecodeLoopAssignments::AddList(interpreter::Register r, uint32_t count) { |
35 | if (r.is_parameter()) { |
36 | for (uint32_t i = 0; i < count; i++) { |
37 | DCHECK(interpreter::Register(r.index() + i).is_parameter()); |
38 | bit_vector_->Add(r.ToParameterIndex(parameter_count_) + i); |
39 | } |
40 | } else { |
41 | for (uint32_t i = 0; i < count; i++) { |
42 | DCHECK(!interpreter::Register(r.index() + i).is_parameter()); |
43 | bit_vector_->Add(parameter_count_ + r.index() + i); |
44 | } |
45 | } |
46 | } |
47 | |
48 | |
49 | void BytecodeLoopAssignments::Union(const BytecodeLoopAssignments& other) { |
50 | bit_vector_->Union(*other.bit_vector_); |
51 | } |
52 | |
53 | bool BytecodeLoopAssignments::ContainsParameter(int index) const { |
54 | DCHECK_GE(index, 0); |
55 | DCHECK_LT(index, parameter_count()); |
56 | return bit_vector_->Contains(index); |
57 | } |
58 | |
59 | bool BytecodeLoopAssignments::ContainsLocal(int index) const { |
60 | DCHECK_GE(index, 0); |
61 | DCHECK_LT(index, local_count()); |
62 | return bit_vector_->Contains(parameter_count_ + index); |
63 | } |
64 | |
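// A ResumeJumpTarget describes where a generator resume with a given suspend
// id has to jump: either directly to the bytecode following the suspend (a
// leaf target), or to the header of a loop containing that bytecode, with the
// final target offset recording the eventual leaf offset.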
65 | ResumeJumpTarget::ResumeJumpTarget(int suspend_id, int target_offset, |
66 | int final_target_offset) |
67 | : suspend_id_(suspend_id), |
68 | target_offset_(target_offset), |
69 | final_target_offset_(final_target_offset) {} |
70 | |
71 | ResumeJumpTarget ResumeJumpTarget::Leaf(int suspend_id, int target_offset) { |
72 | return ResumeJumpTarget(suspend_id, target_offset, target_offset); |
73 | } |
74 | |
ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
                                                const ResumeJumpTarget& next) {
77 | return ResumeJumpTarget(next.suspend_id(), loop_header_offset, |
78 | next.target_offset()); |
79 | } |
80 | |
81 | BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, |
82 | Zone* zone, bool do_liveness_analysis) |
83 | : bytecode_array_(bytecode_array), |
84 | do_liveness_analysis_(do_liveness_analysis), |
85 | zone_(zone), |
86 | loop_stack_(zone), |
87 | loop_end_index_queue_(zone), |
88 | resume_jump_targets_(zone), |
89 | end_to_header_(zone), |
90 | header_to_info_(zone), |
91 | osr_entry_point_(-1), |
92 | liveness_map_(bytecode_array->length(), zone) {} |
93 | |
94 | namespace { |
95 | |
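// Computes the in-liveness of a bytecode from its out-liveness: in_liveness
// holds the out-liveness on entry, register and accumulator writes are killed
// first, and then register and accumulator reads are marked live.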
96 | void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness, |
97 | const interpreter::BytecodeArrayAccessor& accessor) { |
98 | int num_operands = Bytecodes::NumberOfOperands(bytecode); |
99 | const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode); |
100 | |
101 | // Special case Suspend and Resume to just pass through liveness. |
102 | if (bytecode == Bytecode::kSuspendGenerator) { |
103 | // The generator object has to be live. |
104 | in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index()); |
105 | // Suspend additionally reads and returns the accumulator |
106 | DCHECK(Bytecodes::ReadsAccumulator(bytecode)); |
107 | in_liveness.MarkAccumulatorLive(); |
108 | return; |
109 | } |
110 | if (bytecode == Bytecode::kResumeGenerator) { |
111 | // The generator object has to be live. |
112 | in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index()); |
113 | return; |
114 | } |
115 | |
116 | if (Bytecodes::WritesAccumulator(bytecode)) { |
117 | in_liveness.MarkAccumulatorDead(); |
118 | } |
119 | for (int i = 0; i < num_operands; ++i) { |
120 | switch (operand_types[i]) { |
121 | case OperandType::kRegOut: { |
122 | interpreter::Register r = accessor.GetRegisterOperand(i); |
123 | if (!r.is_parameter()) { |
124 | in_liveness.MarkRegisterDead(r.index()); |
125 | } |
126 | break; |
127 | } |
128 | case OperandType::kRegOutList: { |
129 | interpreter::Register r = accessor.GetRegisterOperand(i++); |
130 | uint32_t reg_count = accessor.GetRegisterCountOperand(i); |
131 | if (!r.is_parameter()) { |
132 | for (uint32_t j = 0; j < reg_count; ++j) { |
133 | DCHECK(!interpreter::Register(r.index() + j).is_parameter()); |
134 | in_liveness.MarkRegisterDead(r.index() + j); |
135 | } |
136 | } |
137 | break; |
138 | } |
139 | case OperandType::kRegOutPair: { |
140 | interpreter::Register r = accessor.GetRegisterOperand(i); |
141 | if (!r.is_parameter()) { |
142 | DCHECK(!interpreter::Register(r.index() + 1).is_parameter()); |
143 | in_liveness.MarkRegisterDead(r.index()); |
144 | in_liveness.MarkRegisterDead(r.index() + 1); |
145 | } |
146 | break; |
147 | } |
148 | case OperandType::kRegOutTriple: { |
149 | interpreter::Register r = accessor.GetRegisterOperand(i); |
150 | if (!r.is_parameter()) { |
151 | DCHECK(!interpreter::Register(r.index() + 1).is_parameter()); |
152 | DCHECK(!interpreter::Register(r.index() + 2).is_parameter()); |
153 | in_liveness.MarkRegisterDead(r.index()); |
154 | in_liveness.MarkRegisterDead(r.index() + 1); |
155 | in_liveness.MarkRegisterDead(r.index() + 2); |
156 | } |
157 | break; |
158 | } |
159 | default: |
160 | DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i])); |
161 | break; |
162 | } |
163 | } |
164 | |
165 | if (Bytecodes::ReadsAccumulator(bytecode)) { |
166 | in_liveness.MarkAccumulatorLive(); |
167 | } |
168 | for (int i = 0; i < num_operands; ++i) { |
169 | switch (operand_types[i]) { |
170 | case OperandType::kReg: { |
171 | interpreter::Register r = accessor.GetRegisterOperand(i); |
172 | if (!r.is_parameter()) { |
173 | in_liveness.MarkRegisterLive(r.index()); |
174 | } |
175 | break; |
176 | } |
177 | case OperandType::kRegPair: { |
178 | interpreter::Register r = accessor.GetRegisterOperand(i); |
179 | if (!r.is_parameter()) { |
180 | DCHECK(!interpreter::Register(r.index() + 1).is_parameter()); |
181 | in_liveness.MarkRegisterLive(r.index()); |
182 | in_liveness.MarkRegisterLive(r.index() + 1); |
183 | } |
184 | break; |
185 | } |
186 | case OperandType::kRegList: { |
187 | interpreter::Register r = accessor.GetRegisterOperand(i++); |
188 | uint32_t reg_count = accessor.GetRegisterCountOperand(i); |
189 | if (!r.is_parameter()) { |
190 | for (uint32_t j = 0; j < reg_count; ++j) { |
191 | DCHECK(!interpreter::Register(r.index() + j).is_parameter()); |
192 | in_liveness.MarkRegisterLive(r.index() + j); |
193 | } |
194 | } |
195 | break; |
196 | } |
197 | default: |
198 | DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i])); |
199 | break; |
200 | } |
201 | } |
202 | } |
203 | |
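// Computes the out-liveness of a bytecode as the union of the in-liveness of
// its possible successors: forward jump targets, switch targets, the
// fall-through bytecode (unless this is an unconditional jump), and any
// exception handler covering this offset. Back edges (JumpLoop) are handled
// separately by the loop fixpoint in BytecodeAnalysis::Analyze.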
204 | void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness, |
205 | BytecodeLivenessState* next_bytecode_in_liveness, |
206 | const interpreter::BytecodeArrayAccessor& accessor, |
207 | const BytecodeLivenessMap& liveness_map) { |
208 | int current_offset = accessor.current_offset(); |
209 | const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array(); |
210 | |
211 | // Special case Suspend and Resume to just pass through liveness. |
212 | if (bytecode == Bytecode::kSuspendGenerator || |
213 | bytecode == Bytecode::kResumeGenerator) { |
214 | out_liveness.Union(*next_bytecode_in_liveness); |
215 | return; |
216 | } |
217 | |
  // Update from jump target (if any). Skip loops; these are updated manually
  // in the liveness iterations.
220 | if (Bytecodes::IsForwardJump(bytecode)) { |
221 | int target_offset = accessor.GetJumpTargetOffset(); |
222 | out_liveness.Union(*liveness_map.GetInLiveness(target_offset)); |
223 | } else if (Bytecodes::IsSwitch(bytecode)) { |
224 | for (const auto& entry : accessor.GetJumpTableTargetOffsets()) { |
225 | out_liveness.Union(*liveness_map.GetInLiveness(entry.target_offset)); |
226 | } |
227 | } |
228 | |
229 | // Update from next bytecode (unless there isn't one or this is an |
230 | // unconditional jump). |
231 | if (next_bytecode_in_liveness != nullptr && |
232 | !Bytecodes::IsUnconditionalJump(bytecode)) { |
233 | out_liveness.Union(*next_bytecode_in_liveness); |
234 | } |
235 | |
236 | // Update from exception handler (if any). |
237 | if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) { |
238 | int handler_context; |
239 | // TODO(leszeks): We should look up this range only once per entry. |
240 | HandlerTable table(*bytecode_array); |
241 | int handler_offset = |
242 | table.LookupRange(current_offset, &handler_context, nullptr); |
243 | |
244 | if (handler_offset != -1) { |
245 | bool was_accumulator_live = out_liveness.AccumulatorIsLive(); |
246 | out_liveness.Union(*liveness_map.GetInLiveness(handler_offset)); |
247 | out_liveness.MarkRegisterLive(handler_context); |
248 | if (!was_accumulator_live) { |
249 | // The accumulator is reset to the exception on entry into a handler, |
250 | // and so shouldn't be considered live coming out of this bytecode just |
251 | // because it's live coming into the handler. So, kill the accumulator |
252 | // if the handler is the only thing that made it live. |
253 | out_liveness.MarkAccumulatorDead(); |
254 | |
255 | // TODO(leszeks): Ideally the accumulator wouldn't be considered live at |
256 | // the start of the handler, but looking up if the current bytecode is |
257 | // the start of a handler is not free, so we should only do it if we |
258 | // decide it's necessary. |
259 | } |
260 | } |
261 | } |
262 | } |
263 | |
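// Performs one backwards liveness step for a single bytecode: recompute its
// out-liveness from its successors, copy that into its in-liveness, and apply
// the bytecode's own kill/gen effects.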
264 | void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness, |
265 | BytecodeLivenessState** next_bytecode_in_liveness, |
266 | const interpreter::BytecodeArrayAccessor& accessor, |
267 | const BytecodeLivenessMap& liveness_map) { |
268 | UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness, |
269 | accessor, liveness_map); |
270 | liveness.in->CopyFrom(*liveness.out); |
271 | UpdateInLiveness(bytecode, *liveness.in, accessor); |
272 | |
273 | *next_bytecode_in_liveness = liveness.in; |
274 | } |
275 | |
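// Records every register written by this bytecode in the given loop
// assignment set.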
276 | void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments, |
277 | const interpreter::BytecodeArrayAccessor& accessor) { |
278 | int num_operands = Bytecodes::NumberOfOperands(bytecode); |
279 | const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode); |
280 | |
281 | for (int i = 0; i < num_operands; ++i) { |
282 | switch (operand_types[i]) { |
283 | case OperandType::kRegOut: { |
284 | assignments.Add(accessor.GetRegisterOperand(i)); |
285 | break; |
286 | } |
287 | case OperandType::kRegOutList: { |
288 | interpreter::Register r = accessor.GetRegisterOperand(i++); |
289 | uint32_t reg_count = accessor.GetRegisterCountOperand(i); |
290 | assignments.AddList(r, reg_count); |
291 | break; |
292 | } |
293 | case OperandType::kRegOutPair: { |
294 | assignments.AddList(accessor.GetRegisterOperand(i), 2); |
295 | break; |
296 | } |
297 | case OperandType::kRegOutTriple: { |
298 | assignments.AddList(accessor.GetRegisterOperand(i), 3); |
299 | break; |
300 | } |
301 | default: |
302 | DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i])); |
303 | break; |
304 | } |
305 | } |
306 | } |
307 | |
308 | } // namespace |
309 | |
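// Walks the bytecode backwards once to discover loops, resume jump targets
// and the OSR entry point, computing liveness along the way if requested.
// Liveness across loop back edges is then fixed up by re-visiting each loop
// body (and finally the generator switch) until a fixpoint is reached.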
310 | void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { |
311 | loop_stack_.push({-1, nullptr}); |
312 | |
313 | BytecodeLivenessState* next_bytecode_in_liveness = nullptr; |
314 | |
315 | bool is_osr = !osr_bailout_id.IsNone(); |
316 | int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1; |
317 | |
318 | int generator_switch_index = -1; |
319 | |
320 | interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone()); |
321 | for (iterator.GoToEnd(); iterator.IsValid(); --iterator) { |
322 | Bytecode bytecode = iterator.current_bytecode(); |
323 | int current_offset = iterator.current_offset(); |
324 | |
325 | if (bytecode == Bytecode::kSwitchOnGeneratorState) { |
326 | DCHECK_EQ(generator_switch_index, -1); |
327 | generator_switch_index = iterator.current_index(); |
328 | } |
329 | |
330 | if (bytecode == Bytecode::kJumpLoop) { |
      // Every byte up to and including the last byte within the backwards jump
      // instruction is considered part of the loop; set the loop end
      // accordingly.
333 | int loop_end = current_offset + iterator.current_bytecode_size(); |
      int loop_header = iterator.GetJumpTargetOffset();
335 | PushLoop(loop_header, loop_end); |
336 | |
337 | if (current_offset == osr_loop_end_offset) { |
338 | osr_entry_point_ = loop_header; |
339 | } else if (current_offset < osr_loop_end_offset) { |
        // Check that we've already found the osr_entry_point once we've gone
        // past the osr_loop_end_offset. Note that we are iterating the
        // bytecode in reverse, so the less-than comparison above is correct.
343 | DCHECK_NE(-1, osr_entry_point_); |
344 | } |
345 | |
346 | // Save the index so that we can do another pass later. |
347 | if (do_liveness_analysis_) { |
348 | loop_end_index_queue_.push_back(iterator.current_index()); |
349 | } |
350 | } else if (loop_stack_.size() > 1) { |
351 | LoopStackEntry& current_loop = loop_stack_.top(); |
352 | LoopInfo* current_loop_info = current_loop.loop_info; |
353 | |
354 | // TODO(leszeks): Ideally, we'd only set values that were assigned in |
355 | // the loop *and* are live when the loop exits. However, this requires |
356 | // tracking the out-liveness of *all* loop exits, which is not |
357 | // information we currently have. |
358 | UpdateAssignments(bytecode, current_loop_info->assignments(), iterator); |
359 | |
360 | // Update suspend counts for this loop, though only if not OSR. |
361 | if (!is_osr && bytecode == Bytecode::kSuspendGenerator) { |
362 | int suspend_id = iterator.GetUnsignedImmediateOperand(3); |
363 | int resume_offset = current_offset + iterator.current_bytecode_size(); |
364 | current_loop_info->AddResumeTarget( |
365 | ResumeJumpTarget::Leaf(suspend_id, resume_offset)); |
366 | } |
367 | |
368 | // If we've reached the header of the loop, pop it off the stack. |
369 | if (current_offset == current_loop.header_offset) { |
370 | loop_stack_.pop(); |
371 | if (loop_stack_.size() > 1) { |
372 | // If there is still an outer loop, propagate inner loop assignments. |
373 | LoopInfo* parent_loop_info = loop_stack_.top().loop_info; |
374 | |
375 | parent_loop_info->assignments().Union( |
376 | current_loop_info->assignments()); |
377 | |
378 | // Also, propagate resume targets. Instead of jumping to the target |
379 | // itself, the outer loop will jump to this loop header for any |
380 | // targets that are inside the current loop, so that this loop stays |
381 | // reducible. Hence, a nested loop of the form: |
382 | // |
383 | // switch (#1 -> suspend1, #2 -> suspend2) |
384 | // loop { |
385 | // suspend1: suspend #1 |
386 | // loop { |
387 | // suspend2: suspend #2 |
388 | // } |
389 | // } |
390 | // |
391 | // becomes: |
392 | // |
393 | // switch (#1 -> loop1, #2 -> loop1) |
394 | // loop1: loop { |
395 | // switch (#1 -> suspend1, #2 -> loop2) |
396 | // suspend1: suspend #1 |
397 | // loop2: loop { |
398 | // switch (#2 -> suspend2) |
399 | // suspend2: suspend #2 |
400 | // } |
401 | // } |
402 | for (const auto& target : current_loop_info->resume_jump_targets()) { |
403 | parent_loop_info->AddResumeTarget( |
404 | ResumeJumpTarget::AtLoopHeader(current_offset, target)); |
405 | } |
406 | |
407 | } else { |
408 | // Otherwise, just propagate inner loop suspends to top-level. |
409 | for (const auto& target : current_loop_info->resume_jump_targets()) { |
410 | resume_jump_targets_.push_back( |
411 | ResumeJumpTarget::AtLoopHeader(current_offset, target)); |
412 | } |
413 | } |
414 | } |
415 | } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) { |
416 | // If we're not in a loop, we still need to look for suspends. |
417 | // TODO(leszeks): It would be nice to de-duplicate this with the in-loop |
418 | // case |
419 | int suspend_id = iterator.GetUnsignedImmediateOperand(3); |
420 | int resume_offset = current_offset + iterator.current_bytecode_size(); |
421 | resume_jump_targets_.push_back( |
422 | ResumeJumpTarget::Leaf(suspend_id, resume_offset)); |
423 | } |
424 | |
425 | if (do_liveness_analysis_) { |
426 | BytecodeLiveness& liveness = liveness_map_.InitializeLiveness( |
427 | current_offset, bytecode_array()->register_count(), zone()); |
428 | UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, |
429 | liveness_map_); |
430 | } |
431 | } |
432 | |
433 | DCHECK_EQ(loop_stack_.size(), 1u); |
434 | DCHECK_EQ(loop_stack_.top().header_offset, -1); |
435 | |
436 | DCHECK(ResumeJumpTargetsAreValid()); |
437 | |
438 | if (!do_liveness_analysis_) return; |
439 | |
440 | // At this point, every bytecode has a valid in and out liveness, except for |
441 | // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness |
442 | // analysis iterations can only add additional liveness bits that are pulled |
443 | // across these back edges. |
444 | // |
445 | // Furthermore, a loop header's in-liveness can only change based on any |
446 | // bytecodes *after* the loop end -- it cannot change as a result of the |
  // JumpLoop liveness being updated, as the only liveness bits that can be
448 | // added to the loop body are those of the loop header. |
449 | // |
450 | // So, if we know that the liveness of bytecodes after a loop header won't |
451 | // change (e.g. because there are no loops in them, or we have already ensured |
452 | // those loops are valid), we can safely update the loop end and pass over the |
453 | // loop body, and then never have to pass over that loop end again, because we |
454 | // have shown that its target, the loop header, can't change from the entries |
455 | // after the loop, and can't change from any loop body pass. |
456 | // |
457 | // This means that in a pass, we can iterate backwards over the bytecode |
458 | // array, process any loops that we encounter, and on subsequent passes we can |
459 | // skip processing those loops (though we still have to process inner loops). |
460 | // |
461 | // Equivalently, we can queue up loop ends from back to front, and pass over |
462 | // the loops in that order, as this preserves both the bottom-to-top and |
463 | // outer-to-inner requirements. |
464 | |
465 | for (int loop_end_index : loop_end_index_queue_) { |
466 | iterator.GoToIndex(loop_end_index); |
467 | |
468 | DCHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop); |
469 | |
    int header_offset = iterator.GetJumpTargetOffset();
471 | int end_offset = iterator.current_offset(); |
472 | |
    BytecodeLiveness& header_liveness =
        liveness_map_.GetLiveness(header_offset);
475 | BytecodeLiveness& end_liveness = liveness_map_.GetLiveness(end_offset); |
476 | |
477 | if (!end_liveness.out->UnionIsChanged(*header_liveness.in)) { |
478 | // Only update the loop body if the loop end liveness changed. |
479 | continue; |
480 | } |
481 | end_liveness.in->CopyFrom(*end_liveness.out); |
482 | next_bytecode_in_liveness = end_liveness.in; |
483 | |
484 | // Advance into the loop body. |
485 | --iterator; |
486 | for (; iterator.current_offset() > header_offset; --iterator) { |
487 | Bytecode bytecode = iterator.current_bytecode(); |
488 | int current_offset = iterator.current_offset(); |
489 | BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset); |
490 | |
491 | UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, |
492 | liveness_map_); |
493 | } |
494 | // Now we are at the loop header. Since the in-liveness of the header |
495 | // can't change, we need only to update the out-liveness. |
496 | UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out, |
497 | next_bytecode_in_liveness, iterator, liveness_map_); |
498 | } |
499 | |
500 | // Process the generator switch statement separately, once the loops are done. |
501 | // This has to be a separate pass because the generator switch can jump into |
502 | // the middle of loops (and is the only kind of jump that can jump across a |
503 | // loop header). |
504 | if (generator_switch_index != -1) { |
505 | iterator.GoToIndex(generator_switch_index); |
506 | DCHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnGeneratorState); |
507 | |
508 | int current_offset = iterator.current_offset(); |
509 | BytecodeLiveness& switch_liveness = |
510 | liveness_map_.GetLiveness(current_offset); |
511 | |
512 | bool any_changed = false; |
513 | for (const auto& entry : iterator.GetJumpTableTargetOffsets()) { |
514 | if (switch_liveness.out->UnionIsChanged( |
515 | *liveness_map_.GetInLiveness(entry.target_offset))) { |
516 | any_changed = true; |
517 | } |
518 | } |
519 | |
520 | // If the switch liveness changed, we have to propagate it up the remaining |
521 | // bytecodes before it. |
522 | if (any_changed) { |
523 | switch_liveness.in->CopyFrom(*switch_liveness.out); |
524 | UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, *switch_liveness.in, |
525 | iterator); |
526 | next_bytecode_in_liveness = switch_liveness.in; |
527 | for (--iterator; iterator.IsValid(); --iterator) { |
528 | Bytecode bytecode = iterator.current_bytecode(); |
529 | int current_offset = iterator.current_offset(); |
530 | BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset); |
531 | |
532 | // There shouldn't be any more loops. |
533 | DCHECK_NE(bytecode, Bytecode::kJumpLoop); |
534 | |
535 | UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, |
536 | liveness_map_); |
537 | } |
538 | } |
539 | } |
540 | |
541 | DCHECK(LivenessIsValid()); |
542 | } |
543 | |
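// Registers a loop spanning [loop_header, loop_end) and pushes it onto the
// loop stack as the new innermost loop.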
void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
545 | DCHECK(loop_header < loop_end); |
546 | DCHECK(loop_stack_.top().header_offset < loop_header); |
547 | DCHECK(end_to_header_.find(loop_end) == end_to_header_.end()); |
548 | DCHECK(header_to_info_.find(loop_header) == header_to_info_.end()); |
549 | |
550 | int parent_offset = loop_stack_.top().header_offset; |
551 | |
552 | end_to_header_.insert({loop_end, loop_header}); |
553 | auto it = header_to_info_.insert( |
554 | {loop_header, LoopInfo(parent_offset, bytecode_array_->parameter_count(), |
555 | bytecode_array_->register_count(), zone_)}); |
556 | // Get the loop info pointer from the output of insert. |
557 | LoopInfo* loop_info = &it.first->second; |
558 | |
559 | loop_stack_.push({loop_header, loop_info}); |
560 | } |
561 | |
bool BytecodeAnalysis::IsLoopHeader(int offset) const {
563 | return header_to_info_.find(offset) != header_to_info_.end(); |
564 | } |
565 | |
566 | int BytecodeAnalysis::GetLoopOffsetFor(int offset) const { |
  auto loop_end_to_header = end_to_header_.upper_bound(offset);
568 | // If there is no next end => offset is not in a loop. |
569 | if (loop_end_to_header == end_to_header_.end()) { |
570 | return -1; |
571 | } |
572 | // If the header precedes the offset, this is the loop |
573 | // |
574 | // .> header <--loop_end_to_header |
575 | // | |
576 | // | <--offset |
577 | // | |
578 | // `- end |
579 | if (loop_end_to_header->second <= offset) { |
580 | return loop_end_to_header->second; |
581 | } |
582 | // Otherwise there is a (potentially nested) loop after this offset. |
583 | // |
584 | // <--offset |
585 | // |
586 | // .> header |
587 | // | |
588 | // | .> header <--loop_end_to_header |
589 | // | | |
590 | // | `- end |
591 | // | |
592 | // `- end |
593 | // We just return the parent of the next loop (might be -1). |
594 | DCHECK(header_to_info_.upper_bound(offset) != header_to_info_.end()); |
595 | |
596 | return header_to_info_.upper_bound(offset)->second.parent_offset(); |
597 | } |
598 | |
const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
600 | DCHECK(IsLoopHeader(header_offset)); |
601 | |
602 | return header_to_info_.find(header_offset)->second; |
603 | } |
604 | |
605 | const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor( |
606 | int offset) const { |
607 | if (!do_liveness_analysis_) return nullptr; |
608 | |
609 | return liveness_map_.GetInLiveness(offset); |
610 | } |
611 | |
612 | const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor( |
613 | int offset) const { |
614 | if (!do_liveness_analysis_) return nullptr; |
615 | |
616 | return liveness_map_.GetOutLiveness(offset); |
617 | } |
618 | |
619 | std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const { |
620 | interpreter::BytecodeArrayIterator iterator(bytecode_array()); |
621 | |
622 | for (; !iterator.done(); iterator.Advance()) { |
623 | int current_offset = iterator.current_offset(); |
624 | |
625 | const BitVector& in_liveness = |
626 | GetInLivenessFor(current_offset)->bit_vector(); |
627 | const BitVector& out_liveness = |
628 | GetOutLivenessFor(current_offset)->bit_vector(); |
629 | |
630 | for (int i = 0; i < in_liveness.length(); ++i) { |
      os << (in_liveness.Contains(i) ? "L" : ".");
    }
    os << " -> ";

    for (int i = 0; i < out_liveness.length(); ++i) {
      os << (out_liveness.Contains(i) ? "L" : ".");
    }

    os << " | " << current_offset << ": ";
640 | iterator.PrintTo(os) << std::endl; |
641 | } |
642 | |
643 | return os; |
644 | } |
645 | |
646 | #if DEBUG |
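// Verifies that every suspend id in the generator switch is resolved exactly
// once by a leaf resume target, possibly via a chain of loop-header resume
// targets for loops that enclose the resume point.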
647 | bool BytecodeAnalysis::ResumeJumpTargetsAreValid() { |
648 | bool valid = true; |
649 | |
650 | // Find the generator switch. |
651 | interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone()); |
652 | for (iterator.GoToStart(); iterator.IsValid(); ++iterator) { |
653 | if (iterator.current_bytecode() == Bytecode::kSwitchOnGeneratorState) { |
654 | break; |
655 | } |
656 | } |
657 | |
658 | // If the iterator is invalid, we've reached the end without finding the |
659 | // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we |
660 | // need no jump targets. So, ensure there are no jump targets and exit. |
661 | if (!iterator.IsValid() || HasOsrEntryPoint()) { |
662 | // Check top-level. |
663 | if (!resume_jump_targets().empty()) { |
      PrintF(stderr,
             "Found %zu top-level resume targets but no resume switch\n",
             resume_jump_targets().size());
667 | valid = false; |
668 | } |
669 | // Check loops. |
670 | for (const std::pair<const int, LoopInfo>& loop_info : header_to_info_) { |
671 | if (!loop_info.second.resume_jump_targets().empty()) { |
        PrintF(stderr,
               "Found %zu resume targets at loop at offset %d, but no resume "
               "switch\n",
               loop_info.second.resume_jump_targets().size(), loop_info.first);
676 | valid = false; |
677 | } |
678 | } |
679 | |
680 | return valid; |
681 | } |
682 | |
683 | // Otherwise, we've found the resume switch. Check that the top level jumps |
684 | // only to leaves and loop headers, then check that each loop header handles |
685 | // all the unresolved jumps, also jumping only to leaves and inner loop |
686 | // headers. |
687 | |
688 | // First collect all required suspend ids. |
689 | std::map<int, int> unresolved_suspend_ids; |
690 | for (const interpreter::JumpTableTargetOffset& offset : |
691 | iterator.GetJumpTableTargetOffsets()) { |
692 | int suspend_id = offset.case_value; |
693 | int resume_offset = offset.target_offset; |
694 | |
695 | unresolved_suspend_ids[suspend_id] = resume_offset; |
696 | } |
697 | |
698 | // Check top-level. |
699 | if (!ResumeJumpTargetLeavesResolveSuspendIds(-1, resume_jump_targets(), |
700 | &unresolved_suspend_ids)) { |
701 | valid = false; |
702 | } |
703 | // Check loops. |
704 | for (const std::pair<const int, LoopInfo>& loop_info : header_to_info_) { |
705 | if (!ResumeJumpTargetLeavesResolveSuspendIds( |
706 | loop_info.first, loop_info.second.resume_jump_targets(), |
707 | &unresolved_suspend_ids)) { |
708 | valid = false; |
709 | } |
710 | } |
711 | |
712 | // Check that everything is resolved. |
713 | if (!unresolved_suspend_ids.empty()) { |
    PrintF(stderr,
           "Found suspend ids that are not resolved by a final leaf resume "
           "jump:\n");
717 | |
718 | for (const std::pair<const int, int>& target : unresolved_suspend_ids) { |
719 | PrintF(stderr, " %d -> %d\n" , target.first, target.second); |
720 | } |
721 | valid = false; |
722 | } |
723 | |
724 | return valid; |
725 | } |
726 | |
727 | bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds( |
728 | int parent_offset, const ZoneVector<ResumeJumpTarget>& resume_jump_targets, |
729 | std::map<int, int>* unresolved_suspend_ids) { |
730 | bool valid = true; |
731 | for (const ResumeJumpTarget& target : resume_jump_targets) { |
732 | std::map<int, int>::iterator it = |
733 | unresolved_suspend_ids->find(target.suspend_id()); |
734 | if (it == unresolved_suspend_ids->end()) { |
      PrintF(
          stderr,
          "No unresolved suspend found for resume target with suspend id %d\n",
          target.suspend_id());
739 | valid = false; |
740 | continue; |
741 | } |
742 | int expected_target = it->second; |
743 | |
744 | if (target.is_leaf()) { |
745 | // Leaves should have the expected target as their target. |
746 | if (target.target_offset() != expected_target) { |
        PrintF(
            stderr,
            "Expected leaf resume target for id %d to have target offset %d, "
            "but had %d\n",
            target.suspend_id(), expected_target, target.target_offset());
752 | valid = false; |
753 | } else { |
754 | // Make sure we're resuming to a Resume bytecode |
755 | interpreter::BytecodeArrayAccessor assessor(bytecode_array(), |
756 | target.target_offset()); |
757 | if (assessor.current_bytecode() != Bytecode::kResumeGenerator) { |
          PrintF(stderr,
                 "Expected resume target for id %d, offset %d, to be "
                 "ResumeGenerator, but found %s\n",
                 target.suspend_id(), target.target_offset(),
                 Bytecodes::ToString(assessor.current_bytecode()));
763 | |
764 | valid = false; |
765 | } |
766 | } |
767 | // We've resolved this suspend id, so erase it to make sure we don't |
768 | // resolve it twice. |
769 | unresolved_suspend_ids->erase(it); |
770 | } else { |
771 | // Non-leaves should have a direct inner loop header as their target. |
772 | if (!IsLoopHeader(target.target_offset())) { |
        PrintF(stderr,
               "Expected non-leaf resume target for id %d to have a loop "
               "header at target offset %d\n",
               target.suspend_id(), target.target_offset());
777 | valid = false; |
778 | } else { |
779 | LoopInfo loop_info = GetLoopInfoFor(target.target_offset()); |
780 | if (loop_info.parent_offset() != parent_offset) { |
          PrintF(stderr,
                 "Expected non-leaf resume target for id %d to have a direct "
                 "inner loop at target offset %d\n",
                 target.suspend_id(), target.target_offset());
785 | valid = false; |
786 | } |
787 | // If the target loop is a valid inner loop, we'll check its validity |
788 | // when we analyze its resume targets. |
789 | } |
790 | } |
791 | } |
792 | return valid; |
793 | } |
794 | |
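// Verifies that the computed liveness is a fixpoint: one additional backwards
// pass must not change any in- or out-liveness, and the accumulator must be
// dead at the target of any jump that leaves a loop or follows its back edge.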
795 | bool BytecodeAnalysis::LivenessIsValid() { |
796 | interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone()); |
797 | |
798 | BytecodeLivenessState previous_liveness(bytecode_array()->register_count(), |
799 | zone()); |
800 | |
801 | int invalid_offset = -1; |
802 | int which_invalid = -1; |
803 | |
804 | BytecodeLivenessState* next_bytecode_in_liveness = nullptr; |
805 | |
806 | // Ensure that there are no liveness changes if we iterate one more time. |
807 | for (iterator.GoToEnd(); iterator.IsValid(); --iterator) { |
808 | Bytecode bytecode = iterator.current_bytecode(); |
809 | |
810 | int current_offset = iterator.current_offset(); |
811 | |
812 | BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset); |
813 | |
814 | previous_liveness.CopyFrom(*liveness.out); |
815 | |
816 | UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness, |
817 | iterator, liveness_map_); |
818 | // UpdateOutLiveness skips kJumpLoop, so we update it manually. |
819 | if (bytecode == Bytecode::kJumpLoop) { |
820 | int target_offset = iterator.GetJumpTargetOffset(); |
821 | liveness.out->Union(*liveness_map_.GetInLiveness(target_offset)); |
822 | } |
823 | |
824 | if (!liveness.out->Equals(previous_liveness)) { |
825 | // Reset the invalid liveness. |
826 | liveness.out->CopyFrom(previous_liveness); |
827 | invalid_offset = current_offset; |
828 | which_invalid = 1; |
829 | break; |
830 | } |
831 | |
832 | previous_liveness.CopyFrom(*liveness.in); |
833 | |
834 | liveness.in->CopyFrom(*liveness.out); |
835 | UpdateInLiveness(bytecode, *liveness.in, iterator); |
836 | |
837 | if (!liveness.in->Equals(previous_liveness)) { |
838 | // Reset the invalid liveness. |
839 | liveness.in->CopyFrom(previous_liveness); |
840 | invalid_offset = current_offset; |
841 | which_invalid = 0; |
842 | break; |
843 | } |
844 | |
845 | next_bytecode_in_liveness = liveness.in; |
846 | } |
847 | |
848 | // Ensure that the accumulator is not live when jumping out of a loop, or on |
849 | // the back-edge of a loop. |
850 | for (iterator.GoToStart(); iterator.IsValid() && invalid_offset == -1; |
851 | ++iterator) { |
852 | Bytecode bytecode = iterator.current_bytecode(); |
853 | int current_offset = iterator.current_offset(); |
    int loop_header = GetLoopOffsetFor(current_offset);
855 | |
856 | // We only care if we're inside a loop. |
857 | if (loop_header == -1) continue; |
858 | |
859 | // We only care about jumps. |
860 | if (!Bytecodes::IsJump(bytecode)) continue; |
861 | |
862 | int jump_target = iterator.GetJumpTargetOffset(); |
863 | |
864 | // If this is a forward jump to somewhere else in the same loop, ignore it. |
865 | if (Bytecodes::IsForwardJump(bytecode) && |
866 | GetLoopOffsetFor(jump_target) == loop_header) { |
867 | continue; |
868 | } |
869 | |
870 | // The accumulator must be dead at the start of the target of the jump. |
871 | if (liveness_map_.GetLiveness(jump_target).in->AccumulatorIsLive()) { |
872 | invalid_offset = jump_target; |
873 | which_invalid = 0; |
874 | break; |
875 | } |
876 | } |
877 | |
878 | if (invalid_offset != -1) { |
879 | OFStream of(stderr); |
880 | of << "Invalid liveness:" << std::endl; |
881 | |
882 | // Dump the bytecode, annotated with the liveness and marking loops. |
883 | |
884 | int loop_indent = 0; |
885 | |
886 | interpreter::BytecodeArrayIterator forward_iterator(bytecode_array()); |
887 | for (; !forward_iterator.done(); forward_iterator.Advance()) { |
888 | int current_offset = forward_iterator.current_offset(); |
889 | const BitVector& in_liveness = |
890 | GetInLivenessFor(current_offset)->bit_vector(); |
891 | const BitVector& out_liveness = |
892 | GetOutLivenessFor(current_offset)->bit_vector(); |
893 | |
894 | for (int i = 0; i < in_liveness.length(); ++i) { |
895 | of << (in_liveness.Contains(i) ? 'L' : '.'); |
896 | } |
897 | |
      of << " | ";

      for (int i = 0; i < out_liveness.length(); ++i) {
        of << (out_liveness.Contains(i) ? 'L' : '.');
      }

      of << " : " << current_offset << " : ";
905 | |
      // Draw loop back edges by indenting everything between loop headers and
907 | // jump loop instructions. |
908 | if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) { |
909 | loop_indent--; |
910 | } |
911 | for (int i = 0; i < loop_indent; ++i) { |
        of << "| ";
      }
      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
        of << "`-";
      } else if (IsLoopHeader(current_offset)) {
        of << ".>";
918 | loop_indent++; |
919 | } |
920 | forward_iterator.PrintTo(of); |
921 | if (Bytecodes::IsJump(forward_iterator.current_bytecode())) { |
        of << " (@" << forward_iterator.GetJumpTargetOffset() << ")";
923 | } |
924 | of << std::endl; |
925 | |
926 | if (current_offset == invalid_offset) { |
927 | // Underline the invalid liveness. |
928 | if (which_invalid == 0) { |
929 | for (int i = 0; i < in_liveness.length(); ++i) { |
930 | of << '^'; |
931 | } |
932 | for (int i = 0; i < out_liveness.length() + 3; ++i) { |
933 | of << ' '; |
934 | } |
935 | } else { |
936 | for (int i = 0; i < in_liveness.length() + 3; ++i) { |
937 | of << ' '; |
938 | } |
939 | for (int i = 0; i < out_liveness.length(); ++i) { |
940 | of << '^'; |
941 | } |
942 | } |
943 | |
944 | // Make sure to draw the loop indentation marks on this additional line. |
        of << " : " << current_offset << " : ";
        for (int i = 0; i < loop_indent; ++i) {
          of << "| ";
948 | } |
949 | |
950 | of << std::endl; |
951 | } |
952 | } |
953 | } |
954 | |
955 | return invalid_offset == -1; |
956 | } |
957 | #endif |
958 | |
959 | } // namespace compiler |
960 | } // namespace internal |
961 | } // namespace v8 |
962 | |