1 | // Copyright 2013 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/deoptimizer.h" |
6 | |
7 | #include <memory> |
8 | |
9 | #include "src/accessors.h" |
10 | #include "src/assembler-inl.h" |
11 | #include "src/ast/prettyprinter.h" |
12 | #include "src/callable.h" |
13 | #include "src/counters.h" |
14 | #include "src/disasm.h" |
15 | #include "src/frames-inl.h" |
16 | #include "src/global-handles.h" |
17 | #include "src/heap/heap-inl.h" |
18 | #include "src/interpreter/interpreter.h" |
19 | #include "src/log.h" |
20 | #include "src/macro-assembler.h" |
21 | #include "src/objects/debug-objects-inl.h" |
22 | #include "src/objects/heap-number-inl.h" |
23 | #include "src/objects/smi.h" |
24 | #include "src/register-configuration.h" |
25 | #include "src/tracing/trace-event.h" |
26 | #include "src/v8.h" |
27 | #include "src/v8threads.h" |
28 | |
29 | // Has to be the last include (doesn't have include guards) |
30 | #include "src/objects/object-macros.h" |
31 | |
32 | namespace v8 { |
33 | namespace internal { |
34 | |
35 | // {FrameWriter} offers a stack writer abstraction for writing |
36 | // FrameDescriptions. The main service the class provides is managing |
37 | // {top_offset_}, i.e. the offset of the next slot to write to. |
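// Values are written from the highest offset downwards, so the writer
// produces the frame contents in the order in which a growing stack would
// have received the pushes.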
38 | class FrameWriter { |
39 | public: |
40 | static const int NO_INPUT_INDEX = -1; |
41 | FrameWriter(Deoptimizer* deoptimizer, FrameDescription* frame, |
42 | CodeTracer::Scope* trace_scope) |
43 | : deoptimizer_(deoptimizer), |
44 | frame_(frame), |
45 | trace_scope_(trace_scope), |
46 | top_offset_(frame->GetFrameSize()) {} |
47 | |
48 | void PushRawValue(intptr_t value, const char* debug_hint) { |
49 | PushValue(value); |
50 | |
51 | if (trace_scope_ != nullptr) { |
52 | DebugPrintOutputValue(value, debug_hint); |
53 | } |
54 | } |
55 | |
56 | void PushRawObject(Object obj, const char* debug_hint) { |
57 | intptr_t value = obj->ptr(); |
58 | PushValue(value); |
59 | if (trace_scope_ != nullptr) { |
60 | DebugPrintOutputObject(obj, top_offset_, debug_hint); |
61 | } |
62 | } |
63 | |
void PushCallerPc(intptr_t pc) {
top_offset_ -= kPCOnStackSize;
frame_->SetCallerPc(top_offset_, pc);
DebugPrintOutputValue(pc, "caller's pc\n");
}

void PushCallerFp(intptr_t fp) {
top_offset_ -= kFPOnStackSize;
frame_->SetCallerFp(top_offset_, fp);
DebugPrintOutputValue(fp, "caller's fp\n");
}

void PushCallerConstantPool(intptr_t cp) {
top_offset_ -= kSystemPointerSize;
frame_->SetCallerConstantPool(top_offset_, cp);
DebugPrintOutputValue(cp, "caller's constant_pool\n");
}

void PushTranslatedValue(const TranslatedFrame::iterator& iterator,
const char* debug_hint = "") {
Object obj = iterator->GetRawValue();

PushRawObject(obj, debug_hint);

if (trace_scope_) {
PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index());
}

deoptimizer_->QueueValueForMaterialization(output_address(top_offset_), obj,
iterator);
}
95 | |
96 | unsigned top_offset() const { return top_offset_; } |
97 | |
98 | private: |
99 | void PushValue(intptr_t value) { |
100 | CHECK_GE(top_offset_, 0); |
101 | top_offset_ -= kSystemPointerSize; |
102 | frame_->SetFrameSlot(top_offset_, value); |
103 | } |
104 | |
105 | Address output_address(unsigned output_offset) { |
106 | Address output_address = |
107 | static_cast<Address>(frame_->GetTop()) + output_offset; |
108 | return output_address; |
109 | } |
110 | |
void DebugPrintOutputValue(intptr_t value, const char* debug_hint = "") {
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
" " V8PRIxPTR_FMT ": [top + %3d] <- " V8PRIxPTR_FMT " ; %s",
output_address(top_offset_), top_offset_, value, debug_hint);
}
}

void DebugPrintOutputObject(Object obj, unsigned output_offset,
const char* debug_hint = "") {
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ",
output_address(output_offset), output_offset);
if (obj->IsSmi()) {
// Print to the trace file, like the surrounding output.
PrintF(trace_scope_->file(), V8PRIxPTR_FMT " <Smi %d>", obj->ptr(),
Smi::cast(obj)->value());
} else {
obj->ShortPrint(trace_scope_->file());
}
PrintF(trace_scope_->file(), " ; %s", debug_hint);
}
}
132 | |
133 | Deoptimizer* deoptimizer_; |
134 | FrameDescription* frame_; |
135 | CodeTracer::Scope* trace_scope_; |
136 | unsigned top_offset_; |
137 | }; |
138 | |
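// The deopt entry code objects live in a raw array rather than behind
// handles, so the array is registered as a strong-root range to keep the
// objects alive and let the GC update the slots.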
139 | DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) { |
140 | Code* start = &deopt_entry_code_[0]; |
141 | Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1]; |
142 | heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end)); |
143 | } |
144 | |
145 | |
146 | DeoptimizerData::~DeoptimizerData() { |
147 | Code* start = &deopt_entry_code_[0]; |
148 | heap_->UnregisterStrongRoots(FullObjectSlot(start)); |
149 | } |
150 | |
151 | Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) { |
152 | return deopt_entry_code_[static_cast<int>(kind)]; |
153 | } |
154 | |
155 | void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) { |
156 | deopt_entry_code_[static_cast<int>(kind)] = code; |
157 | } |
158 | |
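// Returns a null Code object if {addr} is not contained in any code on the
// native context's deoptimized code list.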
159 | Code Deoptimizer::FindDeoptimizingCode(Address addr) { |
160 | if (function_->IsHeapObject()) { |
161 | // Search all deoptimizing code in the native context of the function. |
162 | Isolate* isolate = isolate_; |
163 | Context native_context = function_->context()->native_context(); |
164 | Object element = native_context->DeoptimizedCodeListHead(); |
165 | while (!element->IsUndefined(isolate)) { |
166 | Code code = Code::cast(element); |
167 | CHECK(code->kind() == Code::OPTIMIZED_FUNCTION); |
168 | if (code->contains(addr)) return code; |
169 | element = code->next_code_link(); |
170 | } |
171 | } |
172 | return Code(); |
173 | } |
174 | |
175 | |
176 | // We rely on this function not causing a GC. It is called from generated code |
177 | // without having a real stack frame in place. |
178 | Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind, |
179 | unsigned bailout_id, Address from, |
180 | int fp_to_sp_delta, Isolate* isolate) { |
181 | JSFunction function = JSFunction::cast(Object(raw_function)); |
182 | Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind, |
183 | bailout_id, from, fp_to_sp_delta); |
184 | CHECK_NULL(isolate->deoptimizer_data()->current_); |
185 | isolate->deoptimizer_data()->current_ = deoptimizer; |
186 | return deoptimizer; |
187 | } |
188 | |
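// Retrieves the Deoptimizer instance stashed by {New} and clears the
// per-isolate slot. The frame descriptions are deleted here, as their
// contents are no longer needed at this point.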
189 | Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { |
190 | Deoptimizer* result = isolate->deoptimizer_data()->current_; |
191 | CHECK_NOT_NULL(result); |
192 | result->DeleteFrameDescriptions(); |
193 | isolate->deoptimizer_data()->current_ = nullptr; |
194 | return result; |
195 | } |
196 | |
197 | DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( |
198 | JavaScriptFrame* frame, |
199 | int jsframe_index, |
200 | Isolate* isolate) { |
201 | CHECK(frame->is_optimized()); |
202 | |
203 | TranslatedState translated_values(frame); |
204 | translated_values.Prepare(frame->fp()); |
205 | |
206 | TranslatedState::iterator frame_it = translated_values.end(); |
207 | int counter = jsframe_index; |
208 | for (auto it = translated_values.begin(); it != translated_values.end(); |
209 | it++) { |
210 | if (it->kind() == TranslatedFrame::kInterpretedFunction || |
211 | it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation || |
212 | it->kind() == |
213 | TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) { |
214 | if (counter == 0) { |
215 | frame_it = it; |
216 | break; |
217 | } |
218 | counter--; |
219 | } |
220 | } |
221 | CHECK(frame_it != translated_values.end()); |
222 | // We only include kJavaScriptBuiltinContinuation frames above to get the |
223 | // counting right. |
224 | CHECK_EQ(frame_it->kind(), TranslatedFrame::kInterpretedFunction); |
225 | |
226 | DeoptimizedFrameInfo* info = |
227 | new DeoptimizedFrameInfo(&translated_values, frame_it, isolate); |
228 | |
229 | return info; |
230 | } |
231 | |
232 | namespace { |
233 | class ActivationsFinder : public ThreadVisitor { |
234 | public: |
235 | explicit ActivationsFinder(std::set<Code>* codes, Code topmost_optimized_code, |
236 | bool safe_to_deopt_topmost_optimized_code) |
237 | : codes_(codes) { |
238 | #ifdef DEBUG |
239 | topmost_ = topmost_optimized_code; |
240 | safe_to_deopt_ = safe_to_deopt_topmost_optimized_code; |
241 | #endif |
242 | } |
243 | |
// Find the frames with activations of codes marked for deoptimization, search
// for the trampoline to the deoptimizer call corresponding to each code, and
// use it to replace the current pc on the stack.
247 | void VisitThread(Isolate* isolate, ThreadLocalTop* top) override { |
248 | for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { |
249 | if (it.frame()->type() == StackFrame::OPTIMIZED) { |
250 | Code code = it.frame()->LookupCode(); |
251 | if (code->kind() == Code::OPTIMIZED_FUNCTION && |
252 | code->marked_for_deoptimization()) { |
253 | codes_->erase(code); |
254 | // Obtain the trampoline to the deoptimizer call. |
255 | SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); |
256 | int trampoline_pc = safepoint.trampoline_pc(); |
257 | DCHECK_IMPLIES(code == topmost_, safe_to_deopt_); |
258 | // Replace the current pc on the stack with the trampoline. |
259 | it.frame()->set_pc(code->raw_instruction_start() + trampoline_pc); |
260 | } |
261 | } |
262 | } |
263 | } |
264 | |
265 | private: |
266 | std::set<Code>* codes_; |
267 | |
268 | #ifdef DEBUG |
269 | Code topmost_; |
270 | bool safe_to_deopt_; |
271 | #endif |
272 | }; |
273 | } // namespace |
274 | |
275 | // Move marked code from the optimized code list to the deoptimized code list, |
276 | // and replace pc on the stack for codes marked for deoptimization. |
277 | void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) { |
278 | DisallowHeapAllocation no_allocation; |
279 | |
280 | Isolate* isolate = context->GetIsolate(); |
281 | Code topmost_optimized_code; |
282 | bool safe_to_deopt_topmost_optimized_code = false; |
283 | #ifdef DEBUG |
284 | // Make sure all activations of optimized code can deopt at their current PC. |
285 | // The topmost optimized code has special handling because it cannot be |
286 | // deoptimized due to weak object dependency. |
287 | for (StackFrameIterator it(isolate, isolate->thread_local_top()); |
288 | !it.done(); it.Advance()) { |
289 | StackFrame::Type type = it.frame()->type(); |
290 | if (type == StackFrame::OPTIMIZED) { |
291 | Code code = it.frame()->LookupCode(); |
292 | JSFunction function = |
293 | static_cast<OptimizedFrame*>(it.frame())->function(); |
294 | if (FLAG_trace_deopt) { |
295 | CodeTracer::Scope scope(isolate->GetCodeTracer()); |
PrintF(scope.file(), "[deoptimizer found activation of function: ");
function->PrintName(scope.file());
PrintF(scope.file(), " / %" V8PRIxPTR "]\n", function.ptr());
299 | } |
300 | SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); |
301 | |
// Turbofan deopt is checked when we are patching addresses on the stack.
303 | bool safe_if_deopt_triggered = safepoint.has_deoptimization_index(); |
304 | bool is_builtin_code = code->kind() == Code::BUILTIN; |
305 | DCHECK(topmost_optimized_code.is_null() || safe_if_deopt_triggered || |
306 | is_builtin_code); |
307 | if (topmost_optimized_code.is_null()) { |
308 | topmost_optimized_code = code; |
309 | safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered; |
310 | } |
311 | } |
312 | } |
313 | #endif |
314 | |
315 | // We will use this set to mark those Code objects that are marked for |
316 | // deoptimization and have not been found in stack frames. |
317 | std::set<Code> codes; |
318 | |
319 | // Move marked code from the optimized code list to the deoptimized code list. |
320 | // Walk over all optimized code objects in this native context. |
321 | Code prev; |
322 | Object element = context->OptimizedCodeListHead(); |
323 | while (!element->IsUndefined(isolate)) { |
324 | Code code = Code::cast(element); |
325 | CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); |
326 | Object next = code->next_code_link(); |
327 | |
328 | if (code->marked_for_deoptimization()) { |
329 | codes.insert(code); |
330 | |
331 | if (!prev.is_null()) { |
332 | // Skip this code in the optimized code list. |
333 | prev->set_next_code_link(next); |
334 | } else { |
335 | // There was no previous node, the next node is the new head. |
336 | context->SetOptimizedCodeListHead(next); |
337 | } |
338 | |
339 | // Move the code to the _deoptimized_ code list. |
340 | code->set_next_code_link(context->DeoptimizedCodeListHead()); |
341 | context->SetDeoptimizedCodeListHead(code); |
342 | } else { |
343 | // Not marked; preserve this element. |
344 | prev = code; |
345 | } |
346 | element = next; |
347 | } |
348 | |
349 | ActivationsFinder visitor(&codes, topmost_optimized_code, |
350 | safe_to_deopt_topmost_optimized_code); |
351 | // Iterate over the stack of this thread. |
352 | visitor.VisitThread(isolate, isolate->thread_local_top()); |
// In addition to iterating over the stack of this thread, we also need
// to consider all the other threads, as they may also use the code
// currently being deoptimized.
356 | isolate->thread_manager()->IterateArchivedThreads(&visitor); |
357 | |
358 | // If there's no activation of a code in any stack then we can remove its |
359 | // deoptimization data. We do this to ensure that code objects that are |
360 | // unlinked don't transitively keep objects alive unnecessarily. |
361 | for (Code code : codes) { |
362 | isolate->heap()->InvalidateCodeDeoptimizationData(code); |
363 | } |
364 | } |
365 | |
366 | |
367 | void Deoptimizer::DeoptimizeAll(Isolate* isolate) { |
368 | RuntimeCallTimerScope runtimeTimer(isolate, |
369 | RuntimeCallCounterId::kDeoptimizeCode); |
370 | TimerEventScope<TimerEventDeoptimizeCode> timer(isolate); |
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
375 | } |
376 | isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock); |
377 | DisallowHeapAllocation no_allocation; |
378 | // For all contexts, mark all code, then deoptimize. |
379 | Object context = isolate->heap()->native_contexts_list(); |
380 | while (!context->IsUndefined(isolate)) { |
381 | Context native_context = Context::cast(context); |
382 | MarkAllCodeForContext(native_context); |
383 | DeoptimizeMarkedCodeForContext(native_context); |
384 | context = native_context->next_context_link(); |
385 | } |
386 | } |
387 | |
388 | |
389 | void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) { |
390 | RuntimeCallTimerScope runtimeTimer(isolate, |
391 | RuntimeCallCounterId::kDeoptimizeCode); |
392 | TimerEventScope<TimerEventDeoptimizeCode> timer(isolate); |
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
397 | } |
398 | DisallowHeapAllocation no_allocation; |
399 | // For all contexts, deoptimize code already marked. |
400 | Object context = isolate->heap()->native_contexts_list(); |
401 | while (!context->IsUndefined(isolate)) { |
402 | Context native_context = Context::cast(context); |
403 | DeoptimizeMarkedCodeForContext(native_context); |
404 | context = native_context->next_context_link(); |
405 | } |
406 | } |
407 | |
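// Marks every code object on the context's optimized code list for
// deoptimization; the stack patching and list maintenance happen later in
// DeoptimizeMarkedCodeForContext.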
408 | void Deoptimizer::MarkAllCodeForContext(Context context) { |
409 | Object element = context->OptimizedCodeListHead(); |
410 | Isolate* isolate = context->GetIsolate(); |
411 | while (!element->IsUndefined(isolate)) { |
412 | Code code = Code::cast(element); |
413 | CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); |
414 | code->set_marked_for_deoptimization(true); |
415 | element = code->next_code_link(); |
416 | } |
417 | } |
418 | |
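// Marks {code} (or the function's current code if {code} is null) for
// deoptimization and immediately deoptimizes any activations of it in the
// function's native context.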
419 | void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) { |
420 | Isolate* isolate = function->GetIsolate(); |
421 | RuntimeCallTimerScope runtimeTimer(isolate, |
422 | RuntimeCallCounterId::kDeoptimizeCode); |
423 | TimerEventScope<TimerEventDeoptimizeCode> timer(isolate); |
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
425 | function->ResetIfBytecodeFlushed(); |
426 | if (code.is_null()) code = function->code(); |
427 | |
428 | if (code->kind() == Code::OPTIMIZED_FUNCTION) { |
429 | // Mark the code for deoptimization and unlink any functions that also |
430 | // refer to that code. The code cannot be shared across native contexts, |
431 | // so we only need to search one. |
432 | code->set_marked_for_deoptimization(true); |
433 | // The code in the function's optimized code feedback vector slot might |
434 | // be different from the code on the function - evict it if necessary. |
435 | function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization( |
function->shared(), "unlinking code marked for deopt");
437 | if (!code->deopt_already_counted()) { |
438 | function->feedback_vector()->increment_deopt_count(); |
439 | code->set_deopt_already_counted(true); |
440 | } |
441 | DeoptimizeMarkedCodeForContext(function->context()->native_context()); |
442 | } |
443 | } |
444 | |
445 | void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { |
446 | deoptimizer->DoComputeOutputFrames(); |
447 | } |
448 | |
449 | const char* Deoptimizer::MessageFor(DeoptimizeKind kind) { |
switch (kind) {
case DeoptimizeKind::kEager:
return "eager";
case DeoptimizeKind::kSoft:
return "soft";
case DeoptimizeKind::kLazy:
return "lazy";
}
FATAL("Unsupported deopt kind");
return nullptr;
460 | } |
461 | |
462 | Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function, |
463 | DeoptimizeKind kind, unsigned bailout_id, Address from, |
464 | int fp_to_sp_delta) |
465 | : isolate_(isolate), |
466 | function_(function), |
467 | bailout_id_(bailout_id), |
468 | deopt_kind_(kind), |
469 | from_(from), |
470 | fp_to_sp_delta_(fp_to_sp_delta), |
471 | deoptimizing_throw_(false), |
472 | catch_handler_data_(-1), |
473 | catch_handler_pc_offset_(-1), |
474 | input_(nullptr), |
475 | output_count_(0), |
476 | jsframe_count_(0), |
477 | output_(nullptr), |
478 | caller_frame_top_(0), |
479 | caller_fp_(0), |
480 | caller_pc_(0), |
481 | caller_constant_pool_(0), |
482 | input_frame_context_(0), |
483 | stack_fp_(0), |
484 | trace_scope_(nullptr) { |
485 | if (isolate->deoptimizer_lazy_throw()) { |
486 | isolate->set_deoptimizer_lazy_throw(false); |
487 | deoptimizing_throw_ = true; |
488 | } |
489 | |
490 | DCHECK_NE(from, kNullAddress); |
491 | compiled_code_ = FindOptimizedCode(); |
492 | DCHECK(!compiled_code_.is_null()); |
493 | |
494 | DCHECK(function->IsJSFunction()); |
495 | trace_scope_ = FLAG_trace_deopt |
496 | ? new CodeTracer::Scope(isolate->GetCodeTracer()) |
497 | : nullptr; |
498 | #ifdef DEBUG |
499 | DCHECK(AllowHeapAllocation::IsAllowed()); |
500 | disallow_heap_allocation_ = new DisallowHeapAllocation(); |
501 | #endif // DEBUG |
502 | if (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION || |
503 | !compiled_code_->deopt_already_counted()) { |
// If the function is optimized, and we haven't counted that deopt yet, then
// increment the function's deopt count so that we can avoid optimizing
// functions that deopt too often.
507 | |
508 | if (deopt_kind_ == DeoptimizeKind::kSoft) { |
509 | // Soft deopts shouldn't count against the overall deoptimization count |
510 | // that can eventually lead to disabling optimization for a function. |
511 | isolate->counters()->soft_deopts_executed()->Increment(); |
512 | } else if (!function.is_null()) { |
513 | function->feedback_vector()->increment_deopt_count(); |
514 | } |
515 | } |
516 | if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { |
517 | compiled_code_->set_deopt_already_counted(true); |
518 | PROFILE(isolate_, |
519 | CodeDeoptEvent(compiled_code_, kind, from_, fp_to_sp_delta_)); |
520 | } |
521 | unsigned size = ComputeInputFrameSize(); |
522 | int parameter_count = |
523 | function->shared()->internal_formal_parameter_count() + 1; |
524 | input_ = new (size) FrameDescription(size, parameter_count); |
525 | } |
526 | |
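// The code being deoptimized may already have been unlinked onto the
// deoptimized code list; if it cannot be found there, fall back to looking
// the code object up by address.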
527 | Code Deoptimizer::FindOptimizedCode() { |
528 | Code compiled_code = FindDeoptimizingCode(from_); |
529 | return !compiled_code.is_null() ? compiled_code |
530 | : isolate_->FindCodeObject(from_); |
531 | } |
532 | |
533 | |
534 | void Deoptimizer::PrintFunctionName() { |
535 | if (function_->IsHeapObject() && function_->IsJSFunction()) { |
536 | function_->ShortPrint(trace_scope_->file()); |
537 | } else { |
PrintF(trace_scope_->file(),
"%s", Code::Kind2String(compiled_code_->kind()));
540 | } |
541 | } |
542 | |
543 | Handle<JSFunction> Deoptimizer::function() const { |
544 | return Handle<JSFunction>(function_, isolate()); |
545 | } |
546 | Handle<Code> Deoptimizer::compiled_code() const { |
547 | return Handle<Code>(compiled_code_, isolate()); |
548 | } |
549 | |
550 | Deoptimizer::~Deoptimizer() { |
551 | DCHECK(input_ == nullptr && output_ == nullptr); |
552 | DCHECK_NULL(disallow_heap_allocation_); |
553 | delete trace_scope_; |
554 | } |
555 | |
556 | |
557 | void Deoptimizer::DeleteFrameDescriptions() { |
558 | delete input_; |
559 | for (int i = 0; i < output_count_; ++i) { |
560 | if (output_[i] != input_) delete output_[i]; |
561 | } |
562 | delete[] output_; |
563 | input_ = nullptr; |
564 | output_ = nullptr; |
565 | #ifdef DEBUG |
566 | DCHECK(!AllowHeapAllocation::IsAllowed()); |
567 | DCHECK_NOT_NULL(disallow_heap_allocation_); |
568 | delete disallow_heap_allocation_; |
569 | disallow_heap_allocation_ = nullptr; |
570 | #endif // DEBUG |
571 | } |
572 | |
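// Returns the entry address of the pre-generated deoptimization stub for
// {kind}; generated code jumps to this address to enter the deoptimizer.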
573 | Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, |
574 | DeoptimizeKind kind) { |
575 | DeoptimizerData* data = isolate->deoptimizer_data(); |
576 | CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind); |
577 | CHECK(!data->deopt_entry_code(kind).is_null()); |
578 | return data->deopt_entry_code(kind)->raw_instruction_start(); |
579 | } |
580 | |
581 | bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr, |
582 | DeoptimizeKind type) { |
583 | DeoptimizerData* data = isolate->deoptimizer_data(); |
584 | CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind); |
585 | Code code = data->deopt_entry_code(type); |
586 | if (code.is_null()) return false; |
587 | return addr == code->raw_instruction_start(); |
588 | } |
589 | |
590 | bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr, |
591 | DeoptimizeKind* type) { |
592 | if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) { |
593 | *type = DeoptimizeKind::kEager; |
594 | return true; |
595 | } |
596 | if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) { |
597 | *type = DeoptimizeKind::kSoft; |
598 | return true; |
599 | } |
600 | if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) { |
601 | *type = DeoptimizeKind::kLazy; |
602 | return true; |
603 | } |
604 | return false; |
605 | } |
606 | |
607 | int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) { |
608 | int length = 0; |
609 | // Count all entries in the deoptimizing code list of every context. |
610 | Object context = isolate->heap()->native_contexts_list(); |
611 | while (!context->IsUndefined(isolate)) { |
612 | Context native_context = Context::cast(context); |
613 | Object element = native_context->DeoptimizedCodeListHead(); |
614 | while (!element->IsUndefined(isolate)) { |
615 | Code code = Code::cast(element); |
616 | DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION); |
617 | if (!code->marked_for_deoptimization()) { |
618 | length++; |
619 | } |
620 | element = code->next_code_link(); |
621 | } |
622 | context = Context::cast(context)->next_context_link(); |
623 | } |
624 | return length; |
625 | } |
626 | |
627 | namespace { |
628 | |
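// Returns the handler pc offset if {translated_frame} can catch an
// exception, or -1 otherwise. For interpreted frames, {data_out} receives
// the handler-table data, i.e. the index of the context register to restore
// when entering the handler.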
629 | int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) { |
630 | switch (translated_frame->kind()) { |
631 | case TranslatedFrame::kInterpretedFunction: { |
632 | int bytecode_offset = translated_frame->node_id().ToInt(); |
633 | HandlerTable table( |
634 | translated_frame->raw_shared_info()->GetBytecodeArray()); |
635 | return table.LookupRange(bytecode_offset, data_out, nullptr); |
636 | } |
637 | case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: { |
638 | return 0; |
639 | } |
640 | default: |
641 | break; |
642 | } |
643 | return -1; |
644 | } |
645 | |
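// On architectures where {kPadArguments} is set, an odd number of argument
// slots gets one extra padding slot so that frames stay aligned.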
646 | bool ShouldPadArguments(int arg_count) { |
647 | return kPadArguments && (arg_count % 2 != 0); |
648 | } |
649 | |
650 | } // namespace |
651 | |
652 | // We rely on this function not causing a GC. It is called from generated code |
653 | // without having a real stack frame in place. |
654 | void Deoptimizer::DoComputeOutputFrames() { |
655 | base::ElapsedTimer timer; |
656 | |
657 | // Determine basic deoptimization information. The optimized frame is |
658 | // described by the input data. |
659 | DeoptimizationData input_data = |
660 | DeoptimizationData::cast(compiled_code_->deoptimization_data()); |
661 | |
662 | { |
663 | // Read caller's PC, caller's FP and caller's constant pool values |
664 | // from input frame. Compute caller's frame top address. |
665 | |
666 | Register fp_reg = JavaScriptFrame::fp_register(); |
667 | stack_fp_ = input_->GetRegister(fp_reg.code()); |
668 | |
669 | caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize(); |
670 | |
671 | Address fp_address = input_->GetFramePointerAddress(); |
672 | caller_fp_ = Memory<intptr_t>(fp_address); |
673 | caller_pc_ = |
674 | Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset); |
675 | input_frame_context_ = Memory<intptr_t>( |
676 | fp_address + CommonFrameConstants::kContextOrFrameTypeOffset); |
677 | |
678 | if (FLAG_enable_embedded_constant_pool) { |
679 | caller_constant_pool_ = Memory<intptr_t>( |
680 | fp_address + CommonFrameConstants::kConstantPoolOffset); |
681 | } |
682 | } |
683 | |
684 | if (trace_scope_ != nullptr) { |
685 | timer.Start(); |
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
MessageFor(deopt_kind_));
PrintFunctionName();
PrintF(trace_scope_->file(),
" (opt #%d) @%d, FP to SP delta: %d, caller sp: " V8PRIxPTR_FMT
"]\n",
input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
caller_frame_top_);
if (deopt_kind_ == DeoptimizeKind::kEager ||
deopt_kind_ == DeoptimizeKind::kSoft) {
compiled_code_->PrintDeoptLocation(
trace_scope_->file(), " ;;; deoptimize at ", from_);
698 | } |
699 | } |
700 | |
701 | BailoutId node_id = input_data->BytecodeOffset(bailout_id_); |
702 | ByteArray translations = input_data->TranslationByteArray(); |
703 | unsigned translation_index = |
704 | input_data->TranslationIndex(bailout_id_)->value(); |
705 | |
706 | TranslationIterator state_iterator(translations, translation_index); |
707 | translated_state_.Init( |
708 | isolate_, input_->GetFramePointerAddress(), &state_iterator, |
709 | input_data->LiteralArray(), input_->GetRegisterValues(), |
710 | trace_scope_ == nullptr ? nullptr : trace_scope_->file(), |
711 | function_->IsHeapObject() |
712 | ? function_->shared()->internal_formal_parameter_count() |
713 | : 0); |
714 | |
715 | // Do the input frame to output frame(s) translation. |
716 | size_t count = translated_state_.frames().size(); |
// If we are supposed to go to the catch handler, find the catching frame
// for the catch and make sure we only deoptimize up to that frame.
719 | if (deoptimizing_throw_) { |
720 | size_t catch_handler_frame_index = count; |
721 | for (size_t i = count; i-- > 0;) { |
722 | catch_handler_pc_offset_ = LookupCatchHandler( |
723 | &(translated_state_.frames()[i]), &catch_handler_data_); |
724 | if (catch_handler_pc_offset_ >= 0) { |
725 | catch_handler_frame_index = i; |
726 | break; |
727 | } |
728 | } |
729 | CHECK_LT(catch_handler_frame_index, count); |
730 | count = catch_handler_frame_index + 1; |
731 | } |
732 | |
733 | DCHECK_NULL(output_); |
734 | output_ = new FrameDescription*[count]; |
735 | for (size_t i = 0; i < count; ++i) { |
736 | output_[i] = nullptr; |
737 | } |
738 | output_count_ = static_cast<int>(count); |
739 | |
740 | // Translate each output frame. |
741 | int frame_index = 0; // output_frame_index |
742 | for (size_t i = 0; i < count; ++i, ++frame_index) { |
743 | // Read the ast node id, function, and frame height for this output frame. |
744 | TranslatedFrame* translated_frame = &(translated_state_.frames()[i]); |
745 | bool handle_exception = deoptimizing_throw_ && i == count - 1; |
746 | switch (translated_frame->kind()) { |
747 | case TranslatedFrame::kInterpretedFunction: |
748 | DoComputeInterpretedFrame(translated_frame, frame_index, |
749 | handle_exception); |
750 | jsframe_count_++; |
751 | break; |
752 | case TranslatedFrame::kArgumentsAdaptor: |
753 | DoComputeArgumentsAdaptorFrame(translated_frame, frame_index); |
754 | break; |
755 | case TranslatedFrame::kConstructStub: |
756 | DoComputeConstructStubFrame(translated_frame, frame_index); |
757 | break; |
758 | case TranslatedFrame::kBuiltinContinuation: |
759 | DoComputeBuiltinContinuation(translated_frame, frame_index, |
760 | BuiltinContinuationMode::STUB); |
761 | break; |
762 | case TranslatedFrame::kJavaScriptBuiltinContinuation: |
763 | DoComputeBuiltinContinuation(translated_frame, frame_index, |
764 | BuiltinContinuationMode::JAVASCRIPT); |
765 | break; |
766 | case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: |
767 | DoComputeBuiltinContinuation( |
768 | translated_frame, frame_index, |
769 | handle_exception |
770 | ? BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION |
771 | : BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH); |
772 | break; |
773 | case TranslatedFrame::kInvalid: |
FATAL("invalid frame");
775 | break; |
776 | } |
777 | } |
778 | |
779 | FrameDescription* topmost = output_[count - 1]; |
780 | topmost->GetRegisterValues()->SetRegister(kRootRegister.code(), |
781 | isolate()->isolate_root()); |
782 | |
783 | // Print some helpful diagnostic information. |
784 | if (trace_scope_ != nullptr) { |
785 | double ms = timer.Elapsed().InMillisecondsF(); |
786 | int index = output_count_ - 1; // Index of the topmost frame. |
PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
788 | MessageFor(deopt_kind_)); |
789 | PrintFunctionName(); |
PrintF(trace_scope_->file(),
" @%d => node=%d, pc=" V8PRIxPTR_FMT ", caller sp=" V8PRIxPTR_FMT
", took %0.3f ms]\n",
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
caller_frame_top_, ms);
795 | } |
796 | } |
797 | |
798 | void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame, |
799 | int frame_index, |
800 | bool goto_catch_handler) { |
801 | SharedFunctionInfo shared = translated_frame->raw_shared_info(); |
802 | |
803 | TranslatedFrame::iterator value_iterator = translated_frame->begin(); |
804 | bool is_bottommost = (0 == frame_index); |
805 | bool is_topmost = (output_count_ - 1 == frame_index); |
806 | |
807 | int bytecode_offset = translated_frame->node_id().ToInt(); |
808 | int height = translated_frame->height(); |
809 | int register_count = height - 1; // Exclude accumulator. |
810 | int register_stack_slot_count = |
811 | InterpreterFrameConstants::RegisterStackSlotCount(register_count); |
812 | int height_in_bytes = register_stack_slot_count * kSystemPointerSize; |
813 | |
814 | // The topmost frame will contain the accumulator. |
815 | if (is_topmost) { |
816 | height_in_bytes += kSystemPointerSize; |
817 | if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize; |
818 | } |
819 | |
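// The frame is written top-down (see {FrameWriter}); roughly, the layout
// built below is, from highest to lowest address:
//   incoming parameters (plus optional alignment padding)
//   caller's pc, caller's fp (and caller's constant pool, if enabled)
//   context, function, bytecode array, bytecode offset
//   interpreter register values (plus optional alignment padding)
//   accumulator (topmost frame only)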
820 | TranslatedFrame::iterator function_iterator = value_iterator++; |
821 | if (trace_scope_ != nullptr) { |
PrintF(trace_scope_->file(), " translating interpreted frame ");
std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
bytecode_offset, height_in_bytes,
goto_catch_handler ? " (throw)" : "");
828 | } |
829 | if (goto_catch_handler) { |
830 | bytecode_offset = catch_handler_pc_offset_; |
831 | } |
832 | |
833 | // The 'fixed' part of the frame consists of the incoming parameters and |
834 | // the part described by InterpreterFrameConstants. This will include |
835 | // argument padding, when needed. |
836 | unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared); |
837 | unsigned output_frame_size = height_in_bytes + fixed_frame_size; |
838 | |
839 | // Allocate and store the output frame description. |
840 | int parameter_count = shared->internal_formal_parameter_count() + 1; |
841 | FrameDescription* output_frame = new (output_frame_size) |
842 | FrameDescription(output_frame_size, parameter_count); |
843 | FrameWriter frame_writer(this, output_frame, trace_scope_); |
844 | |
845 | CHECK(frame_index >= 0 && frame_index < output_count_); |
846 | CHECK_NULL(output_[frame_index]); |
847 | output_[frame_index] = output_frame; |
848 | |
849 | // The top address of the frame is computed from the previous frame's top and |
850 | // this frame's size. |
851 | intptr_t top_address; |
852 | if (is_bottommost) { |
853 | top_address = caller_frame_top_ - output_frame_size; |
854 | } else { |
855 | top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
856 | } |
857 | output_frame->SetTop(top_address); |
858 | |
859 | // Compute the incoming parameter translation. |
860 | |
861 | ReadOnlyRoots roots(isolate()); |
862 | if (ShouldPadArguments(parameter_count)) { |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}

for (int i = 0; i < parameter_count; ++i, ++value_iterator) {
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
868 | } |
869 | |
870 | DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), |
871 | frame_writer.top_offset()); |
872 | if (trace_scope_ != nullptr) { |
PrintF(trace_scope_->file(), " -------------------------\n");
874 | } |
875 | |
// There are no translation commands for the caller's pc and fp, the
// context, the function and the bytecode offset. Synthesize their values
// and set them up explicitly.
880 | // |
881 | // The caller's pc for the bottommost output frame is the same as in the |
882 | // input frame. For all subsequent output frames, it can be read from the |
883 | // previous one. This frame's pc can be computed from the non-optimized |
884 | // function code and AST id of the bailout. |
885 | const intptr_t caller_pc = |
886 | is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc(); |
887 | frame_writer.PushCallerPc(caller_pc); |
888 | |
889 | // The caller's frame pointer for the bottommost output frame is the same |
890 | // as in the input frame. For all subsequent output frames, it can be |
891 | // read from the previous one. Also compute and set this frame's frame |
892 | // pointer. |
893 | const intptr_t caller_fp = |
894 | is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp(); |
895 | frame_writer.PushCallerFp(caller_fp); |
896 | |
897 | intptr_t fp_value = top_address + frame_writer.top_offset(); |
898 | output_frame->SetFp(fp_value); |
899 | if (is_topmost) { |
900 | Register fp_reg = InterpretedFrame::fp_register(); |
901 | output_frame->SetRegister(fp_reg.code(), fp_value); |
902 | } |
903 | |
904 | if (FLAG_enable_embedded_constant_pool) { |
// For the bottommost output frame the constant pool pointer can be read
// from the input frame. For subsequent output frames, it can be read from
// the previous frame.
908 | const intptr_t caller_cp = |
909 | is_bottommost ? caller_constant_pool_ |
910 | : output_[frame_index - 1]->GetConstantPool(); |
911 | frame_writer.PushCallerConstantPool(caller_cp); |
912 | } |
913 | |
// For the bottommost output frame the context can be read from the input
// frame. For all subsequent output frames it can be read from the function
// so long as we don't inline functions that need local contexts.
917 | |
918 | // When deoptimizing into a catch block, we need to take the context |
919 | // from a register that was specified in the handler table. |
920 | TranslatedFrame::iterator context_pos = value_iterator++; |
921 | if (goto_catch_handler) { |
922 | // Skip to the translated value of the register specified |
923 | // in the handler table. |
924 | for (int i = 0; i < catch_handler_data_ + 1; ++i) { |
925 | context_pos++; |
926 | } |
927 | } |
928 | // Read the context from the translations. |
929 | Object context = context_pos->GetRawValue(); |
930 | output_frame->SetContext(static_cast<intptr_t>(context->ptr())); |
frame_writer.PushTranslatedValue(context_pos, "context");

// The function was mentioned explicitly in the BEGIN_FRAME.
frame_writer.PushTranslatedValue(function_iterator, "function");
935 | |
936 | // Set the bytecode array pointer. |
937 | Object bytecode_array = shared->HasBreakInfo() |
938 | ? shared->GetDebugInfo()->DebugBytecodeArray() |
939 | : shared->GetBytecodeArray(); |
frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
941 | |
942 | // The bytecode offset was mentioned explicitly in the BEGIN_FRAME. |
943 | int raw_bytecode_offset = |
944 | BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset; |
945 | Smi smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset); |
frame_writer.PushRawObject(smi_bytecode_offset, "bytecode offset\n");
947 | |
948 | if (trace_scope_ != nullptr) { |
PrintF(trace_scope_->file(), " -------------------------\n");
950 | } |
951 | |
952 | // Translate the rest of the interpreter registers in the frame. |
953 | // The return_value_offset is counted from the top. Here, we compute the |
954 | // register index (counted from the start). |
955 | int return_value_first_reg = |
956 | register_count - translated_frame->return_value_offset(); |
957 | int return_value_count = translated_frame->return_value_count(); |
958 | for (int i = 0; i < register_count; ++i, ++value_iterator) { |
959 | // Ensure we write the return value if we have one and we are returning |
960 | // normally to a lazy deopt point. |
961 | if (is_topmost && !goto_catch_handler && |
962 | deopt_kind_ == DeoptimizeKind::kLazy && i >= return_value_first_reg && |
963 | i < return_value_first_reg + return_value_count) { |
964 | int return_index = i - return_value_first_reg; |
965 | if (return_index == 0) { |
frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
"return value 0\n");
// We do not handle the situation when one return value should go into
// the accumulator and another one into an ordinary register. Since
// the interpreter should never create such a situation, just assert
// this does not happen.
972 | CHECK_LE(return_value_first_reg + return_value_count, register_count); |
973 | } else { |
974 | CHECK_EQ(return_index, 1); |
frame_writer.PushRawValue(input_->GetRegister(kReturnRegister1.code()),
"return value 1\n");
977 | } |
} else {
// This is not a return value; just write the value from the translations.
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
981 | } |
982 | } |
983 | |
984 | int register_slots_written = register_count; |
985 | DCHECK_LE(register_slots_written, register_stack_slot_count); |
986 | // Some architectures must pad the stack frame with extra stack slots |
987 | // to ensure the stack frame is aligned. Do this now. |
988 | while (register_slots_written < register_stack_slot_count) { |
989 | register_slots_written++; |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
991 | } |
992 | |
993 | // Translate the accumulator register (depending on frame position). |
994 | if (is_topmost) { |
995 | if (PadTopOfStackRegister()) { |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
997 | } |
998 | // For topmost frame, put the accumulator on the stack. The |
999 | // {NotifyDeoptimized} builtin pops it off the topmost frame (possibly |
1000 | // after materialization). |
1001 | if (goto_catch_handler) { |
1002 | // If we are lazy deopting to a catch handler, we set the accumulator to |
1003 | // the exception (which lives in the result register). |
1004 | intptr_t accumulator_value = |
1005 | input_->GetRegister(kInterpreterAccumulatorRegister.code()); |
frame_writer.PushRawObject(Object(accumulator_value), "accumulator\n");
1007 | } else { |
// If we are lazily deoptimizing, make sure we store the deopt
// return value into the appropriate slot.
1010 | if (deopt_kind_ == DeoptimizeKind::kLazy && |
1011 | translated_frame->return_value_offset() == 0 && |
1012 | translated_frame->return_value_count() > 0) { |
1013 | CHECK_EQ(translated_frame->return_value_count(), 1); |
frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
"return value 0\n");
1016 | } else { |
frame_writer.PushTranslatedValue(value_iterator, "accumulator");
1018 | } |
1019 | } |
1020 | ++value_iterator; // Move over the accumulator. |
1021 | } else { |
1022 | // For non-topmost frames, skip the accumulator translation. For those |
1023 | // frames, the return value from the callee will become the accumulator. |
1024 | ++value_iterator; |
1025 | } |
1026 | CHECK_EQ(translated_frame->end(), value_iterator); |
1027 | CHECK_EQ(0u, frame_writer.top_offset()); |
1028 | |
1029 | // Compute this frame's PC and state. The PC will be a special builtin that |
1030 | // continues the bytecode dispatch. Note that non-topmost and lazy-style |
1031 | // bailout handlers also advance the bytecode offset before dispatch, hence |
1032 | // simulating what normal handlers do upon completion of the operation. |
1033 | Builtins* builtins = isolate_->builtins(); |
1034 | Code dispatch_builtin = |
1035 | (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) && |
1036 | !goto_catch_handler |
1037 | ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) |
1038 | : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch); |
1039 | output_frame->SetPc( |
1040 | static_cast<intptr_t>(dispatch_builtin->InstructionStart())); |
1041 | |
1042 | // Update constant pool. |
1043 | if (FLAG_enable_embedded_constant_pool) { |
1044 | intptr_t constant_pool_value = |
1045 | static_cast<intptr_t>(dispatch_builtin->constant_pool()); |
1046 | output_frame->SetConstantPool(constant_pool_value); |
1047 | if (is_topmost) { |
1048 | Register constant_pool_reg = |
1049 | InterpretedFrame::constant_pool_pointer_register(); |
1050 | output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value); |
1051 | } |
1052 | } |
1053 | |
1054 | // Clear the context register. The context might be a de-materialized object |
1055 | // and will be materialized by {Runtime_NotifyDeoptimized}. For additional |
1056 | // safety we use Smi(0) instead of the potential {arguments_marker} here. |
1057 | if (is_topmost) { |
1058 | intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr()); |
1059 | Register context_reg = JavaScriptFrame::context_register(); |
1060 | output_frame->SetRegister(context_reg.code(), context_value); |
1061 | // Set the continuation for the topmost frame. |
1062 | Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized); |
1063 | output_frame->SetContinuation( |
1064 | static_cast<intptr_t>(continuation->InstructionStart())); |
1065 | } |
1066 | } |
1067 | |
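// Arguments adaptor frames bridge calls whose actual argument count differs
// from the callee's formal parameter count; below, such a frame is
// reconstructed from the translation.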
1068 | void Deoptimizer::DoComputeArgumentsAdaptorFrame( |
1069 | TranslatedFrame* translated_frame, int frame_index) { |
1070 | TranslatedFrame::iterator value_iterator = translated_frame->begin(); |
1071 | bool is_bottommost = (0 == frame_index); |
1072 | |
1073 | unsigned height = translated_frame->height(); |
1074 | unsigned height_in_bytes = height * kSystemPointerSize; |
1075 | int parameter_count = height; |
1076 | if (ShouldPadArguments(parameter_count)) |
1077 | height_in_bytes += kSystemPointerSize; |
1078 | |
1079 | TranslatedFrame::iterator function_iterator = value_iterator++; |
1080 | if (trace_scope_ != nullptr) { |
PrintF(trace_scope_->file(),
" translating arguments adaptor => height=%d\n", height_in_bytes);
1083 | } |
1084 | |
1085 | unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize; |
1086 | unsigned output_frame_size = height_in_bytes + fixed_frame_size; |
1087 | |
1088 | // Allocate and store the output frame description. |
1089 | FrameDescription* output_frame = new (output_frame_size) |
1090 | FrameDescription(output_frame_size, parameter_count); |
1091 | FrameWriter frame_writer(this, output_frame, trace_scope_); |
1092 | |
// An arguments adaptor frame cannot be the topmost frame.
1094 | CHECK(frame_index < output_count_ - 1); |
1095 | CHECK_NULL(output_[frame_index]); |
1096 | output_[frame_index] = output_frame; |
1097 | |
1098 | // The top address of the frame is computed from the previous frame's top and |
1099 | // this frame's size. |
1100 | intptr_t top_address; |
1101 | if (is_bottommost) { |
1102 | top_address = caller_frame_top_ - output_frame_size; |
1103 | } else { |
1104 | top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
1105 | } |
1106 | output_frame->SetTop(top_address); |
1107 | |
1108 | ReadOnlyRoots roots(isolate()); |
1109 | if (ShouldPadArguments(parameter_count)) { |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
1111 | } |
1112 | |
1113 | // Compute the incoming parameter translation. |
1114 | for (int i = 0; i < parameter_count; ++i, ++value_iterator) { |
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
1116 | } |
1117 | |
1118 | DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), |
1119 | frame_writer.top_offset()); |
1120 | |
1121 | // Read caller's PC from the previous frame. |
1122 | const intptr_t caller_pc = |
1123 | is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc(); |
1124 | frame_writer.PushCallerPc(caller_pc); |
1125 | |
1126 | // Read caller's FP from the previous frame, and set this frame's FP. |
1127 | const intptr_t caller_fp = |
1128 | is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp(); |
1129 | frame_writer.PushCallerFp(caller_fp); |
1130 | |
1131 | intptr_t fp_value = top_address + frame_writer.top_offset(); |
1132 | output_frame->SetFp(fp_value); |
1133 | |
1134 | if (FLAG_enable_embedded_constant_pool) { |
1135 | // Read the caller's constant pool from the previous frame. |
1136 | const intptr_t caller_cp = |
1137 | is_bottommost ? caller_constant_pool_ |
1138 | : output_[frame_index - 1]->GetConstantPool(); |
1139 | frame_writer.PushCallerConstantPool(caller_cp); |
1140 | } |
1141 | |
// A marker value is used in place of the context.
intptr_t marker = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
frame_writer.PushRawValue(marker, "context (adaptor sentinel)\n");

// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
frame_writer.PushTranslatedValue(function_iterator, "function\n");

// Number of incoming arguments. The frame height includes the receiver,
// which argc does not count, hence height - 1.
frame_writer.PushRawObject(Smi::FromInt(height - 1), "argc\n");

frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
1153 | |
1154 | CHECK_EQ(translated_frame->end(), value_iterator); |
1155 | DCHECK_EQ(0, frame_writer.top_offset()); |
1156 | |
1157 | Builtins* builtins = isolate_->builtins(); |
1158 | Code adaptor_trampoline = |
1159 | builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); |
1160 | intptr_t pc_value = static_cast<intptr_t>( |
1161 | adaptor_trampoline->InstructionStart() + |
1162 | isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); |
1163 | output_frame->SetPc(pc_value); |
1164 | if (FLAG_enable_embedded_constant_pool) { |
1165 | intptr_t constant_pool_value = |
1166 | static_cast<intptr_t>(adaptor_trampoline->constant_pool()); |
1167 | output_frame->SetConstantPool(constant_pool_value); |
1168 | } |
1169 | } |
1170 | |
1171 | void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame, |
1172 | int frame_index) { |
1173 | TranslatedFrame::iterator value_iterator = translated_frame->begin(); |
1174 | bool is_topmost = (output_count_ - 1 == frame_index); |
1175 | // The construct frame could become topmost only if we inlined a constructor |
1176 | // call which does a tail call (otherwise the tail callee's frame would be |
1177 | // the topmost one). So it could only be the DeoptimizeKind::kLazy case. |
1178 | CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy); |
1179 | |
1180 | Builtins* builtins = isolate_->builtins(); |
1181 | Code construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); |
1182 | BailoutId bailout_id = translated_frame->node_id(); |
1183 | unsigned height = translated_frame->height(); |
1184 | unsigned parameter_count = height - 1; // Exclude the context. |
1185 | unsigned height_in_bytes = parameter_count * kSystemPointerSize; |
1186 | |
// If the construct frame is topmost, we have to ensure that the value of
// the result register is preserved during continuation execution. We do
// this here by "pushing" the result of the constructor function to the top
// of the reconstructed stack and popping it in
// {Builtins::kNotifyDeoptimized}.
1192 | if (is_topmost) { |
1193 | height_in_bytes += kSystemPointerSize; |
1194 | if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize; |
1195 | } |
1196 | |
1197 | if (ShouldPadArguments(parameter_count)) |
1198 | height_in_bytes += kSystemPointerSize; |
1199 | |
1200 | TranslatedFrame::iterator function_iterator = value_iterator++; |
1201 | if (trace_scope_ != nullptr) { |
PrintF(trace_scope_->file(),
" translating construct stub => bailout_id=%d (%s), height=%d\n",
bailout_id.ToInt(),
bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke",
height_in_bytes);
1207 | } |
1208 | |
1209 | unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize; |
1210 | unsigned output_frame_size = height_in_bytes + fixed_frame_size; |
1211 | |
1212 | // Allocate and store the output frame description. |
1213 | FrameDescription* output_frame = new (output_frame_size) |
1214 | FrameDescription(output_frame_size, parameter_count); |
1215 | FrameWriter frame_writer(this, output_frame, trace_scope_); |
1216 | |
// A construct stub frame can never be the bottommost frame.
1218 | DCHECK(frame_index > 0 && frame_index < output_count_); |
1219 | DCHECK_NULL(output_[frame_index]); |
1220 | output_[frame_index] = output_frame; |
1221 | |
1222 | // The top address of the frame is computed from the previous frame's top and |
1223 | // this frame's size. |
1224 | intptr_t top_address; |
1225 | top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
1226 | output_frame->SetTop(top_address); |
1227 | |
1228 | ReadOnlyRoots roots(isolate()); |
1229 | if (ShouldPadArguments(parameter_count)) { |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
1231 | } |
1232 | |
// The allocated receiver of a construct stub frame is passed as the
// receiver parameter through the translation. It might be encoding
// a captured object, so we need to save it for later.
1236 | TranslatedFrame::iterator receiver_iterator = value_iterator; |
1237 | |
1238 | // Compute the incoming parameter translation. |
1239 | for (unsigned i = 0; i < parameter_count; ++i, ++value_iterator) { |
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
1241 | } |
1242 | |
1243 | DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), |
1244 | frame_writer.top_offset()); |
1245 | |
1246 | // Read caller's PC from the previous frame. |
1247 | const intptr_t caller_pc = output_[frame_index - 1]->GetPc(); |
1248 | frame_writer.PushCallerPc(caller_pc); |
1249 | |
1250 | // Read caller's FP from the previous frame, and set this frame's FP. |
1251 | const intptr_t caller_fp = output_[frame_index - 1]->GetFp(); |
1252 | frame_writer.PushCallerFp(caller_fp); |
1253 | |
1254 | intptr_t fp_value = top_address + frame_writer.top_offset(); |
1255 | output_frame->SetFp(fp_value); |
1256 | if (is_topmost) { |
1257 | Register fp_reg = JavaScriptFrame::fp_register(); |
1258 | output_frame->SetRegister(fp_reg.code(), fp_value); |
1259 | } |
1260 | |
1261 | if (FLAG_enable_embedded_constant_pool) { |
1262 | // Read the caller's constant pool from the previous frame. |
1263 | const intptr_t caller_cp = output_[frame_index - 1]->GetConstantPool(); |
1264 | frame_writer.PushCallerConstantPool(caller_cp); |
1265 | } |
1266 | |
1267 | // A marker value is used to mark the frame. |
1268 | intptr_t marker = StackFrame::TypeToMarker(StackFrame::CONSTRUCT); |
frame_writer.PushRawValue(marker, "context (construct stub sentinel)\n");

frame_writer.PushTranslatedValue(value_iterator++, "context");

// Number of incoming arguments (excluding the receiver).
frame_writer.PushRawObject(Smi::FromInt(parameter_count - 1), "argc\n");

// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
frame_writer.PushTranslatedValue(function_iterator, "constructor function\n");
1279 | |
1280 | // The deopt info contains the implicit receiver or the new target at the |
1281 | // position of the receiver. Copy it to the top of stack, with the hole value |
1282 | // as padding to maintain alignment. |
1283 | |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
1285 | |
1286 | CHECK(bailout_id == BailoutId::ConstructStubCreate() || |
1287 | bailout_id == BailoutId::ConstructStubInvoke()); |
const char* debug_hint = bailout_id == BailoutId::ConstructStubCreate()
? "new target\n"
: "allocated receiver\n";
1291 | frame_writer.PushTranslatedValue(receiver_iterator, debug_hint); |
1292 | |
1293 | if (is_topmost) { |
1294 | if (PadTopOfStackRegister()) { |
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
1296 | } |
1297 | // Ensure the result is restored back when we return to the stub. |
1298 | Register result_reg = kReturnRegister0; |
1299 | intptr_t result = input_->GetRegister(result_reg.code()); |
frame_writer.PushRawValue(result, "subcall result\n");
1301 | } |
1302 | |
1303 | CHECK_EQ(translated_frame->end(), value_iterator); |
1304 | CHECK_EQ(0u, frame_writer.top_offset()); |
1305 | |
1306 | // Compute this frame's PC. |
1307 | DCHECK(bailout_id.IsValidForConstructStub()); |
1308 | Address start = construct_stub->InstructionStart(); |
1309 | int pc_offset = |
1310 | bailout_id == BailoutId::ConstructStubCreate() |
1311 | ? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value() |
1312 | : isolate_->heap()->construct_stub_invoke_deopt_pc_offset()->value(); |
1313 | intptr_t pc_value = static_cast<intptr_t>(start + pc_offset); |
1314 | output_frame->SetPc(pc_value); |
1315 | |
1316 | // Update constant pool. |
1317 | if (FLAG_enable_embedded_constant_pool) { |
1318 | intptr_t constant_pool_value = |
1319 | static_cast<intptr_t>(construct_stub->constant_pool()); |
1320 | output_frame->SetConstantPool(constant_pool_value); |
1321 | if (is_topmost) { |
1322 | Register constant_pool_reg = |
1323 | JavaScriptFrame::constant_pool_pointer_register(); |
1324 | output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value); |
1325 | } |
1326 | } |
1327 | |
1328 | // Clear the context register. The context might be a de-materialized object |
1329 | // and will be materialized by {Runtime_NotifyDeoptimized}. For additional |
1330 | // safety we use Smi(0) instead of the potential {arguments_marker} here. |
1331 | if (is_topmost) { |
1332 | intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr()); |
1333 | Register context_reg = JavaScriptFrame::context_register(); |
1334 | output_frame->SetRegister(context_reg.code(), context_value); |
1335 | } |
1336 | |
1337 | // Set the continuation for the topmost frame. |
1338 | if (is_topmost) { |
1339 | Builtins* builtins = isolate_->builtins(); |
1340 | DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_); |
1341 | Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized); |
1342 | output_frame->SetContinuation( |
1343 | static_cast<intptr_t>(continuation->InstructionStart())); |
1344 | } |
1345 | } |
1346 | |
1347 | bool Deoptimizer::BuiltinContinuationModeIsJavaScript( |
1348 | BuiltinContinuationMode mode) { |
1349 | switch (mode) { |
1350 | case BuiltinContinuationMode::STUB: |
1351 | return false; |
1352 | case BuiltinContinuationMode::JAVASCRIPT: |
1353 | case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: |
1354 | case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: |
1355 | return true; |
1356 | } |
1357 | UNREACHABLE(); |
1358 | } |
1359 | |
1360 | bool Deoptimizer::BuiltinContinuationModeIsWithCatch( |
1361 | BuiltinContinuationMode mode) { |
1362 | switch (mode) { |
1363 | case BuiltinContinuationMode::STUB: |
1364 | case BuiltinContinuationMode::JAVASCRIPT: |
1365 | return false; |
1366 | case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: |
1367 | case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: |
1368 | return true; |
1369 | } |
1370 | UNREACHABLE(); |
1371 | } |
1372 | |
1373 | StackFrame::Type Deoptimizer::BuiltinContinuationModeToFrameType( |
1374 | BuiltinContinuationMode mode) { |
1375 | switch (mode) { |
1376 | case BuiltinContinuationMode::STUB: |
1377 | return StackFrame::BUILTIN_CONTINUATION; |
1378 | case BuiltinContinuationMode::JAVASCRIPT: |
1379 | return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION; |
1380 | case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: |
1381 | return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH; |
1382 | case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: |
1383 | return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH; |
1384 | } |
1385 | UNREACHABLE(); |
1386 | } |
1387 | |
1388 | Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation( |
1389 | BuiltinContinuationMode mode, bool must_handle_result) { |
1390 | switch (mode) { |
1391 | case BuiltinContinuationMode::STUB: |
1392 | return must_handle_result ? Builtins::kContinueToCodeStubBuiltinWithResult |
1393 | : Builtins::kContinueToCodeStubBuiltin; |
1394 | case BuiltinContinuationMode::JAVASCRIPT: |
1395 | case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: |
1396 | case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: |
1397 | return must_handle_result |
1398 | ? Builtins::kContinueToJavaScriptBuiltinWithResult |
1399 | : Builtins::kContinueToJavaScriptBuiltin; |
1400 | } |
1401 | UNREACHABLE(); |
1402 | } |
1403 | |
1404 | // BuiltinContinuationFrames capture the machine state that is expected as input |
1405 | // to a builtin, including both input register values and stack parameters. When |
1406 | // the frame is reactivated (i.e. the frame below it returns), a |
1407 | // ContinueToBuiltin stub restores the register state from the frame and tail |
1408 | // calls to the actual target builtin, making it appear that the stub had been |
1409 | // directly called by the frame above it. The input values to populate the frame |
1410 | // are taken from the deopt's FrameState. |
1411 | // |
1412 | // Frame translation happens in two modes, EAGER and LAZY. In EAGER mode, all of |
1413 | // the parameters to the Builtin are explicitly specified in the TurboFan |
// FrameState node. In LAZY mode, there is always one fewer parameter specified
1415 | // in the FrameState than expected by the Builtin. In that case, construction of |
1416 | // BuiltinContinuationFrame adds the final missing parameter during |
1417 | // deoptimization, and that parameter is always on the stack and contains the |
1418 | // value returned from the callee of the call site triggering the LAZY deopt |
1419 | // (e.g. rax on x64). This requires that continuation Builtins for LAZY deopts |
1420 | // must have at least one stack parameter. |
1421 | // |
1422 | // TO |
1423 | // | .... | |
1424 | // +-------------------------+ |
1425 | // | arg padding (arch dept) |<- at most 1*kSystemPointerSize |
1426 | // +-------------------------+ |
1427 | // | builtin param 0 |<- FrameState input value n becomes |
1428 | // +-------------------------+ |
1429 | // | ... | |
1430 | // +-------------------------+ |
1431 | // | builtin param m |<- FrameState input value n+m-1, or in |
1432 | // +-----needs-alignment-----+ the LAZY case, return LAZY result value |
1433 | // | ContinueToBuiltin entry | |
1434 | // +-------------------------+ |
1435 | // | | saved frame (FP) | |
1436 | // | +=====needs=alignment=====+<- fpreg |
1437 | // | |constant pool (if ool_cp)| |
1438 | // v +-------------------------+ |
1439 | // |BUILTIN_CONTINUATION mark| |
1440 | // +-------------------------+ |
1441 | // | JSFunction (or zero) |<- only if JavaScript builtin |
1442 | // +-------------------------+ |
1443 | // | frame height above FP | |
1444 | // +-------------------------+ |
1445 | // | context |<- this non-standard context slot contains |
1446 | // +-------------------------+ the context, even for non-JS builtins. |
1447 | // | builtin address | |
1448 | // +-------------------------+ |
1449 | // | builtin input GPR reg0 |<- populated from deopt FrameState using |
1450 | // +-------------------------+ the builtin's CallInterfaceDescriptor |
1451 | // | ... | to map a FrameState's 0..n-1 inputs to |
1452 | // +-------------------------+ the builtin's n input register params. |
1453 | // | builtin input GPR regn | |
1454 | // +-------------------------+ |
1455 | // | reg padding (arch dept) | |
1456 | // +-----needs--alignment----+ |
1457 | // | res padding (arch dept) |<- only if {is_topmost}; result is pop'd by |
1458 | // +-------------------------+<- kNotifyDeopt ASM stub and moved to acc |
1459 | // | result value |<- reg, as ContinueToBuiltin stub expects. |
1460 | // +-----needs-alignment-----+<- spreg |
1461 | // |
1462 | void Deoptimizer::DoComputeBuiltinContinuation( |
1463 | TranslatedFrame* translated_frame, int frame_index, |
1464 | BuiltinContinuationMode mode) { |
1465 | TranslatedFrame::iterator value_iterator = translated_frame->begin(); |
1466 | |
1467 | // The output frame must have room for all of the parameters that need to be |
1468 | // passed to the builtin continuation. |
1469 | const int height_in_words = translated_frame->height(); |
1470 | |
1471 | BailoutId bailout_id = translated_frame->node_id(); |
1472 | Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id); |
1473 | Code builtin = isolate()->builtins()->builtin(builtin_name); |
1474 | Callable continuation_callable = |
1475 | Builtins::CallableFor(isolate(), builtin_name); |
1476 | CallInterfaceDescriptor continuation_descriptor = |
1477 | continuation_callable.descriptor(); |
1478 | |
1479 | const bool is_bottommost = (0 == frame_index); |
1480 | const bool is_topmost = (output_count_ - 1 == frame_index); |
1481 | const bool must_handle_result = |
1482 | !is_topmost || deopt_kind_ == DeoptimizeKind::kLazy; |
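  // I.e. a result slot is needed either because a frame above will return
  // into this continuation, or because this is a lazy deopt at a call site
  // whose return value must be preserved.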
1483 | |
1484 | const RegisterConfiguration* config(RegisterConfiguration::Default()); |
1485 | const int allocatable_register_count = |
1486 | config->num_allocatable_general_registers(); |
1487 | const int padding_slot_count = |
1488 | BuiltinContinuationFrameConstants::PaddingSlotCount( |
1489 | allocatable_register_count); |
1490 | |
1491 | const int register_parameter_count = |
1492 | continuation_descriptor.GetRegisterParameterCount(); |
  // The translated frame's height also counts the register parameters and the
  // context, so remove those to get the stack parameter count.
1495 | const int translated_stack_parameters = |
1496 | height_in_words - register_parameter_count - 1; |
1497 | const int stack_param_count = |
1498 | translated_stack_parameters + (must_handle_result ? 1 : 0) + |
1499 | (BuiltinContinuationModeIsWithCatch(mode) ? 1 : 0); |
1500 | const int stack_param_pad_count = |
1501 | ShouldPadArguments(stack_param_count) ? 1 : 0; |
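  // E.g. (illustrative numbers): a height of 6 with 3 register parameters
  // gives 6 - 3 - 1 == 2 translated stack parameters; a lazy deopt with a
  // catch handler then adds one slot each for the result and the exception.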
1502 | |
  // If the builtin continuation frame is topmost, we must ensure that the
  // value of the result register is preserved during continuation execution.
  // We do this here by "pushing" the result of the callback function to the
  // top of the reconstructed stack and then popping it in
  // {Builtins::kNotifyDeoptimized}.
1508 | const int push_result_count = |
1509 | is_topmost ? (PadTopOfStackRegister() ? 2 : 1) : 0; |
1510 | |
1511 | const unsigned output_frame_size = |
1512 | kSystemPointerSize * (stack_param_count + stack_param_pad_count + |
1513 | allocatable_register_count + padding_slot_count + |
1514 | push_result_count) + |
1515 | BuiltinContinuationFrameConstants::kFixedFrameSize; |
1516 | |
1517 | const unsigned output_frame_size_above_fp = |
1518 | kSystemPointerSize * (allocatable_register_count + padding_slot_count + |
1519 | push_result_count) + |
1520 | (BuiltinContinuationFrameConstants::kFixedFrameSize - |
1521 | BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp); |
1522 | |
1523 | // Validate types of parameters. They must all be tagged except for argc for |
1524 | // JS builtins. |
1525 | bool has_argc = false; |
1526 | for (int i = 0; i < register_parameter_count; ++i) { |
1527 | MachineType type = continuation_descriptor.GetParameterType(i); |
1528 | int code = continuation_descriptor.GetRegisterParameter(i).code(); |
1529 | // Only tagged and int32 arguments are supported, and int32 only for the |
1530 | // arguments count on JavaScript builtins. |
1531 | if (type == MachineType::Int32()) { |
1532 | CHECK_EQ(code, kJavaScriptCallArgCountRegister.code()); |
1533 | has_argc = true; |
1534 | } else { |
1535 | // Any other argument must be a tagged value. |
1536 | CHECK(IsAnyTagged(type.representation())); |
1537 | } |
1538 | } |
1539 | CHECK_EQ(BuiltinContinuationModeIsJavaScript(mode), has_argc); |
1540 | |
1541 | if (trace_scope_ != nullptr) { |
1542 | PrintF(trace_scope_->file(), |
1543 | " translating BuiltinContinuation to %s," |
1544 | " register param count %d," |
1545 | " stack param count %d\n" , |
1546 | Builtins::name(builtin_name), register_parameter_count, |
1547 | stack_param_count); |
1548 | } |
1549 | |
1550 | FrameDescription* output_frame = new (output_frame_size) |
1551 | FrameDescription(output_frame_size, stack_param_count); |
1552 | output_[frame_index] = output_frame; |
1553 | FrameWriter frame_writer(this, output_frame, trace_scope_); |
1554 | |
1555 | // The top address of the frame is computed from the previous frame's top and |
1556 | // this frame's size. |
1557 | intptr_t top_address; |
1558 | if (is_bottommost) { |
1559 | top_address = caller_frame_top_ - output_frame_size; |
1560 | } else { |
1561 | top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
1562 | } |
1563 | output_frame->SetTop(top_address); |
1564 | |
  // Get the JSFunction, if any, for the case that this is a
  // JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer
  // just like a normal JavaScriptFrame.
1568 | const intptr_t maybe_function = value_iterator->GetRawValue()->ptr(); |
1569 | ++value_iterator; |
1570 | |
1571 | ReadOnlyRoots roots(isolate()); |
1572 | if (ShouldPadArguments(stack_param_count)) { |
1573 | frame_writer.PushRawObject(roots.the_hole_value(), "padding\n" ); |
1574 | } |
1575 | |
1576 | for (int i = 0; i < translated_stack_parameters; ++i, ++value_iterator) { |
1577 | frame_writer.PushTranslatedValue(value_iterator, "stack parameter" ); |
1578 | } |
1579 | |
1580 | switch (mode) { |
1581 | case BuiltinContinuationMode::STUB: |
1582 | break; |
1583 | case BuiltinContinuationMode::JAVASCRIPT: |
1584 | break; |
1585 | case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: { |
1586 | frame_writer.PushRawObject(roots.the_hole_value(), |
1587 | "placeholder for exception on lazy deopt\n" ); |
1588 | } break; |
1589 | case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: { |
1590 | intptr_t accumulator_value = |
1591 | input_->GetRegister(kInterpreterAccumulatorRegister.code()); |
1592 | frame_writer.PushRawObject(Object(accumulator_value), |
1593 | "exception (from accumulator)\n" ); |
1594 | } break; |
1595 | } |
1596 | |
1597 | if (must_handle_result) { |
1598 | frame_writer.PushRawObject(roots.the_hole_value(), |
1599 | "placeholder for return result on lazy deopt\n" ); |
1600 | } |
1601 | |
1602 | DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), |
1603 | frame_writer.top_offset()); |
1604 | |
1605 | std::vector<TranslatedFrame::iterator> register_values; |
1606 | int total_registers = config->num_general_registers(); |
1607 | register_values.resize(total_registers, {value_iterator}); |
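  // The resize above seeds every register with a placeholder iterator; the
  // loop below overwrites the entries for the actual register parameters.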
1608 | |
1609 | for (int i = 0; i < register_parameter_count; ++i, ++value_iterator) { |
1610 | int code = continuation_descriptor.GetRegisterParameter(i).code(); |
1611 | register_values[code] = value_iterator; |
1612 | } |
1613 | |
1614 | // The context register is always implicit in the CallInterfaceDescriptor but |
1615 | // its register must be explicitly set when continuing to the builtin. Make |
1616 | // sure that it's harvested from the translation and copied into the register |
1617 | // set (it was automatically added at the end of the FrameState by the |
1618 | // instruction selector). |
1619 | Object context = value_iterator->GetRawValue(); |
1620 | const intptr_t value = context->ptr(); |
1621 | TranslatedFrame::iterator context_register_value = value_iterator++; |
1622 | register_values[kContextRegister.code()] = context_register_value; |
1623 | output_frame->SetContext(value); |
1624 | output_frame->SetRegister(kContextRegister.code(), value); |
1625 | |
1626 | // Set caller's PC (JSFunction continuation). |
1627 | const intptr_t caller_pc = |
1628 | is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc(); |
1629 | frame_writer.PushCallerPc(caller_pc); |
1630 | |
1631 | // Read caller's FP from the previous frame, and set this frame's FP. |
1632 | const intptr_t caller_fp = |
1633 | is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp(); |
1634 | frame_writer.PushCallerFp(caller_fp); |
1635 | |
1636 | const intptr_t fp_value = top_address + frame_writer.top_offset(); |
1637 | output_frame->SetFp(fp_value); |
1638 | |
1639 | DCHECK_EQ(output_frame_size_above_fp, frame_writer.top_offset()); |
1640 | |
1641 | if (FLAG_enable_embedded_constant_pool) { |
1642 | // Read the caller's constant pool from the previous frame. |
1643 | const intptr_t caller_cp = |
1644 | is_bottommost ? caller_constant_pool_ |
1645 | : output_[frame_index - 1]->GetConstantPool(); |
1646 | frame_writer.PushCallerConstantPool(caller_cp); |
1647 | } |
1648 | |
1649 | // A marker value is used in place of the context. |
1650 | const intptr_t marker = |
1651 | StackFrame::TypeToMarker(BuiltinContinuationModeToFrameType(mode)); |
1652 | frame_writer.PushRawValue(marker, |
1653 | "context (builtin continuation sentinel)\n" ); |
1654 | |
1655 | if (BuiltinContinuationModeIsJavaScript(mode)) { |
1656 | frame_writer.PushRawValue(maybe_function, "JSFunction\n" ); |
1657 | } else { |
1658 | frame_writer.PushRawValue(0, "unused\n" ); |
1659 | } |
1660 | |
1661 | // The delta from the SP to the FP; used to reconstruct SP in |
1662 | // Isolate::UnwindAndFindHandler. |
1663 | frame_writer.PushRawObject(Smi::FromInt(output_frame_size_above_fp), |
1664 | "frame height at deoptimization\n" ); |
1665 | |
  // The context; this is pushed even for a stub continuation frame. We can't
  // use the usual context slot, because we must store the frame marker there.
1668 | frame_writer.PushTranslatedValue(context_register_value, |
1669 | "builtin JavaScript context\n" ); |
1670 | |
1671 | // The builtin to continue to. |
1672 | frame_writer.PushRawObject(builtin, "builtin address\n" ); |
1673 | |
1674 | for (int i = 0; i < allocatable_register_count; ++i) { |
1675 | int code = config->GetAllocatableGeneralCode(i); |
1676 | ScopedVector<char> str(128); |
1677 | if (trace_scope_ != nullptr) { |
1678 | if (BuiltinContinuationModeIsJavaScript(mode) && |
1679 | code == kJavaScriptCallArgCountRegister.code()) { |
1680 | SNPrintF( |
1681 | str, |
1682 | "tagged argument count %s (will be untagged by continuation)\n" , |
1683 | RegisterName(Register::from_code(code))); |
1684 | } else { |
1685 | SNPrintF(str, "builtin register argument %s\n" , |
1686 | RegisterName(Register::from_code(code))); |
1687 | } |
1688 | } |
1689 | frame_writer.PushTranslatedValue( |
1690 | register_values[code], trace_scope_ != nullptr ? str.start() : "" ); |
1691 | } |
1692 | |
1693 | // Some architectures must pad the stack frame with extra stack slots |
1694 | // to ensure the stack frame is aligned. |
1695 | for (int i = 0; i < padding_slot_count; ++i) { |
1696 | frame_writer.PushRawObject(roots.the_hole_value(), "padding\n" ); |
1697 | } |
1698 | |
1699 | if (is_topmost) { |
1700 | if (PadTopOfStackRegister()) { |
1701 | frame_writer.PushRawObject(roots.the_hole_value(), "padding\n" ); |
1702 | } |
    // Ensure the result is restored when we return to the stub.
1704 | |
1705 | if (must_handle_result) { |
1706 | Register result_reg = kReturnRegister0; |
1707 | frame_writer.PushRawValue(input_->GetRegister(result_reg.code()), |
1708 | "callback result\n" ); |
1709 | } else { |
1710 | frame_writer.PushRawObject(roots.undefined_value(), "callback result\n" ); |
1711 | } |
1712 | } |
1713 | |
1714 | CHECK_EQ(translated_frame->end(), value_iterator); |
1715 | CHECK_EQ(0u, frame_writer.top_offset()); |
1716 | |
1717 | // Clear the context register. The context might be a de-materialized object |
1718 | // and will be materialized by {Runtime_NotifyDeoptimized}. For additional |
1719 | // safety we use Smi(0) instead of the potential {arguments_marker} here. |
1720 | if (is_topmost) { |
1721 | intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr()); |
1722 | Register context_reg = JavaScriptFrame::context_register(); |
1723 | output_frame->SetRegister(context_reg.code(), context_value); |
1724 | } |
1725 | |
1726 | // Ensure the frame pointer register points to the callee's frame. The builtin |
1727 | // will build its own frame once we continue to it. |
1728 | Register fp_reg = JavaScriptFrame::fp_register(); |
1729 | output_frame->SetRegister(fp_reg.code(), fp_value); |
1730 | |
1731 | Code continue_to_builtin = isolate()->builtins()->builtin( |
1732 | TrampolineForBuiltinContinuation(mode, must_handle_result)); |
1733 | output_frame->SetPc( |
1734 | static_cast<intptr_t>(continue_to_builtin->InstructionStart())); |
1735 | |
1736 | Code continuation = |
1737 | isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized); |
1738 | output_frame->SetContinuation( |
1739 | static_cast<intptr_t>(continuation->InstructionStart())); |
1740 | } |
1741 | |
1742 | void Deoptimizer::MaterializeHeapObjects() { |
1743 | translated_state_.Prepare(static_cast<Address>(stack_fp_)); |
1744 | if (FLAG_deopt_every_n_times > 0) { |
1745 | // Doing a GC here will find problems with the deoptimized frames. |
1746 | isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags, |
1747 | GarbageCollectionReason::kTesting); |
1748 | } |
1749 | |
1750 | for (auto& materialization : values_to_materialize_) { |
1751 | Handle<Object> value = materialization.value_->GetValue(); |
1752 | |
1753 | if (trace_scope_ != nullptr) { |
1754 | PrintF("Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; " , |
1755 | static_cast<intptr_t>(materialization.output_slot_address_), |
1756 | value->ptr()); |
1757 | value->ShortPrint(trace_scope_->file()); |
1758 | PrintF(trace_scope_->file(), "\n" ); |
1759 | } |
1760 | |
1761 | *(reinterpret_cast<Address*>(materialization.output_slot_address_)) = |
1762 | value->ptr(); |
1763 | } |
1764 | |
1765 | translated_state_.VerifyMaterializedObjects(); |
1766 | |
1767 | bool feedback_updated = translated_state_.DoUpdateFeedback(); |
1768 | if (trace_scope_ != nullptr && feedback_updated) { |
1769 | PrintF(trace_scope_->file(), "Feedback updated" ); |
1770 | compiled_code_->PrintDeoptLocation(trace_scope_->file(), |
1771 | " from deoptimization at " , from_); |
1772 | } |
1773 | |
1774 | isolate_->materialized_object_store()->Remove( |
1775 | static_cast<Address>(stack_fp_)); |
1776 | } |
1777 | |
1778 | void Deoptimizer::QueueValueForMaterialization( |
1779 | Address output_address, Object obj, |
1780 | const TranslatedFrame::iterator& iterator) { |
1781 | if (obj == ReadOnlyRoots(isolate_).arguments_marker()) { |
1782 | values_to_materialize_.push_back({output_address, iterator}); |
1783 | } |
1784 | } |
1785 | |
1786 | unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const { |
1787 | unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp; |
1788 | // TODO(jkummerow): If {function_->IsSmi()} can indeed be true, then |
1789 | // {function_} should not have type {JSFunction}. |
1790 | if (!function_->IsSmi()) { |
1791 | fixed_size += ComputeIncomingArgumentSize(function_->shared()); |
1792 | } |
1793 | return fixed_size; |
1794 | } |
1795 | |
1796 | unsigned Deoptimizer::ComputeInputFrameSize() const { |
1797 | // The fp-to-sp delta already takes the context, constant pool pointer and the |
1798 | // function into account so we have to avoid double counting them. |
1799 | unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize(); |
1800 | unsigned result = fixed_size_above_fp + fp_to_sp_delta_; |
1801 | if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { |
1802 | unsigned stack_slots = compiled_code_->stack_slots(); |
1803 | unsigned outgoing_size = 0; |
1804 | // ComputeOutgoingArgumentSize(compiled_code_, bailout_id_); |
1805 | CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) - |
1806 | CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size, |
1807 | result); |
1808 | } |
1809 | return result; |
1810 | } |
1811 | |
1812 | // static |
1813 | unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo shared) { |
1814 | // The fixed part of the frame consists of the return address, frame |
1815 | // pointer, function, context, bytecode offset and all the incoming arguments. |
1816 | return ComputeIncomingArgumentSize(shared) + |
1817 | InterpreterFrameConstants::kFixedFrameSize; |
1818 | } |
1819 | |
1820 | // static |
1821 | unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) { |
1822 | int parameter_slots = shared->internal_formal_parameter_count() + 1; |
1823 | if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2); |
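  // E.g. with 2 declared parameters: 2 + 1 (receiver) = 3 slots, padded to 4
  // when kPadArguments, i.e. 32 bytes with 8-byte system pointers.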
1824 | return parameter_slots * kSystemPointerSize; |
1825 | } |
1826 | |
1827 | void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, |
1828 | DeoptimizeKind kind) { |
1829 | CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft || |
1830 | kind == DeoptimizeKind::kLazy); |
1831 | DeoptimizerData* data = isolate->deoptimizer_data(); |
1832 | if (!data->deopt_entry_code(kind).is_null()) return; |
1833 | |
1834 | MacroAssembler masm(isolate, CodeObjectRequired::kYes, |
1835 | NewAssemblerBuffer(16 * KB)); |
1836 | masm.set_emit_debug_code(false); |
1837 | GenerateDeoptimizationEntries(&masm, masm.isolate(), kind); |
1838 | CodeDesc desc; |
1839 | masm.GetCode(isolate, &desc); |
1840 | DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc)); |
1841 | |
1842 | // Allocate the code as immovable since the entry addresses will be used |
1843 | // directly and there is no support for relocating them. |
1844 | Handle<Code> code = isolate->factory()->NewCode( |
1845 | desc, Code::STUB, Handle<Object>(), Builtins::kNoBuiltinId, |
1846 | MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kImmovable); |
1847 | CHECK(isolate->heap()->IsImmovable(*code)); |
1848 | |
1849 | CHECK(data->deopt_entry_code(kind).is_null()); |
1850 | data->set_deopt_entry_code(kind, *code); |
1851 | } |
1852 | |
1853 | void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) { |
1854 | EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager); |
1855 | EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy); |
1856 | EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft); |
1857 | } |
1858 | |
1859 | FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count) |
1860 | : frame_size_(frame_size), |
1861 | parameter_count_(parameter_count), |
1862 | top_(kZapUint32), |
1863 | pc_(kZapUint32), |
1864 | fp_(kZapUint32), |
1865 | context_(kZapUint32), |
1866 | constant_pool_(kZapUint32) { |
1867 | // Zap all the registers. |
1868 | for (int r = 0; r < Register::kNumRegisters; r++) { |
1869 | // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register |
1870 | // isn't used before the next safepoint, the GC will try to scan it as a |
1871 | // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't. |
1872 | #if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64) |
1873 | // x18 is reserved as platform register on Windows arm64 platform |
1874 | const int kPlatformRegister = 18; |
1875 | if (r != kPlatformRegister) { |
1876 | SetRegister(r, kZapUint32); |
1877 | } |
1878 | #else |
1879 | SetRegister(r, kZapUint32); |
1880 | #endif |
1881 | } |
1882 | |
1883 | // Zap all the slots. |
1884 | for (unsigned o = 0; o < frame_size; o += kSystemPointerSize) { |
1885 | SetFrameSlot(o, kZapUint32); |
1886 | } |
1887 | } |
1888 | |
1889 | void TranslationBuffer::Add(int32_t value) { |
1890 | // This wouldn't handle kMinInt correctly if it ever encountered it. |
1891 | DCHECK_NE(value, kMinInt); |
1892 | // Encode the sign bit in the least significant bit. |
1893 | bool is_negative = (value < 0); |
1894 | uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) | |
1895 | static_cast<uint32_t>(is_negative); |
1896 | // Encode the individual bytes using the least significant bit of |
1897 | // each byte to indicate whether or not more bytes follow. |
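  // E.g. Add(-3): bits = (3 << 1) | 1 = 7, emitted as the single byte
  // (7 << 1) | 0 = 0x0E; Add(200): bits = 400, emitted as 0x21 (low seven
  // payload bits, more-flag set) followed by 0x06.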
1898 | do { |
1899 | uint32_t next = bits >> 7; |
1900 | contents_.push_back(((bits << 1) & 0xFF) | (next != 0)); |
1901 | bits = next; |
1902 | } while (bits != 0); |
1903 | } |
1904 | |
1905 | TranslationIterator::TranslationIterator(ByteArray buffer, int index) |
1906 | : buffer_(buffer), index_(index) { |
1907 | DCHECK(index >= 0 && index < buffer->length()); |
1908 | } |
1909 | |
1910 | int32_t TranslationIterator::Next() { |
1911 | // Run through the bytes until we reach one with a least significant |
1912 | // bit of zero (marks the end). |
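  // E.g. the bytes {0x21, 0x06} produced by TranslationBuffer::Add(200)
  // yield bits = (0x21 >> 1) | ((0x06 >> 1) << 7) = 400, decoded as +200.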
1913 | uint32_t bits = 0; |
1914 | for (int i = 0; true; i += 7) { |
1915 | DCHECK(HasNext()); |
1916 | uint8_t next = buffer_->get(index_++); |
1917 | bits |= (next >> 1) << i; |
1918 | if ((next & 1) == 0) break; |
1919 | } |
1920 | // The bits encode the sign in the least significant bit. |
1921 | bool is_negative = (bits & 1) == 1; |
1922 | int32_t result = bits >> 1; |
1923 | return is_negative ? -result : result; |
1924 | } |
1925 | |
1926 | bool TranslationIterator::HasNext() const { return index_ < buffer_->length(); } |
1927 | |
1928 | Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) { |
1929 | Handle<ByteArray> result = |
1930 | factory->NewByteArray(CurrentIndex(), AllocationType::kOld); |
1931 | contents_.CopyTo(result->GetDataStartAddress()); |
1932 | return result; |
1933 | } |
1934 | |
1935 | void Translation::BeginBuiltinContinuationFrame(BailoutId bailout_id, |
1936 | int literal_id, |
1937 | unsigned height) { |
1938 | buffer_->Add(BUILTIN_CONTINUATION_FRAME); |
1939 | buffer_->Add(bailout_id.ToInt()); |
1940 | buffer_->Add(literal_id); |
1941 | buffer_->Add(height); |
1942 | } |
1943 | |
1944 | void Translation::BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id, |
1945 | int literal_id, |
1946 | unsigned height) { |
1947 | buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME); |
1948 | buffer_->Add(bailout_id.ToInt()); |
1949 | buffer_->Add(literal_id); |
1950 | buffer_->Add(height); |
1951 | } |
1952 | |
1953 | void Translation::BeginJavaScriptBuiltinContinuationWithCatchFrame( |
1954 | BailoutId bailout_id, int literal_id, unsigned height) { |
1955 | buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME); |
1956 | buffer_->Add(bailout_id.ToInt()); |
1957 | buffer_->Add(literal_id); |
1958 | buffer_->Add(height); |
1959 | } |
1960 | |
1961 | void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id, |
1962 | unsigned height) { |
1963 | buffer_->Add(CONSTRUCT_STUB_FRAME); |
1964 | buffer_->Add(bailout_id.ToInt()); |
1965 | buffer_->Add(literal_id); |
1966 | buffer_->Add(height); |
1967 | } |
1968 | |
1969 | |
1970 | void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) { |
1971 | buffer_->Add(ARGUMENTS_ADAPTOR_FRAME); |
1972 | buffer_->Add(literal_id); |
1973 | buffer_->Add(height); |
1974 | } |
1975 | |
1976 | void Translation::BeginInterpretedFrame(BailoutId bytecode_offset, |
1977 | int literal_id, unsigned height, |
1978 | int return_value_offset, |
1979 | int return_value_count) { |
1980 | buffer_->Add(INTERPRETED_FRAME); |
1981 | buffer_->Add(bytecode_offset.ToInt()); |
1982 | buffer_->Add(literal_id); |
1983 | buffer_->Add(height); |
1984 | buffer_->Add(return_value_offset); |
1985 | buffer_->Add(return_value_count); |
1986 | } |
1987 | |
1988 | void Translation::ArgumentsElements(CreateArgumentsType type) { |
1989 | buffer_->Add(ARGUMENTS_ELEMENTS); |
1990 | buffer_->Add(static_cast<uint8_t>(type)); |
1991 | } |
1992 | |
1993 | void Translation::ArgumentsLength(CreateArgumentsType type) { |
1994 | buffer_->Add(ARGUMENTS_LENGTH); |
1995 | buffer_->Add(static_cast<uint8_t>(type)); |
1996 | } |
1997 | |
1998 | void Translation::BeginCapturedObject(int length) { |
1999 | buffer_->Add(CAPTURED_OBJECT); |
2000 | buffer_->Add(length); |
2001 | } |
2002 | |
2003 | |
2004 | void Translation::DuplicateObject(int object_index) { |
2005 | buffer_->Add(DUPLICATED_OBJECT); |
2006 | buffer_->Add(object_index); |
2007 | } |
2008 | |
2009 | |
2010 | void Translation::StoreRegister(Register reg) { |
2011 | buffer_->Add(REGISTER); |
2012 | buffer_->Add(reg.code()); |
2013 | } |
2014 | |
2015 | |
2016 | void Translation::StoreInt32Register(Register reg) { |
2017 | buffer_->Add(INT32_REGISTER); |
2018 | buffer_->Add(reg.code()); |
2019 | } |
2020 | |
2021 | void Translation::StoreInt64Register(Register reg) { |
2022 | buffer_->Add(INT64_REGISTER); |
2023 | buffer_->Add(reg.code()); |
2024 | } |
2025 | |
2026 | void Translation::StoreUint32Register(Register reg) { |
2027 | buffer_->Add(UINT32_REGISTER); |
2028 | buffer_->Add(reg.code()); |
2029 | } |
2030 | |
2031 | |
2032 | void Translation::StoreBoolRegister(Register reg) { |
2033 | buffer_->Add(BOOL_REGISTER); |
2034 | buffer_->Add(reg.code()); |
2035 | } |
2036 | |
2037 | void Translation::StoreFloatRegister(FloatRegister reg) { |
2038 | buffer_->Add(FLOAT_REGISTER); |
2039 | buffer_->Add(reg.code()); |
2040 | } |
2041 | |
2042 | void Translation::StoreDoubleRegister(DoubleRegister reg) { |
2043 | buffer_->Add(DOUBLE_REGISTER); |
2044 | buffer_->Add(reg.code()); |
2045 | } |
2046 | |
2047 | |
2048 | void Translation::StoreStackSlot(int index) { |
2049 | buffer_->Add(STACK_SLOT); |
2050 | buffer_->Add(index); |
2051 | } |
2052 | |
2053 | |
2054 | void Translation::StoreInt32StackSlot(int index) { |
2055 | buffer_->Add(INT32_STACK_SLOT); |
2056 | buffer_->Add(index); |
2057 | } |
2058 | |
2059 | void Translation::StoreInt64StackSlot(int index) { |
2060 | buffer_->Add(INT64_STACK_SLOT); |
2061 | buffer_->Add(index); |
2062 | } |
2063 | |
2064 | void Translation::StoreUint32StackSlot(int index) { |
2065 | buffer_->Add(UINT32_STACK_SLOT); |
2066 | buffer_->Add(index); |
2067 | } |
2068 | |
2069 | |
2070 | void Translation::StoreBoolStackSlot(int index) { |
2071 | buffer_->Add(BOOL_STACK_SLOT); |
2072 | buffer_->Add(index); |
2073 | } |
2074 | |
2075 | void Translation::StoreFloatStackSlot(int index) { |
2076 | buffer_->Add(FLOAT_STACK_SLOT); |
2077 | buffer_->Add(index); |
2078 | } |
2079 | |
2080 | void Translation::StoreDoubleStackSlot(int index) { |
2081 | buffer_->Add(DOUBLE_STACK_SLOT); |
2082 | buffer_->Add(index); |
2083 | } |
2084 | |
2085 | |
2086 | void Translation::StoreLiteral(int literal_id) { |
2087 | buffer_->Add(LITERAL); |
2088 | buffer_->Add(literal_id); |
2089 | } |
2090 | |
2091 | void Translation::AddUpdateFeedback(int vector_literal, int slot) { |
2092 | buffer_->Add(UPDATE_FEEDBACK); |
2093 | buffer_->Add(vector_literal); |
2094 | buffer_->Add(slot); |
2095 | } |
2096 | |
2097 | void Translation::StoreJSFrameFunction() { |
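  // The function occupies a fixed slot in the standard frame layout, so it
  // can be recorded as a plain stack-slot translation whose index is derived
  // from the frame constants.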
2098 | StoreStackSlot((StandardFrameConstants::kCallerPCOffset - |
2099 | StandardFrameConstants::kFunctionOffset) / |
2100 | kSystemPointerSize); |
2101 | } |
2102 | |
2103 | int Translation::NumberOfOperandsFor(Opcode opcode) { |
2104 | switch (opcode) { |
2105 | case DUPLICATED_OBJECT: |
2106 | case ARGUMENTS_ELEMENTS: |
2107 | case ARGUMENTS_LENGTH: |
2108 | case CAPTURED_OBJECT: |
2109 | case REGISTER: |
2110 | case INT32_REGISTER: |
2111 | case INT64_REGISTER: |
2112 | case UINT32_REGISTER: |
2113 | case BOOL_REGISTER: |
2114 | case FLOAT_REGISTER: |
2115 | case DOUBLE_REGISTER: |
2116 | case STACK_SLOT: |
2117 | case INT32_STACK_SLOT: |
2118 | case INT64_STACK_SLOT: |
2119 | case UINT32_STACK_SLOT: |
2120 | case BOOL_STACK_SLOT: |
2121 | case FLOAT_STACK_SLOT: |
2122 | case DOUBLE_STACK_SLOT: |
2123 | case LITERAL: |
2124 | return 1; |
2125 | case ARGUMENTS_ADAPTOR_FRAME: |
2126 | case UPDATE_FEEDBACK: |
2127 | return 2; |
2128 | case BEGIN: |
2129 | case CONSTRUCT_STUB_FRAME: |
2130 | case BUILTIN_CONTINUATION_FRAME: |
2131 | case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: |
2132 | case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: |
2133 | return 3; |
2134 | case INTERPRETED_FRAME: |
2135 | return 5; |
2136 | } |
2137 | FATAL("Unexpected translation type" ); |
2138 | return -1; |
2139 | } |
2140 | |
2141 | |
2142 | #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) |
2143 | |
2144 | const char* Translation::StringFor(Opcode opcode) { |
2145 | #define TRANSLATION_OPCODE_CASE(item) case item: return #item; |
2146 | switch (opcode) { |
2147 | TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE) |
2148 | } |
2149 | #undef TRANSLATION_OPCODE_CASE |
2150 | UNREACHABLE(); |
2151 | } |
2152 | |
2153 | #endif |
2154 | |
2155 | |
2156 | Handle<FixedArray> MaterializedObjectStore::Get(Address fp) { |
2157 | int index = StackIdToIndex(fp); |
2158 | if (index == -1) { |
2159 | return Handle<FixedArray>::null(); |
2160 | } |
2161 | Handle<FixedArray> array = GetStackEntries(); |
2162 | CHECK_GT(array->length(), index); |
2163 | return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate())); |
2164 | } |
2165 | |
2166 | |
2167 | void MaterializedObjectStore::Set(Address fp, |
2168 | Handle<FixedArray> materialized_objects) { |
2169 | int index = StackIdToIndex(fp); |
2170 | if (index == -1) { |
2171 | index = static_cast<int>(frame_fps_.size()); |
2172 | frame_fps_.push_back(fp); |
2173 | } |
2174 | |
2175 | Handle<FixedArray> array = EnsureStackEntries(index + 1); |
2176 | array->set(index, *materialized_objects); |
2177 | } |
2178 | |
2179 | |
2180 | bool MaterializedObjectStore::Remove(Address fp) { |
2181 | auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp); |
2182 | if (it == frame_fps_.end()) return false; |
2183 | int index = static_cast<int>(std::distance(frame_fps_.begin(), it)); |
2184 | |
2185 | frame_fps_.erase(it); |
2186 | FixedArray array = isolate()->heap()->materialized_objects(); |
2187 | |
2188 | CHECK_LT(index, array->length()); |
2189 | int fps_size = static_cast<int>(frame_fps_.size()); |
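  // Close the gap left by the removed entry and clear the now-unused slot.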
2190 | for (int i = index; i < fps_size; i++) { |
2191 | array->set(i, array->get(i + 1)); |
2192 | } |
2193 | array->set(fps_size, ReadOnlyRoots(isolate()).undefined_value()); |
2194 | return true; |
2195 | } |
2196 | |
2197 | |
2198 | int MaterializedObjectStore::StackIdToIndex(Address fp) { |
2199 | auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp); |
2200 | return it == frame_fps_.end() |
2201 | ? -1 |
2202 | : static_cast<int>(std::distance(frame_fps_.begin(), it)); |
2203 | } |
2204 | |
2205 | |
2206 | Handle<FixedArray> MaterializedObjectStore::GetStackEntries() { |
2207 | return Handle<FixedArray>(isolate()->heap()->materialized_objects(), |
2208 | isolate()); |
2209 | } |
2210 | |
2211 | |
2212 | Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) { |
2213 | Handle<FixedArray> array = GetStackEntries(); |
2214 | if (array->length() >= length) { |
2215 | return array; |
2216 | } |
2217 | |
2218 | int new_length = length > 10 ? length : 10; |
2219 | if (new_length < 2 * array->length()) { |
2220 | new_length = 2 * array->length(); |
2221 | } |
2222 | |
2223 | Handle<FixedArray> new_array = |
2224 | isolate()->factory()->NewFixedArray(new_length, AllocationType::kOld); |
2225 | for (int i = 0; i < array->length(); i++) { |
2226 | new_array->set(i, array->get(i)); |
2227 | } |
2228 | HeapObject undefined_value = ReadOnlyRoots(isolate()).undefined_value(); |
2229 | for (int i = array->length(); i < length; i++) { |
2230 | new_array->set(i, undefined_value); |
2231 | } |
2232 | isolate()->heap()->SetRootMaterializedObjects(*new_array); |
2233 | return new_array; |
2234 | } |
2235 | |
2236 | namespace { |
2237 | |
2238 | Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it, |
2239 | Isolate* isolate) { |
2240 | if (it->GetRawValue() == ReadOnlyRoots(isolate).arguments_marker()) { |
2241 | if (!it->IsMaterializableByDebugger()) { |
2242 | return isolate->factory()->optimized_out(); |
2243 | } |
2244 | } |
2245 | return it->GetValue(); |
2246 | } |
2247 | |
2248 | } // namespace |
2249 | |
2250 | DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state, |
2251 | TranslatedState::iterator frame_it, |
2252 | Isolate* isolate) { |
2253 | int parameter_count = |
2254 | frame_it->shared_info()->internal_formal_parameter_count(); |
2255 | TranslatedFrame::iterator stack_it = frame_it->begin(); |
2256 | |
2257 | // Get the function. Note that this might materialize the function. |
2258 | // In case the debugger mutates this value, we should deoptimize |
2259 | // the function and remember the value in the materialized value store. |
2260 | function_ = Handle<JSFunction>::cast(stack_it->GetValue()); |
2261 | stack_it++; // Skip the function. |
2262 | stack_it++; // Skip the receiver. |
2263 | |
2264 | DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind()); |
2265 | source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray( |
2266 | *frame_it->shared_info(), frame_it->node_id()); |
2267 | |
2268 | DCHECK_EQ(parameter_count, |
2269 | function_->shared()->internal_formal_parameter_count()); |
2270 | |
2271 | parameters_.resize(static_cast<size_t>(parameter_count)); |
2272 | for (int i = 0; i < parameter_count; i++) { |
2273 | Handle<Object> parameter = GetValueForDebugger(stack_it, isolate); |
2274 | SetParameter(i, parameter); |
2275 | stack_it++; |
2276 | } |
2277 | |
2278 | // Get the context. |
2279 | context_ = GetValueForDebugger(stack_it, isolate); |
2280 | stack_it++; |
2281 | |
2282 | // Get the expression stack. |
2283 | int stack_height = frame_it->height(); |
2284 | if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) { |
2285 | // For interpreter frames, we should not count the accumulator. |
2286 | // TODO(jarin): Clean up the indexing in translated frames. |
2287 | stack_height--; |
2288 | } |
2289 | expression_stack_.resize(static_cast<size_t>(stack_height)); |
2290 | for (int i = 0; i < stack_height; i++) { |
2291 | Handle<Object> expression = GetValueForDebugger(stack_it, isolate); |
2292 | SetExpression(i, expression); |
2293 | stack_it++; |
2294 | } |
2295 | |
  // For interpreter frames, skip the accumulator.
2297 | if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) { |
2298 | stack_it++; |
2299 | } |
2300 | CHECK(stack_it == frame_it->end()); |
2301 | } |
2302 | |
2303 | Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) { |
2304 | CHECK(code->InstructionStart() <= pc && pc <= code->InstructionEnd()); |
2305 | SourcePosition last_position = SourcePosition::Unknown(); |
2306 | DeoptimizeReason last_reason = DeoptimizeReason::kUnknown; |
2307 | int last_deopt_id = kNoDeoptimizationId; |
2308 | int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) | |
2309 | RelocInfo::ModeMask(RelocInfo::DEOPT_ID) | |
2310 | RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) | |
2311 | RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID); |
2312 | for (RelocIterator it(code, mask); !it.done(); it.next()) { |
2313 | RelocInfo* info = it.rinfo(); |
2314 | if (info->pc() >= pc) break; |
2315 | if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) { |
2316 | int script_offset = static_cast<int>(info->data()); |
2317 | it.next(); |
2318 | DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID); |
2319 | int inlining_id = static_cast<int>(it.rinfo()->data()); |
2320 | last_position = SourcePosition(script_offset, inlining_id); |
2321 | } else if (info->rmode() == RelocInfo::DEOPT_ID) { |
2322 | last_deopt_id = static_cast<int>(info->data()); |
2323 | } else if (info->rmode() == RelocInfo::DEOPT_REASON) { |
2324 | last_reason = static_cast<DeoptimizeReason>(info->data()); |
2325 | } |
2326 | } |
2327 | return DeoptInfo(last_position, last_reason, last_deopt_id); |
2328 | } |
2329 | |
2330 | |
2331 | // static |
2332 | int Deoptimizer::ComputeSourcePositionFromBytecodeArray( |
2333 | SharedFunctionInfo shared, BailoutId node_id) { |
2334 | DCHECK(shared->HasBytecodeArray()); |
2335 | return AbstractCode::cast(shared->GetBytecodeArray()) |
2336 | ->SourcePosition(node_id.ToInt()); |
2337 | } |
2338 | |
2339 | // static |
2340 | TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container, |
2341 | int length, |
2342 | int object_index) { |
2343 | TranslatedValue slot(container, kCapturedObject); |
2344 | slot.materialization_info_ = {object_index, length}; |
2345 | return slot; |
2346 | } |
2347 | |
2348 | |
2349 | // static |
2350 | TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container, |
2351 | int id) { |
2352 | TranslatedValue slot(container, kDuplicatedObject); |
2353 | slot.materialization_info_ = {id, -1}; |
2354 | return slot; |
2355 | } |
2356 | |
2357 | |
2358 | // static |
2359 | TranslatedValue TranslatedValue::NewFloat(TranslatedState* container, |
2360 | Float32 value) { |
2361 | TranslatedValue slot(container, kFloat); |
2362 | slot.float_value_ = value; |
2363 | return slot; |
2364 | } |
2365 | |
2366 | // static |
2367 | TranslatedValue TranslatedValue::NewDouble(TranslatedState* container, |
2368 | Float64 value) { |
2369 | TranslatedValue slot(container, kDouble); |
2370 | slot.double_value_ = value; |
2371 | return slot; |
2372 | } |
2373 | |
2374 | |
2375 | // static |
2376 | TranslatedValue TranslatedValue::NewInt32(TranslatedState* container, |
2377 | int32_t value) { |
2378 | TranslatedValue slot(container, kInt32); |
2379 | slot.int32_value_ = value; |
2380 | return slot; |
2381 | } |
2382 | |
2383 | // static |
2384 | TranslatedValue TranslatedValue::NewInt64(TranslatedState* container, |
2385 | int64_t value) { |
2386 | TranslatedValue slot(container, kInt64); |
2387 | slot.int64_value_ = value; |
2388 | return slot; |
2389 | } |
2390 | |
2391 | // static |
2392 | TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container, |
2393 | uint32_t value) { |
2394 | TranslatedValue slot(container, kUInt32); |
2395 | slot.uint32_value_ = value; |
2396 | return slot; |
2397 | } |
2398 | |
2399 | |
2400 | // static |
2401 | TranslatedValue TranslatedValue::NewBool(TranslatedState* container, |
2402 | uint32_t value) { |
2403 | TranslatedValue slot(container, kBoolBit); |
2404 | slot.uint32_value_ = value; |
2405 | return slot; |
2406 | } |
2407 | |
2408 | |
2409 | // static |
2410 | TranslatedValue TranslatedValue::NewTagged(TranslatedState* container, |
2411 | Object literal) { |
2412 | TranslatedValue slot(container, kTagged); |
2413 | slot.raw_literal_ = literal; |
2414 | return slot; |
2415 | } |
2416 | |
2417 | // static |
2418 | TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) { |
2419 | return TranslatedValue(container, kInvalid); |
2420 | } |
2421 | |
2422 | |
2423 | Isolate* TranslatedValue::isolate() const { return container_->isolate(); } |
2424 | |
2425 | Object TranslatedValue::raw_literal() const { |
2426 | DCHECK_EQ(kTagged, kind()); |
2427 | return raw_literal_; |
2428 | } |
2429 | |
2430 | int32_t TranslatedValue::int32_value() const { |
2431 | DCHECK_EQ(kInt32, kind()); |
2432 | return int32_value_; |
2433 | } |
2434 | |
2435 | int64_t TranslatedValue::int64_value() const { |
2436 | DCHECK_EQ(kInt64, kind()); |
2437 | return int64_value_; |
2438 | } |
2439 | |
2440 | uint32_t TranslatedValue::uint32_value() const { |
2441 | DCHECK(kind() == kUInt32 || kind() == kBoolBit); |
2442 | return uint32_value_; |
2443 | } |
2444 | |
2445 | Float32 TranslatedValue::float_value() const { |
2446 | DCHECK_EQ(kFloat, kind()); |
2447 | return float_value_; |
2448 | } |
2449 | |
2450 | Float64 TranslatedValue::double_value() const { |
2451 | DCHECK_EQ(kDouble, kind()); |
2452 | return double_value_; |
2453 | } |
2454 | |
2455 | |
2456 | int TranslatedValue::object_length() const { |
2457 | DCHECK_EQ(kind(), kCapturedObject); |
2458 | return materialization_info_.length_; |
2459 | } |
2460 | |
2461 | |
2462 | int TranslatedValue::object_index() const { |
2463 | DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject); |
2464 | return materialization_info_.id_; |
2465 | } |
2466 | |
2467 | Object TranslatedValue::GetRawValue() const { |
2468 | // If we have a value, return it. |
2469 | if (materialization_state() == kFinished) { |
2470 | return *storage_; |
2471 | } |
2472 | |
2473 | // Otherwise, do a best effort to get the value without allocation. |
2474 | switch (kind()) { |
2475 | case kTagged: |
2476 | return raw_literal(); |
2477 | |
2478 | case kInt32: { |
2479 | bool is_smi = Smi::IsValid(int32_value()); |
2480 | if (is_smi) { |
2481 | return Smi::FromInt(int32_value()); |
2482 | } |
2483 | break; |
2484 | } |
2485 | |
2486 | case kInt64: { |
2487 | bool is_smi = (int64_value() >= static_cast<int64_t>(Smi::kMinValue) && |
2488 | int64_value() <= static_cast<int64_t>(Smi::kMaxValue)); |
2489 | if (is_smi) { |
2490 | return Smi::FromIntptr(static_cast<intptr_t>(int64_value())); |
2491 | } |
2492 | break; |
2493 | } |
2494 | |
2495 | case kUInt32: { |
2496 | bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue)); |
2497 | if (is_smi) { |
2498 | return Smi::FromInt(static_cast<int32_t>(uint32_value())); |
2499 | } |
2500 | break; |
2501 | } |
2502 | |
2503 | case kBoolBit: { |
2504 | if (uint32_value() == 0) { |
2505 | return ReadOnlyRoots(isolate()).false_value(); |
2506 | } else { |
2507 | CHECK_EQ(1U, uint32_value()); |
2508 | return ReadOnlyRoots(isolate()).true_value(); |
2509 | } |
2510 | } |
2511 | |
2512 | default: |
2513 | break; |
2514 | } |
2515 | |
2516 | // If we could not get the value without allocation, return the arguments |
2517 | // marker. |
2518 | return ReadOnlyRoots(isolate()).arguments_marker(); |
2519 | } |
2520 | |
2521 | void TranslatedValue::set_initialized_storage(Handle<Object> storage) { |
2522 | DCHECK_EQ(kUninitialized, materialization_state()); |
2523 | storage_ = storage; |
2524 | materialization_state_ = kFinished; |
2525 | } |
2526 | |
2527 | Handle<Object> TranslatedValue::GetValue() { |
2528 | // If we already have a value, then get it. |
2529 | if (materialization_state() == kFinished) return storage_; |
2530 | |
2531 | // Otherwise we have to materialize. |
2532 | switch (kind()) { |
2533 | case TranslatedValue::kTagged: |
2534 | case TranslatedValue::kInt32: |
2535 | case TranslatedValue::kInt64: |
2536 | case TranslatedValue::kUInt32: |
2537 | case TranslatedValue::kBoolBit: |
2538 | case TranslatedValue::kFloat: |
2539 | case TranslatedValue::kDouble: { |
2540 | MaterializeSimple(); |
2541 | return storage_; |
2542 | } |
2543 | |
2544 | case TranslatedValue::kCapturedObject: |
2545 | case TranslatedValue::kDuplicatedObject: { |
2546 | // We need to materialize the object (or possibly even object graphs). |
2547 | // To make the object verifier happy, we materialize in two steps. |
2548 | |
2549 | // 1. Allocate storage for reachable objects. This makes sure that for |
2550 | // each object we have allocated space on heap. The space will be |
2551 | // a byte array that will be later initialized, or a fully |
2552 | // initialized object if it is safe to allocate one that will |
2553 | // pass the verifier. |
2554 | container_->EnsureObjectAllocatedAt(this); |
2555 | |
2556 | // 2. Initialize the objects. If we have allocated only byte arrays |
2557 | // for some objects, we now overwrite the byte arrays with the |
2558 | // correct object fields. Note that this phase does not allocate |
2559 | // any new objects, so it does not trigger the object verifier. |
2560 | return container_->InitializeObjectAt(this); |
2561 | } |
2562 | |
2563 | case TranslatedValue::kInvalid: |
2564 | FATAL("unexpected case" ); |
2565 | return Handle<Object>::null(); |
2566 | } |
2567 | |
2568 | FATAL("internal error: value missing" ); |
2569 | return Handle<Object>::null(); |
2570 | } |
2571 | |
2572 | void TranslatedValue::MaterializeSimple() { |
2573 | // If we already have materialized, return. |
2574 | if (materialization_state() == kFinished) return; |
2575 | |
2576 | Object raw_value = GetRawValue(); |
2577 | if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) { |
2578 | // We can get the value without allocation, just return it here. |
2579 | set_initialized_storage(Handle<Object>(raw_value, isolate())); |
2580 | return; |
2581 | } |
2582 | |
2583 | switch (kind()) { |
2584 | case kInt32: |
2585 | set_initialized_storage( |
2586 | Handle<Object>(isolate()->factory()->NewNumber(int32_value()))); |
2587 | return; |
2588 | |
2589 | case kInt64: |
2590 | set_initialized_storage(Handle<Object>( |
2591 | isolate()->factory()->NewNumber(static_cast<double>(int64_value())))); |
2592 | return; |
2593 | |
2594 | case kUInt32: |
2595 | set_initialized_storage( |
2596 | Handle<Object>(isolate()->factory()->NewNumber(uint32_value()))); |
2597 | return; |
2598 | |
2599 | case kFloat: { |
2600 | double scalar_value = float_value().get_scalar(); |
2601 | set_initialized_storage( |
2602 | Handle<Object>(isolate()->factory()->NewNumber(scalar_value))); |
2603 | return; |
2604 | } |
2605 | |
2606 | case kDouble: { |
2607 | double scalar_value = double_value().get_scalar(); |
2608 | set_initialized_storage( |
2609 | Handle<Object>(isolate()->factory()->NewNumber(scalar_value))); |
2610 | return; |
2611 | } |
2612 | |
2613 | case kCapturedObject: |
2614 | case kDuplicatedObject: |
2615 | case kInvalid: |
2616 | case kTagged: |
2617 | case kBoolBit: |
2618 | FATAL("internal error: unexpected materialization." ); |
2619 | break; |
2620 | } |
2621 | } |
2622 | |
2623 | |
2624 | bool TranslatedValue::IsMaterializedObject() const { |
2625 | switch (kind()) { |
2626 | case kCapturedObject: |
2627 | case kDuplicatedObject: |
2628 | return true; |
2629 | default: |
2630 | return false; |
2631 | } |
2632 | } |
2633 | |
2634 | bool TranslatedValue::IsMaterializableByDebugger() const { |
2635 | // At the moment, we only allow materialization of doubles. |
2636 | return (kind() == kDouble); |
2637 | } |
2638 | |
2639 | int TranslatedValue::GetChildrenCount() const { |
2640 | if (kind() == kCapturedObject) { |
2641 | return object_length(); |
2642 | } else { |
2643 | return 0; |
2644 | } |
2645 | } |
2646 | |
2647 | uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) { |
2648 | #if V8_TARGET_ARCH_32_BIT |
2649 | return ReadUnalignedValue<uint64_t>(fp + slot_offset); |
2650 | #else |
2651 | return Memory<uint64_t>(fp + slot_offset); |
2652 | #endif |
2653 | } |
2654 | |
2655 | uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) { |
2656 | Address address = fp + slot_offset; |
2657 | #if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT |
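  // On big-endian 64-bit targets the 32-bit payload sits in the upper half
  // of the pointer-sized slot, hence the extra kIntSize offset.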
2658 | return Memory<uint32_t>(address + kIntSize); |
2659 | #else |
2660 | return Memory<uint32_t>(address); |
2661 | #endif |
2662 | } |
2663 | |
2664 | Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) { |
2665 | #if !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 |
2666 | return Float32::FromBits(GetUInt32Slot(fp, slot_offset)); |
2667 | #else |
2668 | return Float32::FromBits(Memory<uint32_t>(fp + slot_offset)); |
2669 | #endif |
2670 | } |
2671 | |
2672 | Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) { |
2673 | return Float64::FromBits(GetUInt64Slot(fp, slot_offset)); |
2674 | } |
2675 | |
2676 | void TranslatedValue::Handlify() { |
2677 | if (kind() == kTagged) { |
2678 | set_initialized_storage(Handle<Object>(raw_literal(), isolate())); |
2679 | raw_literal_ = Object(); |
2680 | } |
2681 | } |
2682 | |
2683 | TranslatedFrame TranslatedFrame::InterpretedFrame( |
2684 | BailoutId bytecode_offset, SharedFunctionInfo shared_info, int height, |
2685 | int return_value_offset, int return_value_count) { |
2686 | TranslatedFrame frame(kInterpretedFunction, shared_info, height, |
2687 | return_value_offset, return_value_count); |
2688 | frame.node_id_ = bytecode_offset; |
2689 | return frame; |
2690 | } |
2691 | |
2692 | TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame( |
2693 | SharedFunctionInfo shared_info, int height) { |
2694 | return TranslatedFrame(kArgumentsAdaptor, shared_info, height); |
2695 | } |
2696 | |
2697 | TranslatedFrame TranslatedFrame::ConstructStubFrame( |
2698 | BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { |
2699 | TranslatedFrame frame(kConstructStub, shared_info, height); |
2700 | frame.node_id_ = bailout_id; |
2701 | return frame; |
2702 | } |
2703 | |
2704 | TranslatedFrame TranslatedFrame::BuiltinContinuationFrame( |
2705 | BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { |
2706 | TranslatedFrame frame(kBuiltinContinuation, shared_info, height); |
2707 | frame.node_id_ = bailout_id; |
2708 | return frame; |
2709 | } |
2710 | |
2711 | TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame( |
2712 | BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { |
2713 | TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height); |
2714 | frame.node_id_ = bailout_id; |
2715 | return frame; |
2716 | } |
2717 | |
2718 | TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame( |
2719 | BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { |
2720 | TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info, |
2721 | height); |
2722 | frame.node_id_ = bailout_id; |
2723 | return frame; |
2724 | } |
2725 | |
2726 | int TranslatedFrame::GetValueCount() { |
2727 | switch (kind()) { |
2728 | case kInterpretedFunction: { |
2729 | int parameter_count = |
2730 | raw_shared_info_->internal_formal_parameter_count() + 1; |
2731 | // + 2 for function and context. |
2732 | return height_ + parameter_count + 2; |
2733 | } |
2734 | |
2735 | case kArgumentsAdaptor: |
2736 | case kConstructStub: |
2737 | case kBuiltinContinuation: |
2738 | case kJavaScriptBuiltinContinuation: |
2739 | case kJavaScriptBuiltinContinuationWithCatch: |
2740 | return 1 + height_; |
2741 | |
2742 | case kInvalid: |
2743 | UNREACHABLE(); |
2744 | break; |
2745 | } |
2746 | UNREACHABLE(); |
2747 | } |
2748 | |
2749 | |
2750 | void TranslatedFrame::Handlify() { |
2751 | if (!raw_shared_info_.is_null()) { |
2752 | shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_, |
2753 | raw_shared_info_->GetIsolate()); |
2754 | raw_shared_info_ = SharedFunctionInfo(); |
2755 | } |
2756 | for (auto& value : values_) { |
2757 | value.Handlify(); |
2758 | } |
2759 | } |
2760 | |
2761 | TranslatedFrame TranslatedState::CreateNextTranslatedFrame( |
2762 | TranslationIterator* iterator, FixedArray literal_array, Address fp, |
2763 | FILE* trace_file) { |
2764 | Translation::Opcode opcode = |
2765 | static_cast<Translation::Opcode>(iterator->Next()); |
2766 | switch (opcode) { |
2767 | case Translation::INTERPRETED_FRAME: { |
2768 | BailoutId bytecode_offset = BailoutId(iterator->Next()); |
2769 | SharedFunctionInfo shared_info = |
2770 | SharedFunctionInfo::cast(literal_array->get(iterator->Next())); |
2771 | int height = iterator->Next(); |
2772 | int return_value_offset = iterator->Next(); |
2773 | int return_value_count = iterator->Next(); |
2774 | if (trace_file != nullptr) { |
2775 | std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString(); |
2776 | PrintF(trace_file, " reading input frame %s" , name.get()); |
2777 | int arg_count = shared_info->internal_formal_parameter_count() + 1; |
2778 | PrintF(trace_file, |
2779 | " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); " |
2780 | "inputs:\n" , |
2781 | bytecode_offset.ToInt(), arg_count, height, return_value_offset, |
2782 | return_value_count); |
2783 | } |
2784 | return TranslatedFrame::InterpretedFrame( |
---|