/*
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLOSRExitCompiler.h"

#if ENABLE(FTL_JIT)

#include "DFGOSRExitCompilerCommon.h"
#include "DFGOSRExitPreparation.h"
#include "FTLExitArgumentForOperand.h"
#include "FTLJITCode.h"
#include "FTLLocation.h"
#include "FTLOSRExit.h"
#include "FTLOperations.h"
#include "FTLSaveRestore.h"
#include "FTLState.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "OperandsInlines.h"

namespace JSC { namespace FTL {

using namespace DFG;

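// OSR exit hands us each recovered value in some raw machine representation, and we have
// to rebox it into the 64-bit JSValue encoding before it can be written into the baseline
// JS stack. As a rough sketch of what the cases below accomplish (bit patterns are for the
// JSVALUE64 NaN-boxed encoding):
//
//     Int32 5:        0x0000000000000005 -> 0xffff000000000005 (OR in TagTypeNumber)
//     Boolean true:   0x0000000000000001 -> 0x0000000000000007 (OR in ValueFalse)
//     Double:         raw IEEE754 bits in a GPR -> NaN purified, then boxed via boxDouble()
//     (Strict)Int52:  52-bit integer (shifted left by int52ShiftAmount in the non-strict
//                     case) -> boxInt52(), which picks an Int32 or double encoding
//
// fpRegT0 is used as FP scratch by the Int52 and Double cases; since all registers are
// still live at this point, its old contents are stashed in a GPR and restored afterwards.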
static void reboxAccordingToFormat(
    DataFormat format, AssemblyHelpers& jit, GPRReg value, GPRReg scratch1, GPRReg scratch2)
{
    switch (format) {
    case DataFormatInt32: {
        jit.zeroExtend32ToPtr(value, value);
        jit.or64(GPRInfo::tagTypeNumberRegister, value);
        break;
    }

    case DataFormatInt52: {
        jit.rshift64(AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), value);
        jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
        jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
        jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
        break;
    }

    case DataFormatStrictInt52: {
        jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
        jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
        jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
        break;
    }

    case DataFormatBoolean: {
        jit.zeroExtend32ToPtr(value, value);
        jit.or32(MacroAssembler::TrustedImm32(ValueFalse), value);
        break;
    }

    case DataFormatJS: {
        // Done already!
        break;
    }

    case DataFormatDouble: {
        jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch1);
        jit.move64ToDouble(value, FPRInfo::fpRegT0);
        jit.purifyNaN(FPRInfo::fpRegT0);
        jit.boxDouble(FPRInfo::fpRegT0, value);
        jit.move64ToDouble(scratch1, FPRInfo::fpRegT0);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}

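// Emits code that recovers one value according to the exit data and leaves it, fully
// reboxed, in regT0. regT1 and regT2 are clobbered as reboxing scratch. registerScratch is
// the buffer that saveAllRegisters() fills in below; materializationToPointer maps each
// object materialization to the scratch slot holding its allocated object.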
static void compileRecovery(
    CCallHelpers& jit, const ExitValue& value,
    Vector<B3::ValueRep>& valueReps,
    char* registerScratch,
    const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer)
{
    switch (value.kind()) {
    case ExitValueDead:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
        break;

    case ExitValueConstant:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
        break;

    case ExitValueArgument:
        Location::forValueRep(valueReps[value.exitArgument().argument()]).restoreInto(
            jit, registerScratch, GPRInfo::regT0);
        break;

    case ExitValueInJSStack:
    case ExitValueInJSStackAsInt32:
    case ExitValueInJSStackAsInt52:
    case ExitValueInJSStackAsDouble:
        jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
        break;

    case ExitValueMaterializeNewObject:
        jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0);
        break;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    reboxAccordingToFormat(
        value.dataFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
}

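// Compiles the out-of-line exit stub for a single exit site. The stub recovers all bytecode
// state from wherever the FTL compilation left it, rebuilds a baseline-shaped call frame,
// and jumps to the exit target in baseline code. It is compiled lazily the first time the
// site exits; all subsequent exits at the site jump straight to the finished stub.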
static void compileStub(
    unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    // This code requires that framePointerRegister and callFrameRegister be the same.
    static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");

    CCallHelpers jit(codeBlock);

    // The first thing we need to do is reestablish our frame in the case of an exception.
    if (exit.isGenericUnwindHandler()) {
        RELEASE_ASSERT(vm->callFrameForCatch); // This field should be non-null on every visit to this exit, including the first.
        jit.restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
        jit.loadPtr(vm->addressOfCallFrameForCatch(), MacroAssembler::framePointerRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
            MacroAssembler::framePointerRegister, CCallHelpers::stackPointerRegister);

        // Do a pushToSave because that's what the exit compiler below expects the stack to
        // look like, since a pushToSave is the last thing the ExitThunkGenerator does. The
        // code below doesn't actually use the value that was pushed, but it does rely on
        // the general shape of the stack being as it is in the non-exception OSR case.
        jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(0xbadbeef));
    }

    // We need scratch space to save all registers, to build up the JS stack, to deal with unwind
    // fixup, pointers to all of the objects we materialize, and the elements inside those objects
    // that we materialize.

    // Figure out how much space we need for those object allocations.
    unsigned numMaterializations = 0;
    size_t maxMaterializationNumArguments = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
        numMaterializations++;

        maxMaterializationNumArguments = std::max(
            maxMaterializationNumArguments,
            materialization->properties().size());
    }

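    // The scratch buffer is carved up as follows, in order:
    //
    //     [ recovered exit values    ]  m_values.size() EncodedJSValue slots
    //     [ materialization pointers ]  numMaterializations EncodedJSValue slots
    //     [ materialization args     ]  maxMaterializationNumArguments EncodedJSValue slots
    //     [ register scratch         ]  requiredScratchMemorySizeInBytes() bytes
    //     [ unwind scratch           ]  one uint64_t per FTL callee-save register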
    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(
        sizeof(EncodedJSValue) * (
            exit.m_descriptor->m_values.size() + numMaterializations + maxMaterializationNumArguments) +
        requiredScratchMemorySizeInBytes() +
        codeBlock->calleeSaveRegisters()->size() * sizeof(uint64_t));
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : nullptr;
    EncodedJSValue* materializationPointers = scratch + exit.m_descriptor->m_values.size();
    EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
    char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
    uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());

    HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
    unsigned materializationCount = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
        materializationToPointer.add(
            materialization, materializationPointers + materializationCount++);
    }

195
196 auto recoverValue = [&] (const ExitValue& value) {
197 compileRecovery(
198 jit, value,
199 exit.m_valueReps,
200 registerScratch, materializationToPointer);
201 };
202
203 // Note that we come in here, the stack used to be as B3 left it except that someone called pushToSave().
204 // We don't care about the value they saved. But, we do appreciate the fact that they did it, because we use
205 // that slot for saveAllRegisters().
206
    saveAllRegisters(jit, registerScratch);

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC. We need to set this before object
        // materialization below.

        // Even though we set Heap::m_expectDoesGC in compileFTLOSRExit(), we also need
        // to set it here because compileFTLOSRExit() is only called the first time
        // we exit from this site, but all subsequent exits will take this compiled
        // ramp without calling compileFTLOSRExit() first.
        jit.store8(CCallHelpers::TrustedImm32(true), vm->heap.addressOfExpectDoesGC());
    }

    // Bring the stack back into a sane form and assert that it's sane.
    jit.popToRestore(GPRInfo::regT0);
    jit.checkStackPointerAlignment();

    if (UNLIKELY(vm->m_perBytecodeProfiler && jitCode->dfgCommon()->compilation)) {
        Profiler::Database& database = *vm->m_perBytecodeProfiler;
        Profiler::Compilation* compilation = jitCode->dfgCommon()->compilation.get();

        Profiler::OSRExit* profilerExit = compilation->addOSRExit(
            exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
            exit.m_kind, exit.m_kind == UncountableInvalidation);
        jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
    }

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.

    // Reestablish the tag registers. We don't need to restore the call frame register
    // separately, since it's the frame pointer register and we set that up above.
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);

    // Do some value profiling.
    if (exit.m_descriptor->m_profileDataFormat != DataFormatNone) {
        Location::forValueRep(exit.m_valueReps[0]).restoreInto(jit, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_descriptor->m_profileDataFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);

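        // If we exited because a speculation about array shape went wrong, feed what we
        // actually saw back into the baseline ArrayProfile, so that the next DFG/FTL
        // compilation can speculate on the structures and array modes seen in practice.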
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex())) {
                jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());

                // Compute the arrayModes bit to OR into the profile. Typed arrays get their
                // mode from the typedArrayModes table; everything else gets 1 << indexingMode.
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::typeInfoTypeOffset()), GPRInfo::regT2);
                jit.sub32(MacroAssembler::TrustedImm32(FirstTypedArrayType), GPRInfo::regT2);
                auto notTypedArray = jit.branch32(MacroAssembler::AboveOrEqual, GPRInfo::regT2, MacroAssembler::TrustedImm32(NumberOfTypedArrayTypesExcludingDataView));
                jit.move(MacroAssembler::TrustedImmPtr(typedArrayModes), GPRInfo::regT1);
                jit.load32(MacroAssembler::BaseIndex(GPRInfo::regT1, GPRInfo::regT2, MacroAssembler::TimesFour), GPRInfo::regT2);
                auto storeArrayModes = jit.jump();

                notTypedArray.link(&jit);
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeAndMiscOffset()), GPRInfo::regT1);
                jit.and32(MacroAssembler::TrustedImm32(IndexingModeMask), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                storeArrayModes.link(&jit);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
            }
        }

        if (exit.m_descriptor->m_valueProfile)
            exit.m_descriptor->m_valueProfile.emitReportValue(jit, JSValueRegs(GPRInfo::regT0));
    }

    // Materialize all objects. Don't materialize an object until all
    // of the objects it needs have been materialized. We break cycles
    // by populating objects late - we only consider an object as
    // needing another object if the latter is needed for the
    // allocation of the former.
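    //
    // For example, if materialization A has a property whose value is materialization B,
    // but that property is only stored into A after allocation rather than being passed
    // to A's allocator, then A can be allocated before B and the pointer to B gets filled
    // in during the population pass below.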

    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
        toMaterialize.add(materialization);

    while (!toMaterialize.isEmpty()) {
        unsigned previousToMaterializeSize = toMaterialize.size();

        Vector<ExitTimeObjectMaterialization*> worklist;
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
        for (ExitTimeObjectMaterialization* materialization : worklist) {
            // Check if we can do anything about this right now.
            bool allGood = true;
            for (ExitPropertyValue value : materialization->properties()) {
                if (!value.value().isObjectMaterialization())
                    continue;
                if (!value.location().neededForMaterialization())
                    continue;
                if (toMaterialize.contains(value.value().objectMaterialization())) {
                    // Gotta skip this one, since it needs a
                    // materialization that hasn't been materialized.
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;

            // All systems go for materializing the object. First we
            // recover the values of all of its fields and then we
            // call a function to actually allocate the beast.
            // We only recover the fields that are needed for the allocation.
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
                const ExitPropertyValue& property = materialization->properties()[propertyIndex];
                if (!property.location().neededForMaterialization())
                    continue;

                recoverValue(property.value());
                jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
            }

            static_assert(FunctionTraits<decltype(operationMaterializeObjectInOSR)>::arity < GPRInfo::numberOfArgumentRegisters, "This call assumes that we don't pass arguments on the stack.");
            jit.setupArguments<decltype(operationMaterializeObjectInOSR)>(
                CCallHelpers::TrustedImmPtr(materialization),
                CCallHelpers::TrustedImmPtr(materializationArguments));
            jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
            jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));

            // Let everyone know that we're done.
            toMaterialize.remove(materialization);
        }

        // We expect progress! This ensures that we crash rather than looping infinitely if there
        // is something broken about this fixpoint. Or, this could happen if we ever violate the
        // "materializations form a DAG" rule.
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
    }

    // Now that all the objects have been allocated, we populate them
    // with the correct values. This time we can recover all the
    // fields, including those that are only needed for the allocation.
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
        for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
            recoverValue(materialization->properties()[propertyIndex].value());
            jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
        }

        static_assert(FunctionTraits<decltype(operationPopulateObjectInOSR)>::arity < GPRInfo::numberOfArgumentRegisters, "This call assumes that we don't pass arguments on the stack.");
        jit.setupArguments<decltype(operationPopulateObjectInOSR)>(
            CCallHelpers::TrustedImmPtr(materialization),
            CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
            CCallHelpers::TrustedImmPtr(materializationArguments));
        jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
        jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    }

    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This also does the reboxing.

    for (unsigned index = exit.m_descriptor->m_values.size(); index--;) {
        recoverValue(exit.m_descriptor->m_values[index]);
        jit.store64(GPRInfo::regT0, scratch + index);
    }

    // Henceforth we make it look like the exiting function was called through a register
    // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
    // we restore the various things either according to exit.m_descriptor->m_values or by
    // copying from the old frame, and finally we save the various callee-save registers into
    // where the restoration thunk would restore them from.

    // Before we start messing with the frame, we need to set aside any registers that the
    // FTL code was preserving.
    for (unsigned i = codeBlock->calleeSaveRegisters()->size(); i--;) {
        RegisterAtOffset entry = codeBlock->calleeSaveRegisters()->at(i);
        jit.load64(
            MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
            GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, unwindScratch + i);
    }

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);

    // First set up SP so that our data doesn't get clobbered by signals. The delta is
    // conservative: it reserves room for every local we are about to write, the baseline
    // callee saves, and the largest frame extent any slow path call could need.
    unsigned conservativeStackDelta =
        (exit.m_descriptor->m_values.numberOfLocals() + baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters()) * sizeof(Register) +
        maxFrameExtentForSlowPathCall;
    conservativeStackDelta = WTF::roundUpToMultipleOf(
        stackAlignmentBytes(), conservativeStackDelta);
    jit.addPtr(
        MacroAssembler::TrustedImm32(-conservativeStackDelta),
        MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.checkStackPointerAlignment();

    RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters();
    const RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* vmCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet vmCalleeSavesToSkip = RegisterSet::stackRegisters();
    if (exit.isExceptionHandler()) {
        jit.loadPtr(&vm->topEntryFrame, GPRInfo::regT1);
        jit.addPtr(CCallHelpers::TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), GPRInfo::regT1);
    }

    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!allFTLCalleeSaves.get(reg)) {
            if (exit.isExceptionHandler())
                RELEASE_ASSERT(!vmCalleeSaves->find(reg));
            continue;
        }
        unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg);
        const RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg);
        RegisterAtOffset* vmCalleeSave = nullptr;
        if (exit.isExceptionHandler())
            vmCalleeSave = vmCalleeSaves->find(reg);

        if (reg.isGPR()) {
            GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr();
            RELEASE_ASSERT(regToLoad != GPRInfo::regT1);

            if (unwindIndex == UINT_MAX) {
                // The FTL compilation didn't preserve this register. This means that it also
                // didn't use the register. So its value at the beginning of OSR exit should be
                // preserved by the thunk. Luckily, we saved all registers into the register
                // scratch buffer, so we can restore them from there.
                jit.load64(registerScratch + offsetOfReg(reg), regToLoad);
            } else {
                // The FTL compilation preserved the register. Its new value is therefore
                // irrelevant, but we can get the value that was preserved by using the unwind
                // data. We've already copied all unwind-able preserved registers into the unwind
                // scratch buffer, so we can get it from there.
                jit.load64(unwindScratch + unwindIndex, regToLoad);
            }

            if (baselineRegisterOffset)
                jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
            if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
                jit.store64(regToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
        } else {
            FPRReg fpRegToLoad = baselineRegisterOffset ? FPRInfo::fpRegT0 : reg.fpr();

            if (unwindIndex == UINT_MAX)
                jit.loadDouble(MacroAssembler::TrustedImmPtr(registerScratch + offsetOfReg(reg)), fpRegToLoad);
            else
                jit.loadDouble(MacroAssembler::TrustedImmPtr(unwindScratch + unwindIndex), fpRegToLoad);

            if (baselineRegisterOffset)
                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
            if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
        }
    }

    if (exit.isExceptionHandler()) {
        RegisterAtOffset* vmCalleeSave = vmCalleeSaves->find(GPRInfo::tagTypeNumberRegister);
        jit.store64(GPRInfo::tagTypeNumberRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));

        vmCalleeSave = vmCalleeSaves->find(GPRInfo::tagMaskRegister);
        jit.store64(GPRInfo::tagMaskRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
    }

    size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters();

    // Now get state out of the scratch buffer and place it back into the stack. The values are
    // already reboxed so we just move them.
    for (unsigned index = exit.m_descriptor->m_values.size(); index--;) {
        VirtualRegister reg = exit.m_descriptor->m_values.virtualRegisterForIndex(index);

        if (reg.isLocal() && reg.toLocal() < static_cast<int>(baselineVirtualRegistersForCalleeSaves))
            continue;

        jit.load64(scratch + index, GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(reg));
    }

    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);
    adjustAndJumpToTarget(*vm, jit, exit);

    LinkBuffer patchBuffer(jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
        patchBuffer, OSRExitPtrTag,
        "FTL OSR exit #%u (%s, %s) from %s, with operands = %s",
        exitID, toCString(exit.m_codeOrigin).data(),
        exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
        toCString(ignoringContext<DumpContext>(exit.m_descriptor->m_values)).data());
}

extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
{
    if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit())
        dataLog("Compiling OSR exit with exitID = ", exitID, "\n");

    VM& vm = exec->vm();

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC.
        vm.heap.setExpectDoesGC(true);
    }

    if (vm.callFrameForCatch)
        RELEASE_ASSERT(vm.callFrameForCatch == exec);

    CodeBlock* codeBlock = exec->codeBlock();

    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITType::FTLJIT);

    // It's preferable not to GC while in here. Anyway, doing so wouldn't really be
    // profitable.
    DeferGCForAWhile deferGC(vm.heap);

    JITCode* jitCode = codeBlock->jitCode()->ftl();
    OSRExit& exit = jitCode->osrExit[exitID];

    if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) {
        dataLog("    Owning block: ", pointerDump(codeBlock), "\n");
        dataLog("    Origin: ", exit.m_codeOrigin, "\n");
        if (exit.m_codeOriginForExitProfile != exit.m_codeOrigin)
            dataLog("    Origin for exit profile: ", exit.m_codeOriginForExitProfile, "\n");
        dataLog("    Current call site index: ", exec->callSiteIndex().bits(), "\n");
        dataLog("    Exit is exception handler: ", exit.isExceptionHandler(), "\n");
        dataLog("    Is unwind handler: ", exit.isGenericUnwindHandler(), "\n");
        dataLog("    Exit values: ", exit.m_descriptor->m_values, "\n");
        dataLog("    Value reps: ", listDump(exit.m_valueReps), "\n");
        if (!exit.m_descriptor->m_materializations.isEmpty()) {
            dataLog("    Materializations:\n");
            for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
                dataLog("        ", pointerDump(materialization), "\n");
        }
    }

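    // Make sure the baseline code blocks for the entire inline stack of this code origin
    // exist and are compiled, since the exit ramp is about to jump into baseline code.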
    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);

    compileStub(exitID, jitCode, exit, &vm, codeBlock);

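    // Link the exit thunk's jump directly to the stub we just compiled, so that future
    // exits at this site no longer call into this function.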
    MacroAssembler::repatchJump(
        exit.codeLocationForRepatch(codeBlock), CodeLocationLabel<OSRExitPtrTag>(exit.m_code.code()));

    return exit.m_code.code().executableAddress();
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)