1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGOSREntry.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "CallFrame.h"
32#include "CodeBlock.h"
33#include "DFGJITCode.h"
34#include "DFGNode.h"
35#include "InterpreterInlines.h"
36#include "JIT.h"
37#include "JSCInlines.h"
38#include "VMInlines.h"
39#include <wtf/CommaPrinter.h>
40
41namespace JSC { namespace DFG {
42
// Prints a human-readable description of this OSR entry point: the bytecode
// index it targets, the machine-code address to jump to, and, for every
// baseline stack slot (arguments, then locals), the expected value profile
// plus how the slot is treated on entry (maps to a register, is reshuffled,
// is ignored by the machine frame, or gets overwritten).
void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print("bc#", m_bytecodeIndex, ", machine code = ", RawPointer(m_machineCode.executableAddress()));
    out.print(", stack rules = [");
    
    // Describes the fate of a single baseline stack slot during OSR entry.
    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        // Scan the reshuffling list: if this register appears as a source, it
        // maps to the corresponding destination (first match wins). If it only
        // ever appears as a destination, its original contents are clobbered.
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        // A register not mentioned in any reshuffling simply maps to itself.
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            // A local the machine frame never uses is ignored outright.
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        // Note any forced representation conversion applied at entry
        // (see the conversion loops in prepareOSREntry).
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };
    
    // Arguments are printed in decreasing index order; locals in increasing.
    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgument(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }
    
    out.print("], machine stack used = ", m_machineStackUsed);
}
88
// Convenience overload: dump without a DumpContext.
void OSREntryData::dump(PrintStream& out) const
{
    dumpInContext(out, nullptr);
}
93
// Attempts to set up an on-stack-replacement transition from baseline code
// into this (DFG-compiled) CodeBlock at the given bytecode index.
//
// On success, returns a scratch buffer laid out as:
//   [0]                   size_t frameSize (the DFG frame register count)
//   [1]                   void*  target machine PC (retagged for entry)
//   [2 ..]                the call-frame header followed by the converted
//                         frame contents ("pivot" points just past the header)
// Returning non-null means the caller is committed to entering.
//
// On failure (predictions mismatch, stack can't grow, entrypoint optimized
// out, target is FTL, etc.), returns nullptr; the caller must then keep
// executing baseline code and should call either
// codeBlock->optimizeAfterWarmUp() or codeBlock->dontOptimizeAnytimeSoon().
//
// SUPPRESS_ASAN: this function reads raw stack slots via asanUnsafeJSValue(),
// so ASan instrumentation is disabled for it.
SUPPRESS_ASAN
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());

    if (!Options::useOSREntryToDFG())
        return nullptr;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }

    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);

    // A non-zero bytecode index means we're entering at a loop, not at the
    // function's start; record that for future compilation decisions.
    if (bytecodeIndex)
        codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);

    if (codeBlock->jitType() != JITCode::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);

        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, lets call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.

        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return nullptr;
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);

    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return nullptr;
    }

    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);

    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().

    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.

    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        // The DFG code expects this argument; if the caller didn't pass it,
        // entry would run on an undefined that the profile never saw.
        if (argument >= exec->argumentCountIncludingThis()) {
            if (Options::verboseOSR()) {
                dataLogF("    OSR failed because argument %zu was not passed, expected ", argument);
                entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
                dataLogF(".\n");
            }
            return nullptr;
        }

        // Argument 0 is |this|; user-visible arguments start at index 1.
        JSValue value;
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);

        if (!entry->m_expectedValues.argument(argument).validateOSREntryValue(value, FlushedJSValue)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return nullptr;
        }
    }

    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        JSValue value = exec->registers()[localOffset].asanUnsafeJSValue();
        FlushFormat format = FlushedJSValue;

        // Locals the DFG keeps as Int52 must currently hold an AnyInt value;
        // validate the numeric value boxed as a double.
        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!value.isAnyInt()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected ",
                    "machine int.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asAnyInt());
            format = FlushedInt52;
        }

        // Locals the DFG keeps as unboxed doubles must currently be numbers.
        if (entry->m_localsForcedDouble.get(local)) {
            if (!value.isNumber()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected number.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asNumber());
            format = FlushedDouble;
        }

        if (!entry->m_expectedValues.local(local).validateOSREntryValue(value, format)) {
            dataLogLnIf(Options::verboseOSR(),
                "    OSR failed because variable ", VirtualRegister(localOffset), " is ",
                value, ", expected ",
                entry->m_expectedValues.local(local), ".");
            return nullptr;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.

    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm->ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()]))) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return nullptr;
    }

    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.

    // 3) Set up the data in the scratch buffer and perform data format conversions.

    // The scratch buffer must be big enough for whichever frame is larger:
    // the DFG's machine frame or the baseline frame we're copying from.
    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    // Layout: slot 0 = frame size, slot 1 = target PC, then the frame image.
    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + CallFrame::headerSizeInRegisters + maxFrameSize))->dataBuffer());

    *bitwise_cast<size_t*>(scratch + 0) = frameSize;

    void* targetPC = entry->m_machineCode.executableAddress();
    RELEASE_ASSERT(codeBlock->jitCode()->contains(entry->m_machineCode.untaggedExecutableAddress()));
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    // Re-sign the code pointer for the OSR entry thunk; the tag incorporates
    // the frame pointer (exec) so the pointer is only usable for this frame.
    *bitwise_cast<void**>(scratch + 1) = retagCodePtr(targetPC, OSREntryPtrTag, bitwise_cast<PtrTag>(exec));

    // "pivot" points at local 0 within the scratch frame image; the call-frame
    // header lives immediately before it, locals after it.
    Register* pivot = scratch + 2 + CallFrame::headerSizeInRegisters;

    for (int index = -CallFrame::headerSizeInRegisters; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);

        if (reg.isLocal()) {
            // Store forced-double locals as raw doubles.
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }

            // Store forced-Int52 locals shifted into Int52 representation.
            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }

        // Everything else (header slots and plain locals) is copied verbatim.
        pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue();
    }

    // 4) Reshuffle those registers that need reshuffling.
    // Read all sources first, then write all destinations, so overlapping
    // from/to pairs can't clobber each other mid-shuffle.
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];

    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    const RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());

    unsigned registerCount = registerSaveLocations->size();
    VMEntryRecord* record = vmEntryRecord(vm->topEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        // Stack registers and FPRs are not saved here.
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());

        // Copy the saved value from the VM entry record into the slot the DFG
        // frame expects it at (offsets are relative to the frame header).
        *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
    }
#endif

    // 7) Fix the call frame to have the right code block.

    *bitwise_cast<CodeBlock**>(pivot - 1 - CallFrameSlot::codeBlock) = codeBlock;

    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
341
// Attempts OSR entry into optimized (DFG or FTL) code at an op_catch
// exception handler. Returns the machine code address of the compiled catch
// entrypoint on success, or null when entry is not possible (no compiled
// entrypoint, incompatible argument types, or insufficient stack). Live
// locals are staged into dfgCommon->catchOSREntryBuffer for the optimized
// code to pick up.
MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT || codeBlock->jitType() == JITCode::FTLJIT);

    // Respect the per-tier OSR-entry toggles.
    if (!Options::useOSREntryToDFG() && codeBlock->jitCode()->jitType() == JITCode::DFGJIT)
        return nullptr;
    if (!Options::useOSREntryToFTL() && codeBlock->jitCode()->jitType() == JITCode::FTLJIT)
        return nullptr;

    VM& vm = exec->vm();

    CommonData* dfgCommon = codeBlock->jitCode()->dfgCommon();
    RELEASE_ASSERT(dfgCommon);
    DFG::CatchEntrypointData* catchEntrypoint = dfgCommon->catchOSREntryDataForBytecodeIndex(bytecodeIndex);
    if (!catchEntrypoint) {
        // This can be null under some circumstances. The most common is that we didn't
        // compile this op_catch as an entrypoint since it had never executed when starting
        // the compilation.
        return nullptr;
    }

    // We're only allowed to OSR enter if we've proven we have compatible argument types.
    for (unsigned argument = 0; argument < catchEntrypoint->argumentFormats.size(); ++argument) {
        JSValue value = exec->uncheckedR(virtualRegisterForArgument(argument)).jsValue();
        switch (catchEntrypoint->argumentFormats[argument]) {
        case DFG::FlushedInt32:
            if (!value.isInt32())
                return nullptr;
            break;
        case DFG::FlushedCell:
            if (!value.isCell())
                return nullptr;
            break;
        case DFG::FlushedBoolean:
            if (!value.isBoolean())
                return nullptr;
            break;
        case DFG::DeadFlush:
            // This means the argument is not alive. Therefore, it's allowed to be any type.
            break;
        case DFG::FlushedJSValue:
            // An argument is trivially a JSValue.
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // The optimized code may need a taller frame than baseline; bail (rather
    // than throw) if we can't grow the stack to fit it.
    unsigned frameSizeForCheck = dfgCommon->requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm.ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()])))
        return nullptr;

    // Stage the values of the profiled locals (arguments are consumed in
    // place) into the catch OSR entry buffer, in the profile's order.
    auto instruction = exec->codeBlock()->instructions().at(exec->bytecodeOffset());
    ASSERT(instruction->is<OpCatch>());
    ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(exec).m_buffer;
    JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
    unsigned index = 0;
    buffer->forEach([&] (ValueProfileAndOperand& profile) {
        if (!VirtualRegister(profile.m_operand).isLocal())
            return;
        dataBuffer[index] = exec->uncheckedR(profile.m_operand).jsValue();
        ++index;
    });

    // The active length of catchOSREntryBuffer will be zeroed by ClearCatchLocals node.
    dfgCommon->catchOSREntryBuffer->setActiveLength(sizeof(JSValue) * index);
    return catchEntrypoint->machineCode;
}
410
411} } // namespace JSC::DFG
412
413#endif // ENABLE(DFG_JIT)
414