/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PolymorphicAccess.h"

#if ENABLE(JIT)

#include "BinarySwitch.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"
#include "FullCodeOrigin.h"
#include "Heap.h"
#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
#include "SuperSampler.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

namespace PolymorphicAccessInternal {
static const bool verbose = false;
}

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
{
    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

const RegisterSet& AccessGenerationState::liveRegistersForCall()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersForCall;
}

const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
}

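// The registers that are callee-save under both the JS and the C calling conventions:
// registersToNotSaveForJSCall() intersected (via filter()) with registersToNotSaveForCCall().
// Such registers never need to be spilled around a call made from a stub.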
static RegisterSet calleeSaveRegisters()
{
    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
    result.filter(RegisterSet::registersToNotSaveForCCall());
    return result;
}

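// Computes, at most once per generation, both the registers live across a slow-path call and the
// registers that must be preserved for the exception handler. The call set excludes callee-save
// registers, since the callee preserves those for us.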
const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(calleeSaveRegisters());
    }
    return m_liveRegistersForCall;
}

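// Spills the registers live at a call (plus any extras the caller requests) to the stack, and
// returns a SpillState recording which registers were saved and how many stack bytes they occupy,
// so that the matching restore helpers below can undo the spill exactly.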
auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
{
    RegisterSet liveRegisters = liveRegistersForCall();
    liveRegisters.merge(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
    return SpillState {
        WTFMove(liveRegisters),
        numberOfStackBytesUsedForRegisterPreservation
    };
}

void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
{
    // Even if we're a getter, we don't want to ignore the result value the way we normally do,
    // because the getter threw and therefore didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is that if the base and the result are the same register
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = spillState.spilledRegisters;
    // As an optimization, we only need to restore what is live for exception handling. We can
    // construct the dontRestore set to accomplish this by having it contain only what is live
    // at the call but not live for exception handling. By ignoring things that are only live
    // at the call but not at the exception handler, we will restore only things live at the
    // exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

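// Lazily picks the call site index for a call emitted by this stub: if an exception from the call
// would require restoring registers, we allocate a fresh exception-handling call site index;
// otherwise we can reuse the original call site index from the stub info.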
CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

DisposableCallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandling()
{
    RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    RELEASE_ASSERT(m_calculatedCallSiteIndex);
    return DisposableCallSiteIndex::fromCallSiteIndex(m_callSiteIndex);
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }

void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->pushToSave(GPRInfo::regT0);
    jit->loadPtr(&m_vm.topEntryFrame, GPRInfo::regT0);
    jit->copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRInfo::regT0);
    jit->popToRestore(GPRInfo::regT0);

    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception handling call site expects the OSR
        // exit to be arrived at from genericUnwind. Therefore we must model what genericUnwind
        // does here, i.e., set callFrameForCatch and copy the callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here, i.e., we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&m_vm), GPRInfo::callFrameRegister);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call(OperationPtrTag);
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandler));
            });
        jit->jumpToExceptionHandler(m_vm);
    }
}


PolymorphicAccess::PolymorphicAccess() { }
PolymorphicAccess::~PolymorphicAccess() { }

AccessGenerationResult PolymorphicAccess::addCases(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    const Identifier& ident, Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
{
    SuperSamplerScope superSamplerScope(false);

    // This method will add the originalCasesToAdd to the list one at a time while preserving the
    // invariants:
    // - If a newly added case canReplace() any existing case, then the existing case is removed before
    //   the new case is added. Removal doesn't change the order of the list. Any number of existing
    //   cases can be removed via the canReplace() rule.
    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
    //   cascade through the cases in reverse order, you will get the most recent cases first.
    // - If this method fails (i.e., returns without adding the cases), then both the previous case
    //   list and the previous stub are kept intact and the new cases are destroyed. It's OK to
    //   attempt to add more things after failure.

    // First ensure that the originalCasesToAdd doesn't contain duplicates.
    Vector<std::unique_ptr<AccessCase>> casesToAdd;
    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);

        // Add it only if it is not replaced by the subsequent cases in the list.
        bool found = false;
        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
            if (originalCasesToAdd[j]->canReplace(*myCase)) {
                found = true;
                break;
            }
        }

        if (found)
            continue;

        casesToAdd.append(WTFMove(myCase));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");

    // If there aren't any cases to add, then fail on the grounds that there's no point in generating
    // a new stub that will be identical to the old one. Returning MadeNoChanges tells the caller to
    // just keep doing what they were doing before.
    if (casesToAdd.isEmpty())
        return AccessGenerationResult::MadeNoChanges;

    if (stubInfo.accessType != AccessType::InstanceOf) {
        bool shouldReset = false;
        AccessGenerationResult resetResult(AccessGenerationResult::ResetStubAndFireWatchpoints);
        auto considerPolyProtoReset = [&] (Structure* a, Structure* b) {
            if (Structure::shouldConvertToPolyProto(a, b)) {
                // For now, we only reset if this is our first time invalidating this watchpoint.
                // The reason we don't immediately fire this watchpoint is that we may already be
                // watching the poly proto watchpoint, which, if fired, would destroy us. We let
                // whoever handles the result do a delayed fire.
                ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get());
                if (a->rareData()->sharedPolyProtoWatchpoint()->isStillValid()) {
                    shouldReset = true;
                    resetResult.addWatchpointToFire(*a->rareData()->sharedPolyProtoWatchpoint(), StringFireDetail("Detected poly proto optimization opportunity."));
                }
            }
        };

        for (auto& caseToAdd : casesToAdd) {
            for (auto& existingCase : m_list) {
                Structure* a = caseToAdd->structure();
                Structure* b = existingCase->structure();
                considerPolyProtoReset(a, b);
            }
        }
        for (unsigned i = 0; i < casesToAdd.size(); ++i) {
            for (unsigned j = i + 1; j < casesToAdd.size(); ++j) {
                Structure* a = casesToAdd[i]->structure();
                Structure* b = casesToAdd[j]->structure();
                considerPolyProtoReset(a, b);
            }
        }

        if (shouldReset)
            return resetResult;
    }

    // Now add things to the new list. Note that at this point, we will still have old cases that
    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
    for (auto& caseToAdd : casesToAdd) {
        commit(locker, vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
        m_list.append(WTFMove(caseToAdd));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("After addCases: m_list: ", listDump(m_list), "\n");

    return AccessGenerationResult::Buffered;
}

AccessGenerationResult PolymorphicAccess::addCase(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    const Identifier& ident, std::unique_ptr<AccessCase> newAccess)
{
    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
    newAccesses.append(WTFMove(newAccess));
    return addCases(locker, vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
}

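// Returns false if any cell this stub depends on - either via one of its cases or via our
// accumulated weak references - is no longer marked, signaling that the stub is stale.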
bool PolymorphicAccess::visitWeak(VM& vm) const
{
    for (unsigned i = 0; i < size(); ++i) {
        if (!at(i).visitWeak(vm))
            return false;
    }
    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
            if (!vm.heap.isMarked(weakReference.get()))
                return false;
        }
    }
    return true;
}

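// Propagates marking for any Structure transitions the cases perform. Returns true only if every
// case reports that its transitions have been fully propagated.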
bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
{
    bool result = true;
    for (unsigned i = 0; i < size(); ++i)
        result &= at(i).propagateTransitions(visitor);
    return result;
}

void PolymorphicAccess::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[");
    CommaPrinter comma;
    for (auto& entry : m_list)
        out.print(comma, *entry);
    out.print("]");
}

void PolymorphicAccess::commit(
    const GCSafeConcurrentJSLocker&, VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
    StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
{
    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
    // Those common kinds of JSC object accesses don't hit this case.

    for (WatchpointSet* set : accessCase.commit(vm, ident)) {
        Watchpoint* watchpoint =
            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
                watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());

        set->add(watchpoint);
    }
}

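// Note: regeneration prunes cases that can no longer succeed or that a later case can replace,
// emits the surviving cases either as a guard cascade or behind a binary switch on structure ID,
// and finally links everything into a fresh stub routine.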
AccessGenerationResult PolymorphicAccess::regenerate(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
{
    SuperSamplerScope superSamplerScope(false);

    if (PolymorphicAccessInternal::verbose)
        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");

    AccessGenerationState state(vm, codeBlock->globalObject());

    state.access = this;
    state.stubInfo = &stubInfo;
    state.ident = &ident;

    state.baseGPR = stubInfo.baseGPR();
    state.thisGPR = stubInfo.patch.thisGPR;
    state.valueRegs = stubInfo.valueRegs();

    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    state.allocator = &allocator;
    allocator.lock(state.baseGPR);
    if (state.thisGPR != InvalidGPRReg)
        allocator.lock(state.thisGPR);
    allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
    allocator.lock(stubInfo.patch.baseTagGPR);
#endif

    state.scratchGPR = allocator.allocateScratchGPR();

    CCallHelpers jit(codeBlock);
    state.jit = &jit;

    state.preservedReusedRegisterState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    // Regenerating is our opportunity to figure out what our list of cases should look like. We
    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
    // from the code of the current stub (aka previous).
    ListType cases;
    unsigned srcIndex = 0;
    unsigned dstIndex = 0;
    while (srcIndex < m_list.size()) {
        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);

        // If the case had been generated, then we have to keep the original in m_list in case we
        // fail to regenerate. That case may have data structures that are used by the code that it
        // had generated. If the case had not been generated, then we want to remove it from m_list.
        bool isGenerated = someCase->state() == AccessCase::Generated;

        [&] () {
            if (!someCase->couldStillSucceed())
                return;

            // Figure out if this is replaced by any later case. Given two cases A and B where A
            // comes first in the case list, we know that A would have triggered first if we had
            // generated the cases in a cascade. That's why this loop asks B->canReplace(A) but not
            // A->canReplace(B). If A->canReplace(B) was true then A would never have requested
            // repatching in cases where Repatch.cpp would have then gone on to generate B. If that
            // did happen by some fluke, then we'd just miss the redundancy here, which wouldn't be
            // incorrect - just slow. However, if A's checks failed and Repatch.cpp concluded that
            // this new condition could be handled by B and B->canReplace(A), then this says that we
            // don't need A anymore.
            //
            // If we can generate a binary switch, then A->canReplace(B) == B->canReplace(A). So,
            // it doesn't matter that we only do the check in one direction.
            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
                if (m_list[j]->canReplace(*someCase))
                    return;
            }

            if (isGenerated)
                cases.append(someCase->clone());
            else
                cases.append(WTFMove(someCase));
        }();

        if (isGenerated)
            m_list[dstIndex++] = WTFMove(someCase);
    }
    m_list.resize(dstIndex);

    bool generatedFinalCode = false;

    // If the resulting set of cases is so big that we would stop caching and this is InstanceOf,
    // then we want to generate the generic InstanceOf and then stop.
    if (cases.size() >= Options::maxAccessVariantListSize()
        && stubInfo.accessType == AccessType::InstanceOf) {
        while (!cases.isEmpty())
            m_list.append(cases.takeLast());
        cases.append(AccessCase::create(vm, codeBlock, AccessCase::InstanceOfGeneric));
        generatedFinalCode = true;
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("Optimized cases: ", listDump(cases), "\n");

    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
    // won't change that set anymore.

    bool allGuardedByStructureCheck = true;
    bool hasJSGetterSetterCall = false;
    for (auto& newCase : cases) {
        commit(locker, vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
        allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
            hasJSGetterSetterCall = true;
    }

    if (cases.isEmpty()) {
        // This is super unlikely, but we make it legal anyway.
        state.failAndRepatch.append(jit.jump());
    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
        // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
        // one case.
        CCallHelpers::JumpList fallThrough;

        // Cascade through the list, preferring newer entries.
        for (unsigned i = cases.size(); i--;) {
            fallThrough.link(&jit);
            fallThrough.clear();
            cases[i]->generateWithGuard(state, fallThrough);
        }
        state.failAndRepatch.append(fallThrough);
    } else {
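        // Every case is guarded purely by a structure check, so dispatch with a single load of the
        // base cell's structure ID followed by a binary switch over the known structure IDs.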
        jit.load32(
            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
            state.scratchGPR);

        Vector<int64_t> caseValues(cases.size());
        for (unsigned i = 0; i < cases.size(); ++i)
            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());

        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
        while (binarySwitch.advance(jit))
            cases[binarySwitch.caseIndex()]->generate(state);
        state.failAndRepatch.append(binarySwitch.fallThrough());
    }

    if (!state.failAndIgnore.empty()) {
        state.failAndIgnore.link(&jit);

        // Make sure that the inline cache optimization code knows that we are taking slow path because
        // of something that isn't patchable. The slow path will decrement "countdown" and will only
        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
        // that the slow path does not try to patch.
#if CPU(X86) || CPU(X86_64)
        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
#else
        jit.load8(&stubInfo.countdown, state.scratchGPR);
        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
        jit.store8(state.scratchGPR, &stubInfo.countdown);
#endif
    }

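    // If we reused (pushed) any registers, the repatch path must first pop them back; otherwise
    // the failure jumps can go straight to the slow path.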
    CCallHelpers::JumpList failure;
    if (allocator.didReuseRegisters()) {
        state.failAndRepatch.link(&jit);
        state.restoreScratch();
    } else
        failure = state.failAndRepatch;
    failure.append(jit.jump());

    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
    DisposableCallSiteIndex callSiteIndexForExceptionHandling;
    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
        // Emit the exception handler.
        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
        // Note also that this is not reachable from custom getters/setters. Custom getters/setters
        // have their own exception handling logic that doesn't go through genericUnwind.
        MacroAssembler::Label makeshiftCatchHandler = jit.label();

        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
        ASSERT(!spillStateForJSGetterSetter.isEmpty());
        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;

        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
        state.restoreScratch();
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

        HandlerInfo oldHandler = state.originalExceptionHandler();
        DisposableCallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);

                HandlerInfo handlerToRegister = oldHandler;
                handlerToRegister.nativeCode = linkBuffer.locationOf<ExceptionHandlerPtrTag>(makeshiftCatchHandler);
                handlerToRegister.start = newExceptionHandlingCallSite.bits();
                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
                codeBlock->appendExceptionHandler(handlerToRegister);
            });

        // We set these to indicate to the stub to remove itself from the CodeBlock's
        // exception handler table when it is deallocated.
        codeBlockThatOwnsExceptionHandlers = codeBlock;
        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
    }

    LinkBuffer linkBuffer(jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate()) {
        if (PolymorphicAccessInternal::verbose)
            dataLog("Did fail to allocate.\n");
        return AccessGenerationResult::GaveUp;
    }

    CodeLocationLabel<JSInternalPtrTag> successLabel = stubInfo.doneLocation();

    linkBuffer.link(state.success, successLabel);

    linkBuffer.link(failure, stubInfo.slowPathStartLocation());

    if (PolymorphicAccessInternal::verbose)
        dataLog(FullCodeOrigin(codeBlock, stubInfo.codeOrigin), ": Generating polymorphic access stub for ", listDump(cases), "\n");

    MacroAssemblerCodeRef<JITStubRoutinePtrTag> code = FINALIZE_CODE_FOR(
        codeBlock, linkBuffer, JITStubRoutinePtrTag,
        "%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data());

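    // Record whether any case makes a call, and which cells it wants kept alive while the stub is
    // in use; this determines the kind of stub routine created below.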
    bool doesCalls = false;
    Vector<JSCell*> cellsToMark;
    for (auto& entry : cases)
        doesCalls |= entry->doesCalls(&cellsToMark);

    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
    m_watchpoints = WTFMove(state.watchpoints);
    if (!state.weakReferences.isEmpty())
        m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
    if (PolymorphicAccessInternal::verbose)
        dataLog("Returning: ", code.code(), "\n");

    m_list = WTFMove(cases);

    AccessGenerationResult::Kind resultKind;
    if (m_list.size() >= Options::maxAccessVariantListSize() || generatedFinalCode)
        resultKind = AccessGenerationResult::GeneratedFinalCode;
    else
        resultKind = AccessGenerationResult::GeneratedNewCode;

    return AccessGenerationResult(resultKind, code.code());
}

void PolymorphicAccess::aboutToDie()
{
    if (m_stubRoutine)
        m_stubRoutine->aboutToDie();
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
{
    switch (kind) {
    case AccessGenerationResult::MadeNoChanges:
        out.print("MadeNoChanges");
        return;
    case AccessGenerationResult::GaveUp:
        out.print("GaveUp");
        return;
    case AccessGenerationResult::Buffered:
        out.print("Buffered");
        return;
    case AccessGenerationResult::GeneratedNewCode:
        out.print("GeneratedNewCode");
        return;
    case AccessGenerationResult::GeneratedFinalCode:
        out.print("GeneratedFinalCode");
        return;
    case AccessGenerationResult::ResetStubAndFireWatchpoints:
        out.print("ResetStubAndFireWatchpoints");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::AccessType type)
{
    switch (type) {
    case AccessCase::Load:
        out.print("Load");
        return;
    case AccessCase::Transition:
        out.print("Transition");
        return;
    case AccessCase::Replace:
        out.print("Replace");
        return;
    case AccessCase::Miss:
        out.print("Miss");
        return;
    case AccessCase::GetGetter:
        out.print("GetGetter");
        return;
    case AccessCase::Getter:
        out.print("Getter");
        return;
    case AccessCase::Setter:
        out.print("Setter");
        return;
    case AccessCase::CustomValueGetter:
        out.print("CustomValueGetter");
        return;
    case AccessCase::CustomAccessorGetter:
        out.print("CustomAccessorGetter");
        return;
    case AccessCase::CustomValueSetter:
        out.print("CustomValueSetter");
        return;
    case AccessCase::CustomAccessorSetter:
        out.print("CustomAccessorSetter");
        return;
    case AccessCase::IntrinsicGetter:
        out.print("IntrinsicGetter");
        return;
    case AccessCase::InHit:
        out.print("InHit");
        return;
    case AccessCase::InMiss:
        out.print("InMiss");
        return;
    case AccessCase::ArrayLength:
        out.print("ArrayLength");
        return;
    case AccessCase::StringLength:
        out.print("StringLength");
        return;
    case AccessCase::DirectArgumentsLength:
        out.print("DirectArgumentsLength");
        return;
    case AccessCase::ScopedArgumentsLength:
        out.print("ScopedArgumentsLength");
        return;
    case AccessCase::ModuleNamespaceLoad:
        out.print("ModuleNamespaceLoad");
        return;
    case AccessCase::InstanceOfHit:
        out.print("InstanceOfHit");
        return;
    case AccessCase::InstanceOfMiss:
        out.print("InstanceOfMiss");
        return;
    case AccessCase::InstanceOfGeneric:
        out.print("InstanceOfGeneric");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::State state)
{
    switch (state) {
    case AccessCase::Primordial:
        out.print("Primordial");
        return;
    case AccessCase::Committed:
        out.print("Committed");
        return;
    case AccessCase::Generated:
        out.print("Generated");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF

#endif // ENABLE(JIT)