1/*
2 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23 * THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "MarkedBlock.h"
28
29#include "AlignedMemoryAllocator.h"
30#include "BlockDirectoryInlines.h"
31#include "FreeListInlines.h"
32#include "JSCast.h"
33#include "JSDestructibleObject.h"
34#include "JSCInlines.h"
35#include "MarkedBlockInlines.h"
36#include "SuperSampler.h"
37#include "SweepingScope.h"
38#include <wtf/CommaPrinter.h>
39
40namespace JSC {
41
// Out-of-class definition so that ODR-uses of the static (e.g. taking its
// address) link correctly.
const size_t MarkedBlock::blockSize;

// Debugging aid: flip computeBalance to true to log the live-block count
// (incremented in tryCreate, decremented in ~Handle) every 10 changes, which
// makes MarkedBlock leaks visible.
static const bool computeBalance = false;
static size_t balance;
46
47MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap, AlignedMemoryAllocator* alignedMemoryAllocator)
48{
49 if (computeBalance) {
50 balance++;
51 if (!(balance % 10))
52 dataLog("MarkedBlock Balance: ", balance, "\n");
53 }
54 void* blockSpace = alignedMemoryAllocator->tryAllocateAlignedMemory(blockSize, blockSize);
55 if (!blockSpace)
56 return nullptr;
57 if (scribbleFreeCells())
58 scribble(blockSpace, blockSize);
59 return new Handle(heap, alignedMemoryAllocator, blockSpace);
60}
61
// Constructs the out-of-line Handle and placement-constructs the MarkedBlock
// itself inside the pre-allocated, block-aligned storage (blockSpace).
MarkedBlock::Handle::Handle(Heap& heap, AlignedMemoryAllocator* alignedMemoryAllocator, void* blockSpace)
    : m_alignedMemoryAllocator(alignedMemoryAllocator)
    // The weak set is created with an empty container first; we cannot point
    // it at the block before the block exists.
    , m_weakSet(heap.vm(), CellContainer())
{
    // Placement-new: the MarkedBlock lives inside the aligned allocation, not
    // on the ordinary heap.
    m_block = new (NotNull, blockSpace) MarkedBlock(*heap.vm(), *this);

    // Now that the block exists, retarget the weak set at it.
    m_weakSet.setContainer(*m_block);

    heap.didAllocateBlock(blockSize);
}
72
// Tears down the block: unlink from its directory, run the block's destructor
// in place, and return the aligned storage to the allocator.
MarkedBlock::Handle::~Handle()
{
    // Cache the heap reference up front; heap() presumably resolves through
    // state we are about to destroy — TODO confirm.
    Heap& heap = *this->heap();
    if (computeBalance) {
        balance--;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    removeFromDirectory();
    // Explicit destructor call matches the placement-new in the constructor;
    // the storage itself is released separately below.
    m_block->~MarkedBlock();
    m_alignedMemoryAllocator->freeAlignedMemory(m_block);
    heap.didFreeBlock(blockSize);
}
86
// The block's metadata lives in a Footer placed at a fixed offset inside the
// block; construct it in place here.
MarkedBlock::MarkedBlock(VM& vm, Handle& handle)
{
    new (&footer()) Footer(vm, handle);
    if (false)
        dataLog(RawPointer(this), ": Allocated.\n");
}
93
MarkedBlock::~MarkedBlock()
{
    // Mirror of the placement-new in the constructor: destroy the in-place footer.
    footer().~Footer();
}
98
// Footer holds the block's GC metadata. Both versions start at nullVersion,
// i.e. "stale", so the first marking/allocation cycle initializes the bitmaps.
MarkedBlock::Footer::Footer(VM& vm, Handle& handle)
    : m_handle(handle)
    , m_vm(&vm)
    , m_markingVersion(MarkedSpace::nullVersion)
    , m_newlyAllocatedVersion(MarkedSpace::nullVersion)
{
}
106
// Out-of-line so the destructor of the footer's members need not be visible
// to every includer of the header.
MarkedBlock::Footer::~Footer()
{
}
110
// Undoes a sweep-to-free-list without recording any newly-allocated bits.
// Only legal while the block is actually free-listed.
void MarkedBlock::Handle::unsweepWithNoNewlyAllocated()
{
    RELEASE_ASSERT(m_isFreeListed);
    m_isFreeListed = false;
}
116
// Marks this block as currently being allocated out of. A free-listed block
// is by definition not "empty" from the directory's point of view.
void MarkedBlock::Handle::setIsFreeListed()
{
    m_directory->setIsEmpty(NoLockingNecessary, this, false);
    m_isFreeListed = true;
}
122
// Stops allocating out of this block, given the free list the allocator was
// using. Rebuilds the newlyAllocated bitmap so that heap introspection can
// tell live cells (allocated since the free list was built) from dead ones
// (still on the free list).
void MarkedBlock::Handle::stopAllocating(const FreeList& freeList)
{
    auto locker = holdLock(blockFooter().m_lock);

    if (false)
        dataLog(RawPointer(this), ": MarkedBlock::Handle::stopAllocating!\n");
    ASSERT(!directory()->isAllocated(NoLockingNecessary, this));

    if (!isFreeListed()) {
        if (false)
            dataLog("There ain't no newly allocated.\n");
        // This means that we either didn't use this block at all for allocation since last GC,
        // or someone had already done stopAllocating() before.
        ASSERT(freeList.allocationWillFail());
        return;
    }

    if (false)
        dataLog("Free list: ", freeList, "\n");

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    // Start from scratch with an up-to-date version, then set the bit for
    // every cell and clear it again for cells still sitting on the free list.
    blockFooter().m_newlyAllocated.clearAll();
    blockFooter().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();

    forEachCell(
        [&] (HeapCell* cell, HeapCell::Kind) -> IterationStatus {
            block().setNewlyAllocated(cell);
            return IterationStatus::Continue;
        });

    freeList.forEach(
        [&] (HeapCell* cell) {
            if (false)
                dataLog("Free cell: ", RawPointer(cell), "\n");
            // Zap dead destructible cells so a later sweep won't try to run
            // their destructors.
            if (m_attributes.destruction == NeedsDestruction)
                cell->zap();
            block().clearNewlyAllocated(cell);
        });

    m_isFreeListed = false;
}
167
// Called when the heap is going away: pretend every cell is dead so the final
// sweep runs destructors for everything that needs it.
void MarkedBlock::Handle::lastChanceToFinalize()
{
    directory()->setIsAllocated(NoLockingNecessary, this, false);
    // Force the destructible bit so the sweep below actually destructs.
    directory()->setIsDestructible(NoLockingNecessary, this, true);
    blockFooter().m_marks.clearAll();
    block().clearHasAnyMarked();
    // Stamp the (now all-clear) mark bits with the current version so the
    // sweep treats them as authoritative, i.e. everything is dead.
    blockFooter().m_markingVersion = heap()->objectSpace().markingVersion();
    m_weakSet.lastChanceToFinalize();
    // Likewise clear and freshen newlyAllocated so nothing is considered live.
    blockFooter().m_newlyAllocated.clearAll();
    blockFooter().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
    sweep(nullptr);
}
180
// Resumes allocating out of this block after a stopAllocating(), rebuilding
// the free list we had at that point (by sweeping against newlyAllocated).
void MarkedBlock::Handle::resumeAllocating(FreeList& freeList)
{
    {
        // Scoped so the footer lock is dropped before the sweep below, which
        // presumably takes it itself when marking is active — TODO confirm.
        auto locker = holdLock(blockFooter().m_lock);

        if (false)
            dataLog(RawPointer(this), ": MarkedBlock::Handle::resumeAllocating!\n");
        ASSERT(!directory()->isAllocated(NoLockingNecessary, this));
        ASSERT(!isFreeListed());

        if (!block().hasAnyNewlyAllocated()) {
            if (false)
                dataLog("There ain't no newly allocated.\n");
            // This means we had already exhausted the block when we stopped allocation.
            freeList.clear();
            return;
        }
    }

    // Re-create our free list from before stopping allocation. Note that this may return an empty
    // freelist, in which case the block will still be Marked!
    sweep(&freeList);
}
204
205void MarkedBlock::Handle::zap(const FreeList& freeList)
206{
207 freeList.forEach(
208 [&] (HeapCell* cell) {
209 if (m_attributes.destruction == NeedsDestruction)
210 cell->zap();
211 });
212}
213
214void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion)
215{
216 ASSERT(vm()->heap.objectSpace().isMarking());
217 auto locker = holdLock(footer().m_lock);
218
219 if (!areMarksStale(markingVersion))
220 return;
221
222 BlockDirectory* directory = handle().directory();
223
224 if (handle().directory()->isAllocated(holdLock(directory->bitvectorLock()), &handle())
225 || !marksConveyLivenessDuringMarking(markingVersion)) {
226 if (false)
227 dataLog(RawPointer(this), ": Clearing marks without doing anything else.\n");
228 // We already know that the block is full and is already recognized as such, or that the
229 // block did not survive the previous GC. So, we can clear mark bits the old fashioned
230 // way. Note that it's possible for such a block to have newlyAllocated with an up-to-
231 // date version! If it does, then we want to leave the newlyAllocated alone, since that
232 // means that we had allocated in this previously empty block but did not fill it up, so
233 // we created a newlyAllocated.
234 footer().m_marks.clearAll();
235 } else {
236 if (false)
237 dataLog(RawPointer(this), ": Doing things.\n");
238 HeapVersion newlyAllocatedVersion = space()->newlyAllocatedVersion();
239 if (footer().m_newlyAllocatedVersion == newlyAllocatedVersion) {
240 // When do we get here? The block could not have been filled up. The newlyAllocated bits would
241 // have had to be created since the end of the last collection. The only things that create
242 // them are aboutToMarkSlow, lastChanceToFinalize, and stopAllocating. If it had been
243 // aboutToMarkSlow, then we shouldn't be here since the marks wouldn't be stale anymore. It
244 // cannot be lastChanceToFinalize. So it must be stopAllocating. That means that we just
245 // computed the newlyAllocated bits just before the start of an increment. When we are in that
246 // mode, it seems as if newlyAllocated should subsume marks.
247 ASSERT(footer().m_newlyAllocated.subsumes(footer().m_marks));
248 footer().m_marks.clearAll();
249 } else {
250 footer().m_newlyAllocated.setAndClear(footer().m_marks);
251 footer().m_newlyAllocatedVersion = newlyAllocatedVersion;
252 }
253 }
254 clearHasAnyMarked();
255 WTF::storeStoreFence();
256 footer().m_markingVersion = markingVersion;
257
258 // This means we're the first ones to mark any object in this block.
259 directory->setIsMarkingNotEmpty(holdLock(directory->bitvectorLock()), &handle(), true);
260}
261
// Forgets all newly-allocated state: clears the bitmap and resets its version
// to null so the bitmap is treated as stale.
void MarkedBlock::resetAllocated()
{
    footer().m_newlyAllocated.clearAll();
    footer().m_newlyAllocatedVersion = MarkedSpace::nullVersion;
}
267
// Resets the marking version to null, clearing the mark bits first if they
// were already stale (see comment below for why that matters on wraparound).
void MarkedBlock::resetMarks()
{
    // We want aboutToMarkSlow() to see what the mark bits were after the last collection. It uses
    // the version number to distinguish between the marks having already been stale before
    // beginMarking(), or just stale now that beginMarking() bumped the version. If we have a version
    // wraparound, then we will call this method before resetting the version to null. When the
    // version is null, aboutToMarkSlow() will assume that the marks were not stale as of before
    // beginMarking(). Hence the need to whip the marks into shape.
    if (areMarksStale())
        footer().m_marks.clearAll();
    footer().m_markingVersion = MarkedSpace::nullVersion;
}
280
#if !ASSERT_DISABLED
// Debug-only check that this block's mark bits are current for the ongoing
// collection.
void MarkedBlock::assertMarksNotStale()
{
    ASSERT(footer().m_markingVersion == vm()->heap.objectSpace().markingVersion());
}
#endif // !ASSERT_DISABLED
287
288bool MarkedBlock::areMarksStale()
289{
290 return areMarksStale(vm()->heap.objectSpace().markingVersion());
291}
292
293bool MarkedBlock::Handle::areMarksStale()
294{
295 return m_block->areMarksStale();
296}
297
298bool MarkedBlock::isMarked(const void* p)
299{
300 return isMarked(vm()->heap.objectSpace().markingVersion(), p);
301}
302
// Called when the allocator has exhausted this block's free list: the block
// transitions from free-listed to fully allocated.
void MarkedBlock::Handle::didConsumeFreeList()
{
    auto locker = holdLock(blockFooter().m_lock);
    if (false)
        dataLog(RawPointer(this), ": MarkedBlock::Handle::didConsumeFreeList!\n");
    ASSERT(isFreeListed());
    m_isFreeListed = false;
    directory()->setIsAllocated(NoLockingNecessary, this, true);
}
312
313size_t MarkedBlock::markCount()
314{
315 return areMarksStale() ? 0 : footer().m_marks.count();
316}
317
// Resets the biased mark counter to its bias, i.e. "zero cells marked".
void MarkedBlock::clearHasAnyMarked()
{
    footer().m_biasedMarkCount = footer().m_markCountBias;
}
322
// Slow path of noteMarked(): the biased mark count crossed its threshold, so
// tell the directory this block is retired for the current marking phase.
void MarkedBlock::noteMarkedSlow()
{
    BlockDirectory* directory = handle().directory();
    directory->setIsMarkingRetired(holdLock(directory->bitvectorLock()), &handle(), true);
}
328
329void MarkedBlock::Handle::removeFromDirectory()
330{
331 if (!m_directory)
332 return;
333
334 m_directory->removeBlock(this);
335}
336
// Wires this block into a directory at the given bitvector index, and derives
// the per-cell geometry and mark-count bias from the directory's cell size.
void MarkedBlock::Handle::didAddToDirectory(BlockDirectory* directory, size_t index)
{
    ASSERT(m_index == std::numeric_limits<size_t>::max());
    ASSERT(!m_directory);

    RELEASE_ASSERT(directory->subspace()->alignedMemoryAllocator() == m_alignedMemoryAllocator);

    m_index = index;
    m_directory = directory;
    blockFooter().m_subspace = directory->subspace();

    // Round the cell size up to a whole number of atoms, and cap m_endAtom so
    // that a cell starting at the last valid atom still fits in the block.
    size_t cellSize = directory->cellSize();
    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
    m_endAtom = endAtom - m_atomsPerCell + 1;

    m_attributes = directory->attributes();

    if (!isJSCellKind(m_attributes.cellKind))
        RELEASE_ASSERT(m_attributes.destruction == DoesNotNeedDestruction);

    // The bias is chosen so that the biased count crosses zero exactly when
    // the marked fraction reaches minMarkedBlockUtilization.
    double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock());

    // The mark count bias should be comfortably within this range.
    RELEASE_ASSERT(markCountBias > static_cast<double>(std::numeric_limits<int16_t>::min()));
    RELEASE_ASSERT(markCountBias < 0);

    // This means we haven't marked anything yet.
    blockFooter().m_biasedMarkCount = blockFooter().m_markCountBias = static_cast<int16_t>(markCountBias);
}
366
// Inverse of didAddToDirectory: clears the index, directory, and subspace
// links so the block can be re-added elsewhere (or destroyed).
void MarkedBlock::Handle::didRemoveFromDirectory()
{
    ASSERT(m_index != std::numeric_limits<size_t>::max());
    ASSERT(m_directory);

    m_index = std::numeric_limits<size_t>::max();
    m_directory = nullptr;
    blockFooter().m_subspace = nullptr;
}
376
#if !ASSERT_DISABLED
// Debug-only sanity check: the cell belongs to this VM and is properly
// aligned to a cell boundary within this block.
void MarkedBlock::assertValidCell(VM& vm, HeapCell* cell) const
{
    RELEASE_ASSERT(&vm == this->vm());
    RELEASE_ASSERT(const_cast<MarkedBlock*>(this)->handle().cellAlign(cell) == cell);
}
#endif
384
// Prints, for each of the directory's per-block bitvectors, whether this
// block's bit is set — a compact one-line dump of the block's state.
void MarkedBlock::Handle::dumpState(PrintStream& out)
{
    CommaPrinter comma;
    directory()->forEachBitVectorWithName(
        holdLock(directory()->bitvectorLock()),
        [&] (FastBitVector& bitvector, const char* name) {
            out.print(comma, name, ":", bitvector[index()] ? "YES" : "no");
        });
}
394
395Subspace* MarkedBlock::Handle::subspace() const
396{
397 return directory()->subspace();
398}
399
// Sweeps the block: reclaims dead cells, optionally building a free list for
// future allocation (when freeList is non-null). Dispatches to template
// specializations of specializedSweep for the common no-destructor cases so
// that destructor spaces don't force re-specialization.
void MarkedBlock::Handle::sweep(FreeList* freeList)
{
    SweepingScope sweepingScope(*heap());

    SweepMode sweepMode = freeList ? SweepToFreeList : SweepOnly;

    m_directory->setIsUnswept(NoLockingNecessary, this, false);

    m_weakSet.sweep();

    // Destruction only actually needs to run if both the block's attributes
    // say so and the directory still flags this block as destructible.
    bool needsDestruction = m_attributes.destruction == NeedsDestruction
        && m_directory->isDestructible(NoLockingNecessary, this);

    // A sweep that neither builds a free list nor runs destructors has
    // nothing left to do.
    if (sweepMode == SweepOnly && !needsDestruction)
        return;

    if (m_isFreeListed) {
        dataLog("FATAL: ", RawPointer(this), "->sweep: block is free-listed.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (isAllocated()) {
        dataLog("FATAL: ", RawPointer(this), "->sweep: block is allocated.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    // NOTE(review): lock is taken here without an unlocking RAII scope; the
    // matching unlock presumably happens inside specializedSweep/finishSweep —
    // confirm before restructuring.
    if (space()->isMarking())
        blockFooter().m_lock.lock();

    subspace()->didBeginSweepingToFreeList(this);

    if (needsDestruction) {
        // Destructor-running sweeps are handled by the subspace, which knows
        // the cell type's destruction protocol.
        subspace()->finishSweep(*this, freeList);
        return;
    }

    // Handle the no-destructor specializations here, since we have the most of those. This
    // ensures that they don't get re-specialized for every destructor space.

    EmptyMode emptyMode = this->emptyMode();
    ScribbleMode scribbleMode = this->scribbleMode();
    NewlyAllocatedMode newlyAllocatedMode = this->newlyAllocatedMode();
    MarksMode marksMode = this->marksMode();

    // When the first template argument (specialize) is true, the runtime mode
    // arguments are presumably ignored in favor of the template parameters —
    // which is why the runtime args below don't always match the template args.
    auto trySpecialized = [&] () -> bool {
        if (sweepMode != SweepToFreeList)
            return false;
        if (scribbleMode != DontScribble)
            return false;
        if (newlyAllocatedMode != DoesNotHaveNewlyAllocated)
            return false;

        switch (emptyMode) {
        case IsEmpty:
            switch (marksMode) {
            case MarksNotStale:
                specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
                return true;
            case MarksStale:
                specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
                return true;
            }
            break;
        case NotEmpty:
            switch (marksMode) {
            case MarksNotStale:
                specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
                return true;
            case MarksStale:
                specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
                return true;
            }
            break;
        }

        return false;
    };

    if (trySpecialized())
        return;

    // The template arguments don't matter because the first one is false.
    specializedSweep<false, IsEmpty, SweepOnly, BlockHasNoDestructors, DontScribble, HasNewlyAllocated, MarksStale>(freeList, emptyMode, sweepMode, BlockHasNoDestructors, scribbleMode, newlyAllocatedMode, marksMode, [] (VM&, JSCell*) { });
}
484
// Returns whether target currently sits on this block's free list. Only
// meaningful while the block is free-listed; the directory tracks the cells.
bool MarkedBlock::Handle::isFreeListedCell(const void* target) const
{
    ASSERT(isFreeListed());
    return m_directory->isFreeListedCell(target);
}
490
491} // namespace JSC
492
493namespace WTF {
494
495void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode mode)
496{
497 switch (mode) {
498 case JSC::MarkedBlock::Handle::SweepToFreeList:
499 out.print("SweepToFreeList");
500 return;
501 case JSC::MarkedBlock::Handle::SweepOnly:
502 out.print("SweepOnly");
503 return;
504 }
505 RELEASE_ASSERT_NOT_REACHED();
506}
507
508} // namespace WTF
509
510