/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "BlockDirectory.h"
#include "JSCast.h"
#include "MarkedBlock.h"
#include "MarkedSpace.h"
#include "Operations.h"
#include "SuperSampler.h"
#include "VM.h"

namespace JSC {

inline unsigned MarkedBlock::Handle::cellsPerBlock()
{
    return MarkedSpace::blockPayload / cellSize();
}

inline bool MarkedBlock::isNewlyAllocatedStale() const
{
    return footer().m_newlyAllocatedVersion != space()->newlyAllocatedVersion();
}

inline bool MarkedBlock::hasAnyNewlyAllocated()
{
    return !isNewlyAllocatedStale();
}

inline Heap* MarkedBlock::heap() const
{
    return &vm()->heap;
}

inline MarkedSpace* MarkedBlock::space() const
{
    return &heap()->objectSpace();
}

inline MarkedSpace* MarkedBlock::Handle::space() const
{
    return &heap()->objectSpace();
}

inline bool MarkedBlock::marksConveyLivenessDuringMarking(HeapVersion markingVersion)
{
    return marksConveyLivenessDuringMarking(footer().m_markingVersion, markingVersion);
}

inline bool MarkedBlock::marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion)
{
    // This returns true if any of these is true:
    // - We just created the block and so the bits are clear already.
    // - This block has objects marked during the last GC, and so its version was up-to-date just
    //   before the current collection did beginMarking(). This means that any objects that have
    //   their mark bit set are valid objects that were never deleted, and so are candidates for
    //   marking in any conservative scan. Using our jargon, they are "live".
    // - We did ~2^32 collections and rotated the version back to null, so we needed to hard-reset
    //   everything. If the marks had been stale, we would have cleared them. So, we can be sure that
    //   any set mark bit reflects objects marked during last GC, i.e. "live" objects.
    // It would be absurd to use this method when not collecting, since this special "one version
    // back" state only makes sense when we're in a concurrent collection and have to be
    // conservative.
    ASSERT(space()->isMarking());
    if (heap()->collectionScope() != CollectionScope::Full)
        return false;
    return myMarkingVersion == MarkedSpace::nullVersion
        || MarkedSpace::nextVersion(myMarkingVersion) == markingVersion;
}

inline bool MarkedBlock::Handle::isAllocated()
{
    return m_directory->isAllocated(NoLockingNecessary, this);
}

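// A cell in this block is considered live if the block is fully allocated, if the cell's
// newlyAllocated bit is set (and that bit vector is up to date for the current version), or if its
// mark bit is set and the marks still convey liveness for the given marking version.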
ALWAYS_INLINE bool MarkedBlock::Handle::isLive(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const HeapCell* cell)
{
    if (directory()->isAllocated(NoLockingNecessary, this))
        return true;

    // We need to do this while holding the lock because marks might be stale. In that case, newly
    // allocated will not yet be valid. Consider this interleaving.
    //
    // One thread is doing this:
    //
    // 1) IsLiveChecksNewlyAllocated: We check if newly allocated is valid. If it is valid, and the bit is
    //    set, we return true. Let's assume that this executes atomically. It doesn't have to in general,
    //    but we can assume that for the purpose of seeing this bug.
    //
    // 2) IsLiveChecksMarks: Having failed that, we check the mark bits. This step implies the rest of
    //    this function. It happens under a lock so it's atomic.
    //
    // Another thread is doing:
    //
    // 1) AboutToMarkSlow: This is the entire aboutToMarkSlow function, and let's say it's atomic. It
    //    sorta is since it holds a lock, but that doesn't actually make it atomic with respect to
    //    IsLiveChecksNewlyAllocated, since that does not hold a lock in our scenario.
    //
    // The harmful interleaving happens if we start out with a block that has stale mark bits that
    // nonetheless convey liveness during marking (the off-by-one version trick). The interleaving is
    // just:
    //
    // IsLiveChecksNewlyAllocated AboutToMarkSlow IsLiveChecksMarks
    //
    // We started with valid marks but invalid newly allocated. So, the first part doesn't think that
    // anything is live, but dutifully drops down to the marks step. But in the meantime, we clear the
    // mark bits and transfer their contents into newlyAllocated. So IsLiveChecksMarks also sees nothing
    // live. Ooops!
    //
    // Fortunately, since this is just a read critical section, we can use a CountingLock.
    //
    // Probably many users of CountingLock could use its lambda-based and locker-based APIs. But here, we
    // need to ensure that everything is ALWAYS_INLINE. It's hard to do that when using lambdas. It's
    // more reliable to write it inline instead. Empirically, it seems like how inline this is has some
    // impact on perf - around 2% on splay if you get it wrong.

    MarkedBlock& block = this->block();
    MarkedBlock::Footer& footer = block.footer();

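    // Optimistic, lock-free read: snapshot the CountingLock's count, read the versions and bits we
    // need through a data dependency, and then validate that the count has not changed. If we cannot
    // take the snapshot or validation fails, we fall through to the locked path below.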
    auto count = footer.m_lock.tryOptimisticFencelessRead();
    if (count.value) {
        Dependency fenceBefore = Dependency::fence(count.input);
        MarkedBlock& fencedBlock = *fenceBefore.consume(&block);
        MarkedBlock::Footer& fencedFooter = fencedBlock.footer();
        MarkedBlock::Handle* fencedThis = fenceBefore.consume(this);

        ASSERT_UNUSED(fencedThis, !fencedThis->isFreeListed());

        HeapVersion myNewlyAllocatedVersion = fencedFooter.m_newlyAllocatedVersion;
        if (myNewlyAllocatedVersion == newlyAllocatedVersion) {
            bool result = fencedBlock.isNewlyAllocated(cell);
            if (footer.m_lock.fencelessValidate(count.value, Dependency::fence(result)))
                return result;
        } else {
            HeapVersion myMarkingVersion = fencedFooter.m_markingVersion;
            if (myMarkingVersion != markingVersion
                && (!isMarking || !fencedBlock.marksConveyLivenessDuringMarking(myMarkingVersion, markingVersion))) {
                if (footer.m_lock.fencelessValidate(count.value, Dependency::fence(myMarkingVersion)))
                    return false;
            } else {
                bool result = fencedFooter.m_marks.get(block.atomNumber(cell));
                if (footer.m_lock.fencelessValidate(count.value, Dependency::fence(result)))
                    return result;
            }
        }
    }

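    // The optimistic read could not be taken or failed to validate; redo the check under the lock.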
    auto locker = holdLock(footer.m_lock);

    ASSERT(!isFreeListed());

    HeapVersion myNewlyAllocatedVersion = footer.m_newlyAllocatedVersion;
    if (myNewlyAllocatedVersion == newlyAllocatedVersion)
        return block.isNewlyAllocated(cell);

    if (block.areMarksStale(markingVersion)) {
        if (!isMarking)
            return false;
        if (!block.marksConveyLivenessDuringMarking(markingVersion))
            return false;
    }

    return footer.m_marks.get(block.atomNumber(cell));
}

inline bool MarkedBlock::Handle::isLiveCell(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const void* p)
{
    if (!m_block->isAtom(p))
        return false;
    return isLive(markingVersion, newlyAllocatedVersion, isMarking, static_cast<const HeapCell*>(p));
}

inline bool MarkedBlock::Handle::isLive(const HeapCell* cell)
{
    return isLive(space()->markingVersion(), space()->newlyAllocatedVersion(), space()->isMarking(), cell);
}

inline bool MarkedBlock::Handle::isLiveCell(const void* p)
{
    return isLiveCell(space()->markingVersion(), space()->newlyAllocatedVersion(), space()->isMarking(), p);
}

inline bool MarkedBlock::Handle::areMarksStaleForSweep()
{
    return marksMode() == MarksStale;
}

// The following has to be true for specialization to kick in:
//
// sweepMode == SweepToFreeList
// scribbleMode == DontScribble
// newlyAllocatedMode == DoesNotHaveNewlyAllocated
// destructionMode != BlockHasDestructorsAndCollectorIsRunning
//
// emptyMode = IsEmpty
//     destructionMode = DoesNotNeedDestruction
//         marksMode = MarksNotStale (1)
//         marksMode = MarksStale (2)
// emptyMode = NotEmpty
//     destructionMode = DoesNotNeedDestruction
//         marksMode = MarksNotStale (3)
//         marksMode = MarksStale (4)
//     destructionMode = NeedsDestruction
//         marksMode = MarksNotStale (5)
//         marksMode = MarksStale (6)
//
// Only the DoesNotNeedDestruction one should be specialized by MarkedBlock.
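//
// finishSweepKnowingHeapCellType() below selects the specialized instantiation via nested switches.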

template<bool specialize, MarkedBlock::Handle::EmptyMode specializedEmptyMode, MarkedBlock::Handle::SweepMode specializedSweepMode, MarkedBlock::Handle::SweepDestructionMode specializedDestructionMode, MarkedBlock::Handle::ScribbleMode specializedScribbleMode, MarkedBlock::Handle::NewlyAllocatedMode specializedNewlyAllocatedMode, MarkedBlock::Handle::MarksMode specializedMarksMode, typename DestroyFunc>
void MarkedBlock::Handle::specializedSweep(FreeList* freeList, MarkedBlock::Handle::EmptyMode emptyMode, MarkedBlock::Handle::SweepMode sweepMode, MarkedBlock::Handle::SweepDestructionMode destructionMode, MarkedBlock::Handle::ScribbleMode scribbleMode, MarkedBlock::Handle::NewlyAllocatedMode newlyAllocatedMode, MarkedBlock::Handle::MarksMode marksMode, const DestroyFunc& destroyFunc)
{
    if (specialize) {
        emptyMode = specializedEmptyMode;
        sweepMode = specializedSweepMode;
        destructionMode = specializedDestructionMode;
        scribbleMode = specializedScribbleMode;
        newlyAllocatedMode = specializedNewlyAllocatedMode;
        marksMode = specializedMarksMode;
    }

    RELEASE_ASSERT(!(destructionMode == BlockHasNoDestructors && sweepMode == SweepOnly));

    SuperSamplerScope superSamplerScope(false);

    MarkedBlock& block = this->block();
    MarkedBlock::Footer& footer = block.footer();

    if (false)
        dataLog(RawPointer(this), "/", RawPointer(&block), ": MarkedBlock::Handle::specializedSweep!\n");

    unsigned cellSize = this->cellSize();

    VM& vm = *this->vm();
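    // destroy() runs the client-supplied destructor at most once per cell: a zapped cell has already
    // been destroyed, and we zap each cell right after destroying it.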
    auto destroy = [&] (void* cell) {
        JSCell* jsCell = static_cast<JSCell*>(cell);
        if (!jsCell->isZapped()) {
            destroyFunc(vm, jsCell);
            jsCell->zap();
        }
    };

    m_directory->setIsDestructible(NoLockingNecessary, this, false);

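    // Fast path for a completely empty block with no newlyAllocated bits to preserve: run any
    // destructors over the whole payload and, if sweeping to a free list, hand the entire payload to
    // the bump allocator as a single contiguous range instead of building a cell-by-cell free list.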
    if (Options::useBumpAllocator()
        && emptyMode == IsEmpty
        && newlyAllocatedMode == DoesNotHaveNewlyAllocated) {

        // This is an incredibly powerful assertion that checks the sanity of our block bits.
        if (marksMode == MarksNotStale && !footer.m_marks.isEmpty()) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print("Block ", RawPointer(&block), ": marks not empty!\n");
                    out.print("Block lock is held: ", footer.m_lock.isHeld(), "\n");
                    out.print("Marking version of block: ", footer.m_markingVersion, "\n");
                    out.print("Marking version of heap: ", space()->markingVersion(), "\n");
                    UNREACHABLE_FOR_PLATFORM();
                });
        }

        char* startOfLastCell = static_cast<char*>(cellAlign(block.atoms() + m_endAtom - 1));
        char* payloadEnd = startOfLastCell + cellSize;
        RELEASE_ASSERT(payloadEnd - MarkedBlock::blockSize <= bitwise_cast<char*>(&block));
        char* payloadBegin = bitwise_cast<char*>(block.atoms());

        if (sweepMode == SweepToFreeList)
            setIsFreeListed();
        if (space()->isMarking())
            footer.m_lock.unlock();
        if (destructionMode != BlockHasNoDestructors) {
            for (char* cell = payloadBegin; cell < payloadEnd; cell += cellSize)
                destroy(cell);
        }
        if (sweepMode == SweepToFreeList) {
            if (scribbleMode == Scribble)
                scribble(payloadBegin, payloadEnd - payloadBegin);
            freeList->initializeBump(payloadEnd, payloadEnd - payloadBegin);
        }
        if (false)
            dataLog("Quickly swept block ", RawPointer(this), " with cell size ", cellSize, " and attributes ", m_attributes, ": ", pointerDump(freeList), "\n");
        return;
    }

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    uintptr_t secret;
    cryptographicallyRandomValues(&secret, sizeof(uintptr_t));
    bool isEmpty = true;
    Vector<size_t> deadCells;
    auto handleDeadCell = [&] (size_t i) {
        HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&block.atoms()[i]);

        if (destructionMode != BlockHasNoDestructors)
            destroy(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast_ptr<FreeCell*>(cell);
            if (scribbleMode == Scribble)
                scribble(freeCell, cellSize);
            freeCell->setNext(head, secret);
            head = freeCell;
            ++count;
        }
    };
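    // Walk every cell in the block. Unless the block is known to be empty, a cell is kept alive by a
    // set mark bit (when marks are not stale) or a set newlyAllocated bit (when that vector is valid);
    // everything else is treated as dead and handed to handleDeadCell above, either immediately or,
    // for destructor blocks swept while the collector is running, after the footer lock is dropped.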
    for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) {
        if (emptyMode == NotEmpty
            && ((marksMode == MarksNotStale && footer.m_marks.get(i))
                || (newlyAllocatedMode == HasNewlyAllocated && footer.m_newlyAllocated.get(i)))) {
            isEmpty = false;
            continue;
        }

        if (destructionMode == BlockHasDestructorsAndCollectorIsRunning)
            deadCells.append(i);
        else
            handleDeadCell(i);
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && newlyAllocatedMode == HasNewlyAllocated)
        footer.m_newlyAllocatedVersion = MarkedSpace::nullVersion;

    if (space()->isMarking())
        footer.m_lock.unlock();

    if (destructionMode == BlockHasDestructorsAndCollectorIsRunning) {
        for (size_t i : deadCells)
            handleDeadCell(i);
    }

    if (sweepMode == SweepToFreeList) {
        freeList->initializeList(head, secret, count * cellSize);
        setIsFreeListed();
    } else if (isEmpty)
        m_directory->setIsEmpty(NoLockingNecessary, this, true);
    if (false)
        dataLog("Slowly swept block ", RawPointer(&block), " with cell size ", cellSize, " and attributes ", m_attributes, ": ", pointerDump(freeList), "\n");
}

template<typename DestroyFunc>
void MarkedBlock::Handle::finishSweepKnowingHeapCellType(FreeList* freeList, const DestroyFunc& destroyFunc)
{
    SweepMode sweepMode = freeList ? SweepToFreeList : SweepOnly;
    SweepDestructionMode destructionMode = this->sweepDestructionMode();
    EmptyMode emptyMode = this->emptyMode();
    ScribbleMode scribbleMode = this->scribbleMode();
    NewlyAllocatedMode newlyAllocatedMode = this->newlyAllocatedMode();
    MarksMode marksMode = this->marksMode();

    auto trySpecialized = [&] () -> bool {
        if (scribbleMode != DontScribble)
            return false;
        if (newlyAllocatedMode != DoesNotHaveNewlyAllocated)
            return false;
        if (destructionMode != BlockHasDestructors)
            return false;

        switch (emptyMode) {
        case IsEmpty:
            switch (sweepMode) {
            case SweepOnly:
                switch (marksMode) {
                case MarksNotStale:
                    specializedSweep<true, IsEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, IsEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, destroyFunc);
                    return true;
                case MarksStale:
                    specializedSweep<true, IsEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, IsEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, destroyFunc);
                    return true;
                }
                RELEASE_ASSERT_NOT_REACHED();
            case SweepToFreeList:
                switch (marksMode) {
                case MarksNotStale:
                    specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, IsEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, destroyFunc);
                    return true;
                case MarksStale:
                    specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, IsEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, destroyFunc);
                    return true;
                }
            }
            RELEASE_ASSERT_NOT_REACHED();
        case NotEmpty:
            switch (sweepMode) {
            case SweepOnly:
                switch (marksMode) {
                case MarksNotStale:
                    specializedSweep<true, NotEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, NotEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, destroyFunc);
                    return true;
                case MarksStale:
                    specializedSweep<true, NotEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, NotEmpty, SweepOnly, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, destroyFunc);
                    return true;
                }
                RELEASE_ASSERT_NOT_REACHED();
            case SweepToFreeList:
                switch (marksMode) {
                case MarksNotStale:
                    specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, NotEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, destroyFunc);
                    return true;
                case MarksStale:
                    specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, NotEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, destroyFunc);
                    return true;
                }
            }
        }

        return false;
    };

    if (trySpecialized())
        return;

    // The template arguments don't matter because the first one is false.
    specializedSweep<false, IsEmpty, SweepOnly, BlockHasNoDestructors, DontScribble, HasNewlyAllocated, MarksStale>(freeList, emptyMode, sweepMode, destructionMode, scribbleMode, newlyAllocatedMode, marksMode, destroyFunc);
}

inline MarkedBlock::Handle::SweepDestructionMode MarkedBlock::Handle::sweepDestructionMode()
{
    if (m_attributes.destruction == NeedsDestruction) {
        if (space()->isMarking())
            return BlockHasDestructorsAndCollectorIsRunning;
        return BlockHasDestructors;
    }
    return BlockHasNoDestructors;
}

inline bool MarkedBlock::Handle::isEmpty()
{
    return m_directory->isEmpty(NoLockingNecessary, this);
}

inline MarkedBlock::Handle::EmptyMode MarkedBlock::Handle::emptyMode()
{
    // It's not obvious, but this is the only way to know if the block is empty. It's the only
    // bit that captures these caveats:
    // - It's true when the block is freshly allocated.
    // - It's true if the block had been swept in the past, all destructors were called, and that
    //   sweep proved that the block is empty.
    return isEmpty() ? IsEmpty : NotEmpty;
}

inline MarkedBlock::Handle::ScribbleMode MarkedBlock::Handle::scribbleMode()
{
    return scribbleFreeCells() ? Scribble : DontScribble;
}

inline MarkedBlock::Handle::NewlyAllocatedMode MarkedBlock::Handle::newlyAllocatedMode()
{
    return block().hasAnyNewlyAllocated() ? HasNewlyAllocated : DoesNotHaveNewlyAllocated;
}

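// Marks are "not stale" if they match the current marking version or, while marking is in progress,
// if they convey liveness via the one-version-back rule described above.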
inline MarkedBlock::Handle::MarksMode MarkedBlock::Handle::marksMode()
{
    HeapVersion markingVersion = space()->markingVersion();
    bool marksAreUseful = !block().areMarksStale(markingVersion);
    if (space()->isMarking())
        marksAreUseful |= block().marksConveyLivenessDuringMarking(markingVersion);
    return marksAreUseful ? MarksNotStale : MarksStale;
}

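// A hypothetical caller of forEachLiveCell(), counting the live cells in a block handle:
//
//     size_t liveCount = 0;
//     handle->forEachLiveCell([&] (size_t, HeapCell*, HeapCell::Kind) -> IterationStatus {
//         ++liveCount;
//         return IterationStatus::Continue;
//     });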
template <typename Functor>
inline IterationStatus MarkedBlock::Handle::forEachLiveCell(const Functor& functor)
{
    // FIXME: This is not currently efficient to use in the constraint solver because isLive() grabs a
    // lock to protect itself from concurrent calls to aboutToMarkSlow(). But we could get around this by
    // having this function grab the lock before and after the iteration, and check if the marking version
    // changed. If it did, just run again. Inside the loop, we only need to ensure that if a race were to
    // happen, we will just overlook objects. I think that because of how aboutToMarkSlow() does things,
    // a race ought to mean that it just returns false when it should have returned true - but this is
    // something that would have to be verified carefully.
    //
    // NOTE: Some users of forEachLiveCell require that their callback is called exactly once for
    // each live cell. We could optimize this function for those users by using a slow loop if the
    // block is in marks-mean-live mode. That would only affect blocks that had partial survivors
    // during the last collection and no survivors (yet) during this collection.
    //
    // https://bugs.webkit.org/show_bug.cgi?id=180315

    HeapCell::Kind kind = m_attributes.cellKind;
    for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) {
        HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
        if (!isLive(cell))
            continue;

        if (functor(i, cell, kind) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

template <typename Functor>
inline IterationStatus MarkedBlock::Handle::forEachDeadCell(const Functor& functor)
{
    HeapCell::Kind kind = m_attributes.cellKind;
    for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) {
        HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
        if (isLive(cell))
            continue;

        if (functor(cell, kind) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

template <typename Functor>
inline IterationStatus MarkedBlock::Handle::forEachMarkedCell(const Functor& functor)
{
    HeapCell::Kind kind = m_attributes.cellKind;
    MarkedBlock& block = this->block();
    bool areMarksStale = block.areMarksStale();
    WTF::loadLoadFence();
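    // If the marks are stale, they belong to a previous marking version, so no cell in this block is
    // marked for the current one and there is nothing to visit. The load-load fence keeps the
    // staleness check ordered before the mark-bit reads below.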
    if (areMarksStale)
        return IterationStatus::Continue;
    for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) {
        if (!block.footer().m_marks.get(i))
            continue;

        HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);

        if (functor(i, cell, kind) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

} // namespace JSC