1/*
2 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
3 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
4 * Copyright (C) 2003-2018 Apple Inc. All rights reserved.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 */
21
22#pragma once
23
24#include "CellAttributes.h"
25#include "DestructionMode.h"
26#include "HeapCell.h"
27#include "IterationStatus.h"
28#include "WeakSet.h"
29#include <wtf/Atomics.h>
30#include <wtf/Bitmap.h>
31#include <wtf/HashFunctions.h>
32#include <wtf/CountingLock.h>
33#include <wtf/StdLibExtras.h>
34
35namespace JSC {
36
37class AlignedMemoryAllocator;
38class FreeList;
39class Heap;
40class JSCell;
41class BlockDirectory;
42class MarkedSpace;
43class SlotVisitor;
44class Subspace;
45
46typedef uint32_t HeapVersion;
47
48// A marked block is a page-aligned container for heap-allocated objects.
49// Objects are allocated within cells of the marked block. For a given
50// marked block, all cells have the same size. Objects smaller than the
51// cell size may be allocated in the marked block, in which case the
52// allocation suffers from internal fragmentation: wasted space whose
53// size is equal to the difference between the cell size and the object
54// size.
55
// A MarkedBlock is one page-aligned chunk of GC heap. The object payload
// occupies the front of the block; per-block GC metadata (the Footer) lives
// at the end of the same allocation, reachable via footer(). A separate
// heap-allocated Handle carries the mutator-side bookkeeping (directory,
// weak set, sweep state) so that the block itself can stay payload + footer.
class MarkedBlock {
    WTF_MAKE_NONCOPYABLE(MarkedBlock);
    friend class LLIntOffsetsExtractor;
    friend struct VerifyMarked;

public:
    class Footer;
    class Handle;
private:
    friend class Footer;
    friend class Handle;
public:
    // Smallest unit of allocation granularity; every cell size is a multiple of this.
    static constexpr size_t atomSize = 16; // bytes

    // Block size must be at least as large as the system page size.
#if CPU(PPC64) || CPU(PPC64LE) || CPU(PPC) || CPU(UNKNOWN)
    static constexpr size_t blockSize = 64 * KB;
#else
    static constexpr size_t blockSize = 16 * KB;
#endif

    static constexpr size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static constexpr size_t atomsPerBlock = blockSize / atomSize;

    static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
    static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");

    struct VoidFunctor {
        typedef void ReturnType;
        void returnValue() { }
    };

    class CountFunctor {
    public:
        typedef size_t ReturnType;

        CountFunctor() : m_count(0) { }
        void count(size_t count) const { m_count += count; }
        ReturnType returnValue() const { return m_count; }

    private:
        // FIXME: This is mutable because we're using a functor rather than C++ lambdas.
        // https://bugs.webkit.org/show_bug.cgi?id=159644
        mutable ReturnType m_count;
    };

    // Mutator-side bookkeeping for one block: which BlockDirectory owns it,
    // its cell size/attributes, its weak set, and the sweep/allocate state
    // machine. Allocated separately from the block payload.
    class Handle {
        WTF_MAKE_NONCOPYABLE(Handle);
        WTF_MAKE_FAST_ALLOCATED;
        friend class LLIntOffsetsExtractor;
        friend class MarkedBlock;
        friend struct VerifyMarked;
    public:

        ~Handle();

        MarkedBlock& block();
        MarkedBlock::Footer& blockFooter();

        void* cellAlign(void*);

        bool isEmpty();

        void lastChanceToFinalize();

        BlockDirectory* directory() const;
        Subspace* subspace() const;
        AlignedMemoryAllocator* alignedMemoryAllocator() const;
        Heap* heap() const;
        inline MarkedSpace* space() const;
        VM* vm() const;
        WeakSet& weakSet();

        enum SweepMode { SweepOnly, SweepToFreeList };

        // Sweeping ensures that destructors get called and removes the block from the unswept
        // set. Sweeping to free list also removes the block from the empty set, if it was in that
        // set. Sweeping with SweepOnly may add this block to the empty set, if the block is found
        // to be empty. The free-list being null implies SweepOnly.
        //
        // Note that you need to make sure that the empty bit reflects reality. If it's not set
        // and the block is freshly created, then we'll make the mistake of running destructors in
        // the block. If it's not set and the block has nothing marked, then we'll make the
        // mistake of making a pop freelist rather than a bump freelist.
        void sweep(FreeList*);

        // This is to be called by Subspace.
        template<typename DestroyFunc>
        void finishSweepKnowingHeapCellType(FreeList*, const DestroyFunc&);

        void unsweepWithNoNewlyAllocated();

        void zap(const FreeList&);

        void shrink();

        void visitWeakSet(SlotVisitor&);
        void reapWeakSet();

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void stopAllocating(const FreeList&);
        void resumeAllocating(FreeList&); // Call this if you canonicalized a block for some non-collection related purpose.

        size_t cellSize();
        inline unsigned cellsPerBlock();

        const CellAttributes& attributes() const;
        DestructionMode destruction() const;
        bool needsDestruction() const;
        HeapCell::Kind cellKind() const;

        size_t markCount();
        size_t size();

        bool isAllocated();

        bool isLive(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const HeapCell*);
        inline bool isLiveCell(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const void*);

        bool isLive(const HeapCell*);
        bool isLiveCell(const void*);

        bool isFreeListedCell(const void* target) const;

        template <typename Functor> IterationStatus forEachCell(const Functor&);
        template <typename Functor> inline IterationStatus forEachLiveCell(const Functor&);
        template <typename Functor> inline IterationStatus forEachDeadCell(const Functor&);
        template <typename Functor> inline IterationStatus forEachMarkedCell(const Functor&);

        JS_EXPORT_PRIVATE bool areMarksStale();
        bool areMarksStaleForSweep();

        void assertMarksNotStale();

        bool isFreeListed() const { return m_isFreeListed; }

        size_t index() const { return m_index; }

        void removeFromDirectory();

        void didAddToDirectory(BlockDirectory*, size_t index);
        void didRemoveFromDirectory();

        void dumpState(PrintStream&);

    private:
        Handle(Heap&, AlignedMemoryAllocator*, void*);

        enum SweepDestructionMode { BlockHasNoDestructors, BlockHasDestructors, BlockHasDestructorsAndCollectorIsRunning };
        enum ScribbleMode { DontScribble, Scribble };
        enum EmptyMode { IsEmpty, NotEmpty };
        enum NewlyAllocatedMode { HasNewlyAllocated, DoesNotHaveNewlyAllocated };
        enum MarksMode { MarksStale, MarksNotStale };

        SweepDestructionMode sweepDestructionMode();
        EmptyMode emptyMode();
        ScribbleMode scribbleMode();
        NewlyAllocatedMode newlyAllocatedMode();
        MarksMode marksMode();

        // Sweep specialized on all the mode enums so the hot loop compiles
        // down to the minimal code for each configuration.
        template<bool, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, typename DestroyFunc>
        void specializedSweep(FreeList*, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, const DestroyFunc&);

        void setIsFreeListed();

        // Intrusive doubly-linked list links; presumably maintained by the
        // owning BlockDirectory — confirm in BlockDirectory.
        MarkedBlock::Handle* m_prev { nullptr };
        MarkedBlock::Handle* m_next { nullptr };

        size_t m_atomsPerCell { std::numeric_limits<size_t>::max() };
        size_t m_endAtom { std::numeric_limits<size_t>::max() }; // This is a fuzzy end. Always test for < m_endAtom.

        CellAttributes m_attributes;
        bool m_isFreeListed { false };

        AlignedMemoryAllocator* m_alignedMemoryAllocator { nullptr };
        BlockDirectory* m_directory { nullptr };
        size_t m_index { std::numeric_limits<size_t>::max() };
        WeakSet m_weakSet;

        MarkedBlock* m_block { nullptr };
    };

private:
    static constexpr size_t atomAlignmentMask = atomSize - 1;

    typedef char Atom[atomSize];

public:
    // Per-block GC metadata. Lives inside the block allocation itself, at
    // offset endAtom * atomSize (see footer()).
    class Footer {
    public:
        Footer(VM&, Handle&);
        ~Footer();

    private:
        friend class LLIntOffsetsExtractor;
        friend class MarkedBlock;

        Handle& m_handle;
        VM* m_vm;
        Subspace* m_subspace;

        CountingLock m_lock;

        // The actual mark count can be computed by doing: m_biasedMarkCount - m_markCountBias. Note
        // that this count is racy. It will accurately detect whether or not exactly zero things were
        // marked, but if N things got marked, then this may report anything in the range [1, N] (or
        // before unbiased, it would be [1 + m_markCountBias, N + m_markCountBias].)
        int16_t m_biasedMarkCount;

        // We bias the mark count so that if m_biasedMarkCount >= 0 then the block should be retired.
        // We go to all this trouble to make marking a bit faster: this way, marking knows when to
        // retire a block using a js/jns on m_biasedMarkCount.
        //
        // For example, if a block has room for 100 objects and retirement happens whenever 90% are
        // live, then m_markCountBias will be -90. This way, when marking begins, this will cause us to
        // set m_biasedMarkCount to -90 as well, since:
        //
        // m_biasedMarkCount = actualMarkCount + m_markCountBias.
        //
        // Marking an object will increment m_biasedMarkCount. Once 90 objects get marked, we will have
        // m_biasedMarkCount = 0, which will trigger retirement. In other words, we want to set
        // m_markCountBias like so:
        //
        // m_markCountBias = -(minMarkedBlockUtilization * cellsPerBlock)
        //
        // All of this also means that you can detect if any objects are marked by doing:
        //
        // m_biasedMarkCount != m_markCountBias
        int16_t m_markCountBias;

        HeapVersion m_markingVersion;
        HeapVersion m_newlyAllocatedVersion;

        Bitmap<atomsPerBlock> m_marks;
        Bitmap<atomsPerBlock> m_newlyAllocated;
    };

private:
    Footer& footer();
    const Footer& footer() const;

public:
    // First atom occupied by the Footer; atoms [0, endAtom) hold the payload.
    static constexpr size_t endAtom = (blockSize - sizeof(Footer)) / atomSize;
    static constexpr size_t payloadSize = endAtom * atomSize;
    static constexpr size_t footerSize = blockSize - payloadSize;

    static_assert(payloadSize == ((blockSize - sizeof(MarkedBlock::Footer)) & ~(atomSize - 1)), "Payload size computed the alternate way should give the same result");

    static MarkedBlock::Handle* tryCreate(Heap&, AlignedMemoryAllocator*);

    Handle& handle();
    const Handle& handle() const;

    VM* vm() const;
    inline Heap* heap() const;
    inline MarkedSpace* space() const;

    static bool isAtomAligned(const void*);
    static MarkedBlock* blockFor(const void*);
    size_t atomNumber(const void*);

    size_t markCount();

    bool isMarked(const void*);
    bool isMarked(HeapVersion markingVersion, const void*);
    bool isMarked(const void*, Dependency);
    bool testAndSetMarked(const void*, Dependency);

    bool isAtom(const void*);
    void clearMarked(const void*);

    bool isNewlyAllocated(const void*);
    void setNewlyAllocated(const void*);
    void clearNewlyAllocated(const void*);
    const Bitmap<atomsPerBlock>& newlyAllocated() const;

    HeapVersion newlyAllocatedVersion() const { return footer().m_newlyAllocatedVersion; }

    inline bool isNewlyAllocatedStale() const;

    inline bool hasAnyNewlyAllocated();
    void resetAllocated();

    size_t cellSize();
    const CellAttributes& attributes() const;

    bool hasAnyMarked() const;
    void noteMarked();
#if ASSERT_DISABLED
    void assertValidCell(VM&, HeapCell*) const { }
#else
    void assertValidCell(VM&, HeapCell*) const;
#endif

    WeakSet& weakSet();

    JS_EXPORT_PRIVATE bool areMarksStale();
    bool areMarksStale(HeapVersion markingVersion);

    Dependency aboutToMark(HeapVersion markingVersion);

#if ASSERT_DISABLED
    void assertMarksNotStale() { }
#else
    JS_EXPORT_PRIVATE void assertMarksNotStale();
#endif

    void resetMarks();

    bool isMarkedRaw(const void* p);
    HeapVersion markingVersion() const { return footer().m_markingVersion; }

    const Bitmap<atomsPerBlock>& marks() const;

    CountingLock& lock() { return footer().m_lock; }

    Subspace* subspace() const { return footer().m_subspace; }

    static constexpr size_t offsetOfFooter = endAtom * atomSize;

private:
    MarkedBlock(VM&, Handle&);
    ~MarkedBlock();
    Atom* atoms();

    JS_EXPORT_PRIVATE void aboutToMarkSlow(HeapVersion markingVersion);
    void clearHasAnyMarked();

    void noteMarkedSlow();

    inline bool marksConveyLivenessDuringMarking(HeapVersion markingVersion);
    inline bool marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion);
};
393
394inline MarkedBlock::Footer& MarkedBlock::footer()
395{
396 return *bitwise_cast<MarkedBlock::Footer*>(atoms() + endAtom);
397}
398
399inline const MarkedBlock::Footer& MarkedBlock::footer() const
400{
401 return const_cast<MarkedBlock*>(this)->footer();
402}
403
404inline MarkedBlock::Handle& MarkedBlock::handle()
405{
406 return footer().m_handle;
407}
408
409inline const MarkedBlock::Handle& MarkedBlock::handle() const
410{
411 return const_cast<MarkedBlock*>(this)->handle();
412}
413
// Returns the block this handle describes. m_block is set when the handle is
// created, so this is expected to be non-null for a live handle.
inline MarkedBlock& MarkedBlock::Handle::block()
{
    return *m_block;
}
418
419inline MarkedBlock::Footer& MarkedBlock::Handle::blockFooter()
420{
421 return block().footer();
422}
423
// The block's payload starts at the block's own address: the block *is* an
// array of atoms, with the Footer occupying the atoms past endAtom.
inline MarkedBlock::Atom* MarkedBlock::atoms()
{
    return reinterpret_cast<Atom*>(this);
}
428
429inline bool MarkedBlock::isAtomAligned(const void* p)
430{
431 return !(reinterpret_cast<uintptr_t>(p) & atomAlignmentMask);
432}
433
434inline void* MarkedBlock::Handle::cellAlign(void* p)
435{
436 uintptr_t base = reinterpret_cast<uintptr_t>(block().atoms());
437 uintptr_t bits = reinterpret_cast<uintptr_t>(p);
438 bits -= base;
439 bits -= bits % cellSize();
440 bits += base;
441 return reinterpret_cast<void*>(bits);
442}
443
444inline MarkedBlock* MarkedBlock::blockFor(const void* p)
445{
446 return reinterpret_cast<MarkedBlock*>(reinterpret_cast<uintptr_t>(p) & blockMask);
447}
448
// The BlockDirectory this block currently belongs to; null until
// didAddToDirectory() runs (m_directory defaults to nullptr).
inline BlockDirectory* MarkedBlock::Handle::directory() const
{
    return m_directory;
}
453
// The allocator that provided this block's page-aligned memory.
inline AlignedMemoryAllocator* MarkedBlock::Handle::alignedMemoryAllocator() const
{
    return m_alignedMemoryAllocator;
}
458
// The owning Heap, recovered from the weak set rather than stored separately.
inline Heap* MarkedBlock::Handle::heap() const
{
    return m_weakSet.heap();
}
463
// The owning VM, recovered from the weak set rather than stored separately.
inline VM* MarkedBlock::Handle::vm() const
{
    return m_weakSet.vm();
}
468
// The owning VM, cached in the footer so the block can answer without
// touching its Handle.
inline VM* MarkedBlock::vm() const
{
    return footer().m_vm;
}
473
// The weak references associated with cells in this block.
inline WeakSet& MarkedBlock::Handle::weakSet()
{
    return m_weakSet;
}
478
// Forwards to the Handle, which owns the weak set.
inline WeakSet& MarkedBlock::weakSet()
{
    return handle().weakSet();
}
483
// Releases excess capacity held by the weak set.
inline void MarkedBlock::Handle::shrink()
{
    m_weakSet.shrink();
}
488
489inline void MarkedBlock::Handle::visitWeakSet(SlotVisitor& visitor)
490{
491 return m_weakSet.visit(visitor);
492}
493
// Reaps (finalizes dead entries of) this block's weak set.
inline void MarkedBlock::Handle::reapWeakSet()
{
    m_weakSet.reap();
}
498
// Size in bytes of each cell in this block; all cells in a block share one size.
inline size_t MarkedBlock::Handle::cellSize()
{
    return m_atomsPerCell * atomSize;
}
503
// Forwards to the Handle, which stores the size-class information.
inline size_t MarkedBlock::cellSize()
{
    return handle().cellSize();
}
508
// The cell attributes (destruction mode, cell kind) shared by all cells here.
inline const CellAttributes& MarkedBlock::Handle::attributes() const
{
    return m_attributes;
}
513
// Forwards to the Handle, which stores the attributes.
inline const CellAttributes& MarkedBlock::attributes() const
{
    return handle().attributes();
}
518
// True if cells in this block require their destructors to run when swept.
inline bool MarkedBlock::Handle::needsDestruction() const
{
    return m_attributes.destruction == NeedsDestruction;
}
523
// The destruction mode shared by all cells in this block.
inline DestructionMode MarkedBlock::Handle::destruction() const
{
    return m_attributes.destruction;
}
528
// The HeapCell kind shared by all cells in this block.
inline HeapCell::Kind MarkedBlock::Handle::cellKind() const
{
    return m_attributes.cellKind;
}
533
// Number of marked cells, as reported by the block (note: the underlying
// biased count is racy by design — see Footer::m_biasedMarkCount).
inline size_t MarkedBlock::Handle::markCount()
{
    return m_block->markCount();
}
538
// Total bytes occupied by marked cells (mark count times cell size).
inline size_t MarkedBlock::Handle::size()
{
    return markCount() * cellSize();
}
543
544inline size_t MarkedBlock::atomNumber(const void* p)
545{
546 return (reinterpret_cast<uintptr_t>(p) - reinterpret_cast<uintptr_t>(this)) / atomSize;
547}
548
549inline bool MarkedBlock::areMarksStale(HeapVersion markingVersion)
550{
551 return markingVersion != footer().m_markingVersion;
552}
553
// Prepares the block for marking in the given version: takes the slow path
// (which clears stale marks) only when the versions differ, and returns a
// Dependency on the version load so later mark-bit reads are ordered after it.
inline Dependency MarkedBlock::aboutToMark(HeapVersion markingVersion)
{
    HeapVersion version = footer().m_markingVersion;
    if (UNLIKELY(version != markingVersion))
        aboutToMarkSlow(markingVersion);
    return Dependency::fence(version);
}
561
// Debug check that this block's mark bits are current (no-op when asserts are disabled).
inline void MarkedBlock::Handle::assertMarksNotStale()
{
    block().assertMarksNotStale();
}
566
// Reads the mark bit without any staleness check or ordering — callers must
// know the marks are valid for their purpose.
inline bool MarkedBlock::isMarkedRaw(const void* p)
{
    return footer().m_marks.get(atomNumber(p));
}
571
// Version-checked mark query: if the block's marks are from an older marking
// version they are stale and treated as all-clear. The Dependency::fence
// orders the mark-bit load after the version load.
inline bool MarkedBlock::isMarked(HeapVersion markingVersion, const void* p)
{
    HeapVersion version = footer().m_markingVersion;
    if (UNLIKELY(version != markingVersion))
        return false;
    return footer().m_marks.get(atomNumber(p), Dependency::fence(version));
}
579
// Mark query for callers that already validated the version and hold a
// Dependency (e.g. from aboutToMark) to order the read.
inline bool MarkedBlock::isMarked(const void* p, Dependency dependency)
{
    assertMarksNotStale();
    return footer().m_marks.get(atomNumber(p), dependency);
}
585
// Atomically sets the mark bit for p's cell; returns the previous value.
// Marks must already be current (asserted).
inline bool MarkedBlock::testAndSetMarked(const void* p, Dependency dependency)
{
    assertMarksNotStale();
    return footer().m_marks.concurrentTestAndSet(atomNumber(p), dependency);
}
591
// Read-only view of the per-atom mark bitmap.
inline const Bitmap<MarkedBlock::atomsPerBlock>& MarkedBlock::marks() const
{
    return footer().m_marks;
}
596
// Whether p's cell is in the newly-allocated set (no version check here;
// see isNewlyAllocatedStale for staleness).
inline bool MarkedBlock::isNewlyAllocated(const void* p)
{
    return footer().m_newlyAllocated.get(atomNumber(p));
}
601
// Adds p's cell to the newly-allocated set.
inline void MarkedBlock::setNewlyAllocated(const void* p)
{
    footer().m_newlyAllocated.set(atomNumber(p));
}
606
// Removes p's cell from the newly-allocated set.
inline void MarkedBlock::clearNewlyAllocated(const void* p)
{
    footer().m_newlyAllocated.clear(atomNumber(p));
}
611
// Read-only view of the per-atom newly-allocated bitmap.
inline const Bitmap<MarkedBlock::atomsPerBlock>& MarkedBlock::newlyAllocated() const
{
    return footer().m_newlyAllocated;
}
616
617inline bool MarkedBlock::isAtom(const void* p)
618{
619 ASSERT(MarkedBlock::isAtomAligned(p));
620 size_t atomNumber = this->atomNumber(p);
621 if (atomNumber % handle().m_atomsPerCell) // Filters pointers into cell middles.
622 return false;
623 if (atomNumber >= handle().m_endAtom) // Filters pointers into invalid cells out of the range.
624 return false;
625 return true;
626}
627
628template <typename Functor>
629inline IterationStatus MarkedBlock::Handle::forEachCell(const Functor& functor)
630{
631 HeapCell::Kind kind = m_attributes.cellKind;
632 for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) {
633 HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
634 if (functor(cell, kind) == IterationStatus::Done)
635 return IterationStatus::Done;
636 }
637 return IterationStatus::Continue;
638}
639
640inline bool MarkedBlock::hasAnyMarked() const
641{
642 return footer().m_biasedMarkCount != footer().m_markCountBias;
643}
644
// Records that one more cell was marked. The biased count reaching zero means
// the block crossed its retirement threshold, handled by noteMarkedSlow().
inline void MarkedBlock::noteMarked()
{
    // This is racy by design. We don't want to pay the price of an atomic increment!
    int16_t biasedMarkCount = footer().m_biasedMarkCount;
    ++biasedMarkCount;
    footer().m_biasedMarkCount = biasedMarkCount;
    if (UNLIKELY(!biasedMarkCount))
        noteMarkedSlow();
}
654
655} // namespace JSC
656
657namespace WTF {
658
659struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
660 static unsigned hash(JSC::MarkedBlock* const& key)
661 {
662 // Aligned VM regions tend to be monotonically increasing integers,
663 // which is a great hash function, but we have to remove the low bits,
664 // since they're always zero, which is a terrible hash function!
665 return reinterpret_cast<uintptr_t>(key) / JSC::MarkedBlock::blockSize;
666 }
667};
668
// Make MarkedBlockHash the default hash for MarkedBlock* keys in WTF hash tables.
template<> struct DefaultHash<JSC::MarkedBlock*> {
    typedef MarkedBlockHash Hash;
};
672
673void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode);
674
675} // namespace WTF
676