// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/smi.h"
#include "src/objects/string-table.h"
#include "src/roots.h"
#include "src/visitors.h"
#include "testing/gtest/include/gtest/gtest_prod.h"

namespace v8 {

namespace debug {
using OutOfMemoryCallback = void (*)(void* data);
}  // namespace debug

namespace internal {

namespace heap {
class HeapTester;
class TestMemoryAllocatorScope;
}  // namespace heap

class ObjectBoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
class ExternalString;
using v8::MemoryPressureLevel;

class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class Space;
class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };

enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };

enum class FixedArrayVisitationMode { kRegular, kIncremental };

enum class TraceRetainingPathMode { kEnabled, kDisabled };

enum class RetainingPathOption { kDefault, kTrackEphemeronPath };

enum class GarbageCollectionReason {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kIdleTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21,
  kExternalFinalize = 22
  // If you add new items here, then update the incremental_marking_reason,
  // mark_compact_reason, and scavenge_reason counters in counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class YoungGenerationHandling {
  kRegularScavenge = 0,
  kFastPromotionDuringScavenge = 1,
  // Histogram::InspectConstructionArguments in chromium requires us to have at
  // least three buckets.
  kUnusedBucket = 2,
  // If you add new items here, then update the young_generation_handling in
  // counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class GCIdleTimeAction : uint8_t;

class AllocationResult {
 public:
  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  // Implicit constructor from Object.
  AllocationResult(Object object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  inline bool IsRetry() { return object_->IsSmi(); }
  inline HeapObject ToObjectChecked();
  inline AllocationSpace RetrySpace();

  template <typename T>
  bool To(T* obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object object_;
};

STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
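
// Illustrative calling pattern (a sketch, not a declaration in this header;
// "AllocateRawSomehow" stands in for whichever internal entry point hands
// back an AllocationResult):
//
//   AllocationResult result = AllocateRawSomehow(size_in_bytes);
//   HeapObject object;
//   if (!result.To(&object)) {
//     // Allocation failed: result.RetrySpace() names the space in which a
//     // retry (typically after a GC) should be attempted.
//   }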

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = nullptr;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

using EphemeronRememberedSet =
    std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
                       Object::Hasher>;

class Heap {
 public:
  // Stores ephemeron entries where the EphemeronHashTable is in old-space,
  // and the key of the entry is in new-space. Such keys do not appear in the
  // usual OLD_TO_NEW remembered set.
  EphemeronRememberedSet ephemeron_remembered_set_;
  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState {
    NOT_IN_GC,
    SCAVENGE,
    MARK_COMPACT,
    MINOR_MARK_COMPACT,
    TEAR_DOWN
  };

  using PretenuringFeedbackMap =
      std::unordered_map<AllocationSite, size_t, Object::Hasher>;

  // Taking this mutex prevents the GC from entering a phase that relocates
  // object references.
  base::Mutex* relocation_mutex() { return &relocation_mutex_; }

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  using Reservation = std::vector<Chunk>;

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  // TODO(ishell): kSystePointerMultiplier?
  static const int kPointerMultiplier = i::kSystemPointerSize / 4;
#endif

  // Semi-space size needs to be a multiple of page size.
  static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
  static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;

  STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
  STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInTaggedWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
                Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
                Internals::kNullValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
                Internals::kTrueValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
                Internals::kFalseValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
                Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  V8_EXPORT_PRIVATE static int GetMaximumFillToAlign(
      AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  V8_EXPORT_PRIVATE static int GetFillToAlign(Address address,
                                              AllocationAlignment alignment);
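
  // Illustrative relationship between the two helpers above (a sketch;
  // |addr| is a hypothetical allocation address): the filler needed for a
  // concrete address never exceeds the worst case for the alignment, i.e.
  //
  //   DCHECK_LE(Heap::GetFillToAlign(addr, kDoubleAligned),
  //             Heap::GetMaximumFillToAlign(kDoubleAligned));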

  // Returns the size of the initial area of a code-range, which is marked
  // writable and reserved to contain unwind information.
  static size_t GetCodeRangeReservedAreaSize();

  void FatalProcessOutOfMemory(const char* location);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Zapping is needed for verifying the heap, and is always done in debug
  // builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Helper function to get the bytecode flushing mode based on the flags. This
  // is required because it is not safe to access flags in the concurrent
  // marker.
  static inline BytecodeFlushMode GetBytecodeFlushMode() {
    if (FLAG_stress_flush_bytecode) {
      return BytecodeFlushMode::kStressFlushBytecode;
    } else if (FLAG_flush_bytecode) {
      return BytecodeFlushMode::kFlushBytecode;
    }
    return BytecodeFlushMode::kDoNotFlushBytecode;
  }

  static uintptr_t ZapValue() {
    return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
#if ENABLE_MINOR_MC
    return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
#else
    return SCAVENGER;
#endif  // ENABLE_MINOR_MC
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case SCAVENGER:
        return "Scavenger";
      case MARK_COMPACTOR:
        return "Mark-Compact";
      case MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  // Copies a block of memory from src to dst. The block size should be
  // aligned to the pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
  V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
                                                        Address slot,
                                                        HeapObject value);
  V8_EXPORT_PRIVATE void RecordEphemeronKeyWrite(EphemeronHashTable table,
                                                 Address key_slot);
  V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
      Address raw_object, Address address, Isolate* isolate);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
      Heap* heap, FixedArray array, int offset, int length);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
      Code host, RelocInfo* rinfo, HeapObject value);
  V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
                                                   Address slot,
                                                   HeapObject value);
  V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
      Heap* heap, HeapObject object);
  V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
                                                          RelocInfo* rinfo,
                                                          HeapObject value);
  V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow(
      Heap* heap, HeapObject host, HeapObject descriptor_array,
      int number_of_own_descriptors);
  V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  void NotifyBootstrapComplete();

  void NotifyOldGenerationExpansion();

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  // Moves len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray array, int dst_index, int src_index, int len,
                    WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

  // Copies len elements from src_index of the src array to dst_index of the
  // dst array.
  void CopyElements(FixedArray dst, FixedArray src, int dst_index,
                    int src_index, int len, WriteBarrierMode mode);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo. If the memory after the object header of
  // the filler should be cleared, pass in kClearFreedMemory. The default is
  // kDontClearFreedMemory.
  V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
      Address addr, int size, ClearRecordedSlots clear_slots_mode,
      ClearFreedMemoryMode clear_memory_mode =
          ClearFreedMemoryMode::kDontClearFreedMemory);
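
  // Illustrative use (|gap_start| and |gap_size| are hypothetical values):
  // freeing a gap whose old contents may have recorded slots and should be
  // zeroed out:
  //
  //   heap->CreateFillerObjectAt(gap_start, gap_size,
  //                              ClearRecordedSlots::kYes,
  //                              ClearFreedMemoryMode::kClearFreedMemory);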

  template <typename T>
  void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);

  bool CanMoveObjectStart(HeapObject object);

  bool IsImmovable(HeapObject object);

  static bool IsLargeObject(HeapObject object);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj,
                                                      int elements_to_trim);

  // Trim the given array from the right.
  V8_EXPORT_PRIVATE void RightTrimFixedArray(FixedArrayBase obj,
                                             int elements_to_trim);
  void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Oddball ToBoolean(bool condition);

  // Notify the heap that a context has been disposed.
  V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object object) {
    native_contexts_list_ = object;
  }
  Object native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object object) {
    allocation_sites_list_ = object;
  }
  Object allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Address allocation_sites_list_address() {
    return reinterpret_cast<Address>(&allocation_sites_list_);
  }

  // Traverses all the allocation sites [nested_site and weak_next] in the
  // list and calls the visitor for each.
  void ForeachAllocationSite(
      Object list, const std::function<void(AllocationSite)>& visitor);

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  bool write_protect_code_memory() const { return write_protect_code_memory_; }

  uintptr_t code_space_memory_modification_scope_depth() {
    return code_space_memory_modification_scope_depth_;
  }

  void increment_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_++;
  }

  void decrement_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_--;
  }

  void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
  V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(HeapObject object);
  void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
  V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();

  void EnableUnprotectedMemoryChunksRegistry() {
    unprotected_memory_chunks_registry_enabled_ = true;
  }

  void DisableUnprotectedMemoryChunksRegistry() {
    unprotected_memory_chunks_registry_enabled_ = false;
  }

  bool unprotected_memory_chunks_registry_enabled() {
    return unprotected_memory_chunks_registry_enabled_;
  }

  inline HeapState gc_state() { return gc_state_; }
  void SetGCState(HeapState state);
  bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return a null AllocationMemento.
  template <FindMementoMode mode>
  inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  V8_EXPORT_PRIVATE void MemoryPressureNotification(MemoryPressureLevel level,
                                                    bool is_isolate_locked);
  void CheckMemoryPressure();

  V8_EXPORT_PRIVATE void AddNearHeapLimitCallback(v8::NearHeapLimitCallback,
                                                  void* data);
  V8_EXPORT_PRIVATE void RemoveNearHeapLimitCallback(
      v8::NearHeapLimitCallback callback, size_t heap_limit);
  V8_EXPORT_PRIVATE void AutomaticallyRestoreInitialHeapLimit(
      double threshold_percent);

  V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check the new space expansion criteria and expand semi-spaces if they
  // were hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if it has survived a scavenge operation.
  inline bool ShouldBePromoted(Address old_address);

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline int NextScriptId();
  inline int NextDebuggingId();
  inline int GetNextTemplateSerialNumber();

  void SetSerializedObjects(FixedArray objects);
  void SetSerializedGlobalProxySizes(FixedArray sizes);

  // For post-mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }

  V8_INLINE int64_t external_memory();
  V8_INLINE void update_external_memory(int64_t delta);
  V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
  V8_INLINE void account_external_memory_concurrently_freed();

  size_t backing_store_bytes() const { return backing_store_bytes_; }

  void CompactWeakArrayLists(AllocationType allocation);

  V8_EXPORT_PRIVATE void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new.
  inline void OnAllocationEvent(HeapObject object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);

  inline bool CanAllocateInReadOnlySpace();
  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();

  bool HighMemoryPressure() {
    return memory_pressure_level_ != MemoryPressureLevel::kNone;
  }

  void RestoreHeapLimit(size_t heap_limit) {
    // Do not set the limit lower than the live size plus some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    max_old_generation_size_ =
        Min(max_old_generation_size_, Max(heap_limit, min_limit));
  }
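
  // Illustrative arithmetic for the slack above (hypothetical numbers): with
  // SizeOfObjects() == 100 MB, min_limit is 125 MB, so a requested heap_limit
  // of 64 MB would be raised to 125 MB before being capped by the current
  // max_old_generation_size_.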

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configures the heap sizes:
  // max_semi_space_size_in_kb: maximum semi-space size in KB
  // max_old_generation_size_in_mb: maximum old generation size in MB
  // code_range_size_in_mb: code range size in MB
  void ConfigureHeap(size_t max_semi_space_size_in_kb,
                     size_t max_old_generation_size_in_mb,
                     size_t code_range_size_in_mb);
  void ConfigureHeapDefault();

  // Prepares the heap, setting up for deserialization.
  void SetUp();

  // Sets the read-only heap and space.
  void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);

  // Sets up the heap memory without creating any objects.
  void SetUpSpaces();

  // (Re-)Initialize hash seed from flag or RNG.
  void InitializeHashSeed();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are
  // nullptr.
  void CreateObjectStats();

  // Sets the TearDown state, so no new GC tasks get posted.
  void StartTearDown();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  CodeSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
  NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
  ReadOnlySpace* read_only_space() { return read_only_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // Returns the name of the space.
  V8_EXPORT_PRIVATE static const char* GetSpaceName(AllocationSpace space);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }

  GCTracer* tracer() { return tracer_.get(); }

  MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_.get();
  }

  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_;
  }

  ArrayBufferCollector* array_buffer_collector() {
    return array_buffer_collector_.get();
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Shortcut to the roots table stored in the Isolate.
  V8_INLINE RootsTable& roots_table();

// Heap root getters.
#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
  MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
  V8_INLINE void SetRootScriptList(Object value);
  V8_INLINE void SetRootStringTable(StringTable value);
  V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
  V8_INLINE void SetMessageListeners(TemplateList value);
  V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);

  // Set the stack limit in the roots table. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
  void UnregisterStrongRoots(FullObjectSlot start);

  void SetBuiltinsConstantsTable(FixedArray cache);

  // A full copy of the interpreter entry trampoline, used as a template to
  // create copies of the builtin at runtime. The copies are used to create
  // better profiling information for ticks in bytecode execution. Note that
  // this is always a copy of the full builtin, i.e. not the off-heap
  // trampoline.
  // See also: FLAG_interpreted_frames_native_stack.
  void SetInterpreterEntryTrampolineForProfiling(Code code);

  // Add finalization_group into the dirty_js_finalization_groups list.
  void AddDirtyJSFinalizationGroup(
      JSFinalizationGroup finalization_group,
      std::function<void(HeapObject object, ObjectSlot slot, Object target)>
          gc_notify_updated_slot);

  V8_EXPORT_PRIVATE void AddKeepDuringJobTarget(Handle<JSReceiver> target);
  void ClearKeepDuringJobSet();

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  V8_EXPORT_PRIVATE void EnableInlineAllocation();
  V8_EXPORT_PRIVATE void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  V8_EXPORT_PRIVATE bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection.
  V8_EXPORT_PRIVATE void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last-hope GC; should try to squeeze out as much garbage as possible.
  V8_EXPORT_PRIVATE void CollectAllAvailableGarbage(
      GarbageCollectionReason gc_reason);

  // Precise garbage collection that potentially finalizes already running
  // incremental marking before performing an atomic garbage collection.
  // Only use if absolutely necessary or in tests to avoid floating garbage!
  V8_EXPORT_PRIVATE void PreciseCollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  using GetExternallyAllocatedMemoryInBytesCallback =
      v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback;

  void SetGetExternallyAllocatedMemoryInBytesCallback(
      GetExternallyAllocatedMemoryInBytesCallback callback) {
    external_memory_callback_ = callback;
  }

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Builtins. =================================================================
  // ===========================================================================

  V8_EXPORT_PRIVATE Code builtin(int index);
  Address builtin_address(int index);
  void set_builtin(int index, Code builtin);

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // None of these methods iterate over the read-only roots. To do this use
  // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
  // garbage collection and is usually only performed as part of
  // (de)serialization or heap verification.

  // Iterates over the strong roots and the weak roots.
  void IterateRoots(RootVisitor* v, VisitMode mode);
  // Iterates over the strong roots.
  void IterateStrongRoots(RootVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(RootVisitor* v);
  // Iterates over weak string tables.
  void IterateWeakRoots(RootVisitor* v, VisitMode mode);
  // Iterates over weak global handles.
  void IterateWeakGlobalHandles(RootVisitor* v);
  // Iterates over builtins.
  void IterateBuiltins(RootVisitor* v);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Used to query the incremental marking status from generated code.
  Address* IsMarkingFlagAddress() {
    return reinterpret_cast<Address*>(&is_marking_flag_);
  }

  void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }

  Address* store_buffer_top_address();
  static intptr_t store_buffer_mask_constant();
  static Address store_buffer_overflow_function_address();

  void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
  void ClearRecordedSlotRange(Address start, Address end);

#ifdef DEBUG
  void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
#endif

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  int GCFlagsForIncrementalMarking() {
    return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
                                          : kNoGCFlags;
  }

  // Start incremental marking and ensure that the idle time handler can
  // perform incremental steps.
  V8_EXPORT_PRIVATE void StartIdleIncrementalMarking(
      GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  V8_EXPORT_PRIVATE void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
  // Synchronously finalizes incremental marking.
  void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason);

  void RegisterDeserializedObjectsForBlackAllocation(
      Reservation* reservations, const std::vector<HeapObject>& large_objects,
      const std::vector<Address>& maps);

  IncrementalMarking* incremental_marking() {
    return incremental_marking_.get();
  }

  // ===========================================================================
  // Concurrent marking API. ===================================================
  // ===========================================================================

  ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }

  // The runtime uses this function to notify the GC of potentially unsafe
  // object layout changes that require special synchronization with the
  // concurrent marker. The old size is the size of the object before the
  // layout change.
  void NotifyObjectLayoutChange(HeapObject object, int old_size,
                                const DisallowHeapAllocation&);

#ifdef VERIFY_HEAP
  // This function checks that either
  // - the map transition is safe,
  // - or it was communicated to GC using NotifyObjectLayoutChange.
  V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
                                                  Map new_map);
#endif

  // ===========================================================================
  // Deoptimization support API. ===============================================
  // ===========================================================================

  // Setters for code offsets of well-known deoptimization targets.
  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  void SetConstructStubCreateDeoptPCOffset(int pc_offset);
  void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
  void SetInterpreterEntryReturnPCOffset(int pc_offset);

  // Invalidates references in the given {code} object that are referenced
  // transitively from the deoptimization data. Mutates write-protected code.
  void InvalidateCodeDeoptimizationData(Code code);

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites();

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
    return local_embedder_heap_tracer_.get();
  }

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  EmbedderHeapTracer* GetEmbedderHeapTracer() const;

  void RegisterExternallyReferencedObject(Address* location);
  void SetEmbedderStackStateForNextFinalizaton(
      EmbedderHeapTracer::EmbedderStackState stack_state);

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String string);

  // Called when a string's resource is changed. The size of the payload is
  // passed as an argument of the method.
  V8_EXPORT_PRIVATE void UpdateExternalString(String string,
                                              size_t old_payload,
                                              size_t new_payload);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String string);

  static String UpdateYoungReferenceInExternalStringTableEntry(
      Heap* heap, FullObjectSlot pointer);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in the young generation.
  static inline bool InYoungGeneration(Object object);
  static inline bool InYoungGeneration(MaybeObject object);
  static inline bool InYoungGeneration(HeapObject heap_object);
  static inline bool InFromPage(Object object);
  static inline bool InFromPage(MaybeObject object);
  static inline bool InFromPage(HeapObject heap_object);
  static inline bool InToPage(Object object);
  static inline bool InToPage(MaybeObject object);
  static inline bool InToPage(HeapObject heap_object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  V8_EXPORT_PRIVATE bool Contains(HeapObject value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool InSpaceSlow(Address addr, AllocationSpace space);

  static inline Heap* FromWritableHeapObject(const HeapObject obj);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note that the following methods fail gracefully when the bounds
  // are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);

  // The total number of native context objects on the heap.
  size_t NumberOfNativeContexts();
  // The total number of native contexts that were detached but were not
  // garbage collected yet.
  size_t NumberOfDetachedContexts();

  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  V8_EXPORT_PRIVATE size_t MaxReserved();
  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
  size_t MaxOldGenerationSize() { return max_old_generation_size_; }

  V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
      uint64_t physical_memory);

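  // Worked example for the linear scaling below (illustrative; assumes a
  // 64-bit non-Android build, where kPointerMultiplier == 2, i.e. a
  // semi-space range of [1024 KB, 16384 KB]): for physical_memory == 1 GB the
  // capped value is 1 GB, the scale factor is
  // (1 GB - 512 MB) / (3 GB - 512 MB) == 0.2, and the result is
  // 0.2 * (16384 - 1024) + 1024 == 4096 KB, rounded up to page granularity.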
  static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
    const uint64_t min_physical_memory = 512 * MB;
    const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);

    uint64_t capped_physical_memory =
        Max(Min(physical_memory, max_physical_memory), min_physical_memory);
    // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
    size_t semi_space_size_in_kb =
        static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
                             (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
                                (max_physical_memory - min_physical_memory) +
                            kMinSemiSpaceSizeInKB);
    return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
  }

  // Returns the capacity of the heap in bytes w/o growing. The heap grows
  // when more spaces are needed until it reaches the limit.
  size_t Capacity();

  // Returns the capacity of the old generation.
  V8_EXPORT_PRIVATE size_t OldGenerationCapacity();

  // Returns the amount of memory currently held alive by the unmapper.
  size_t CommittedMemoryOfUnmapper();

  // Returns the amount of memory currently committed for the heap.
  size_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  size_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  size_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  size_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // The heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  size_t Available();

  // Returns the size of all objects residing in the heap.
  V8_EXPORT_PRIVATE size_t SizeOfObjects();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(size_t object_size) {
    promoted_objects_size_ += object_size;
  }
  inline size_t promoted_objects_size() { return promoted_objects_size_; }

  inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
    semi_space_copied_object_size_ += object_size;
  }
  inline size_t semi_space_copied_object_size() {
    return semi_space_copied_object_size_;
  }

  inline size_t SurvivedYoungObjectSize() {
    return promoted_objects_size_ + semi_space_copied_object_size_;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(size_t survived) {
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline uint64_t OldGenerationObjectsAndPromotedExternalMemorySize() {
    return OldGenerationSizeOfObjects() + PromotedExternalMemorySize();
  }

  inline void UpdateNewSpaceAllocationCounter();

  inline size_t NewSpaceAllocationCounter();

  // This should be used only for testing.
  void set_new_space_allocation_counter(size_t new_value) {
    new_space_allocation_counter_ = new_value;
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_at_last_gc_ =
        OldGenerationAllocationCounter();
    old_generation_size_at_last_gc_ = 0;
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_at_last_gc_ +
           PromotedSinceLastGC();
  }

  // This should be used only for testing.
  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
    old_generation_allocation_counter_at_last_gc_ = new_value;
  }

  size_t PromotedSinceLastGC() {
    size_t old_generation_size = OldGenerationSizeOfObjects();
    DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_);
    return old_generation_size - old_generation_size_at_last_gc_;
  }

  // This is called by the sweeper when it discovers more free space
  // than expected at the end of the preceding GC.
  void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
    if (old_generation_size_at_last_gc_ != 0) {
      // OldGenerationSizeOfObjects() is now smaller by |decreased_bytes|.
      // Adjust old_generation_size_at_last_gc_ too, so that
      // PromotedSinceLastGC continues to increase monotonically, rather than
      // decreasing here.
      DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes);
      old_generation_size_at_last_gc_ -= decreased_bytes;
    }
  }

  int gc_count() const { return gc_count_; }

  bool is_current_gc_forced() const { return is_current_gc_forced_; }

  // Returns the size of objects residing in non-new spaces.
  // Excludes external memory held by those objects.
  V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();

  // ===========================================================================
  // Prologue/epilogue callback methods. =======================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);
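
  // Illustrative registration sketch (|OnMajorGC| and |payload| are
  // hypothetical): the callback fires for GCs matching the filter and gets
  // |data| passed back verbatim:
  //
  //   void OnMajorGC(v8::Isolate* isolate, GCType type, GCCallbackFlags flags,
  //                  void* data);
  //   heap->AddGCPrologueCallback(&OnMajorGC, kGCTypeMarkSweepCompact,
  //                               payload);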
1184
1185 void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
1186 void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
1187
1188 // ===========================================================================
1189 // Allocation methods. =======================================================
1190 // ===========================================================================
1191
1192 // Creates a filler object and returns a heap object immediately after it.
1193 V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT HeapObject
1194 PrecedeWithFiller(HeapObject object, int filler_size);
1195
1196 // Creates a filler object if needed for alignment and returns a heap object
1197 // immediately after it. If any space is left after the returned object,
1198 // another filler object is created so the over allocated memory is iterable.
1199 V8_WARN_UNUSED_RESULT HeapObject
1200 AlignWithFiller(HeapObject object, int object_size, int allocation_size,
1201 AllocationAlignment alignment);
1202
1203 // ===========================================================================
1204 // ArrayBuffer tracking. =====================================================
1205 // ===========================================================================
1206
1207 // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
1208 // in the registration/unregistration APIs. Consider dropping the "New" from
1209 // "RegisterNewArrayBuffer" because one can re-register a previously
1210 // unregistered buffer, too, and the name is confusing.
1211 void RegisterNewArrayBuffer(JSArrayBuffer buffer);
1212 void UnregisterArrayBuffer(JSArrayBuffer buffer);
1213
1214 // ===========================================================================
1215 // Allocation site tracking. =================================================
1216 // ===========================================================================
1217
1218 // Updates the AllocationSite of a given {object}. The entry (including the
1219 // count) is cached on the local pretenuring feedback.
1220 inline void UpdateAllocationSite(
1221 Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
1222
1223 // Merges local pretenuring feedback into the global one. Note that this
1224 // method needs to be called after evacuation, as allocation sites may be
1225 // evacuated and this method resolves forward pointers accordingly.
1226 void MergeAllocationSitePretenuringFeedback(
1227 const PretenuringFeedbackMap& local_pretenuring_feedback);
1228
1229 // ===========================================================================
1230 // Allocation tracking. ======================================================
1231 // ===========================================================================
1232
1233 // Adds {new_space_observer} to new space and {observer} to any other space.
1234 void AddAllocationObserversToAllSpaces(
1235 AllocationObserver* observer, AllocationObserver* new_space_observer);
1236
1237 // Removes {new_space_observer} from new space and {observer} from any other
1238 // space.
1239 void RemoveAllocationObserversFromAllSpaces(
1240 AllocationObserver* observer, AllocationObserver* new_space_observer);
1241
1242 bool allocation_step_in_progress() { return allocation_step_in_progress_; }
1243 void set_allocation_step_in_progress(bool val) {
1244 allocation_step_in_progress_ = val;
1245 }
1246
1247 // ===========================================================================
1248 // Heap object allocation tracking. ==========================================
1249 // ===========================================================================
1250
1251 void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1252 void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1253 bool has_heap_object_allocation_tracker() const {
1254 return !allocation_trackers_.empty();
1255 }
1256
1257 // ===========================================================================
1258 // Retaining path tracking. ==================================================
1259 // ===========================================================================
1260
1261 // Adds the given object to the weak table of retaining path targets.
1262 // On each GC if the marker discovers the object, it will print the retaining
1263 // path. This requires --track-retaining-path flag.
1264 void AddRetainingPathTarget(Handle<HeapObject> object,
1265 RetainingPathOption option);
1266
1267 // ===========================================================================
1268 // Stack frame support. ======================================================
1269 // ===========================================================================
1270
1271 // Returns the Code object for a given interior pointer.
1272 Code GcSafeFindCodeForInnerPointer(Address inner_pointer);
1273
1274 // Returns true if {addr} is contained within {code} and false otherwise.
1275 // Mostly useful for debugging.
1276 bool GcSafeCodeContains(Code code, Address addr);
1277
1278// =============================================================================
1279#ifdef VERIFY_HEAP
1280 // Verify the heap is in its normal state before or after a GC.
1281 V8_EXPORT_PRIVATE void Verify();
1282 void VerifyRememberedSetFor(HeapObject object);
1283#endif
1284
1285#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1286 void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
1287#endif
1288
1289#ifdef DEBUG
1290 void VerifyCountersAfterSweeping();
1291 void VerifyCountersBeforeConcurrentSweeping();
1292
1293 void Print();
1294 void PrintHandles();
1295
1296 // Report code statistics.
1297 void ReportCodeStatistics(const char* title);
1298#endif
1299 void* GetRandomMmapAddr() {
1300 void* result = v8::internal::GetRandomMmapAddr();
1301#if V8_TARGET_ARCH_X64
1302#if V8_OS_MACOSX
1303 // The Darwin kernel [as of macOS 10.12.5] does not clean up page
1304 // directory entries [PDE] created from mmap or mach_vm_allocate, even
1305 // after the region is destroyed. Using a virtual address space that is
1306 // too large causes a leak of about 1 wired [can never be paged out] page
1307 // per call to mmap(). The page is only reclaimed when the process is
1308 // killed. Confine the hint to a 32-bit section of the virtual address
1309 // space. See crbug.com/700928.
1310 uintptr_t offset =
1311 reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
1312 kMmapRegionMask;
1313 result = reinterpret_cast<void*>(mmap_region_base_ + offset);
1314#endif // V8_OS_MACOSX
1315#endif // V8_TARGET_ARCH_X64
1316 return result;
1317 }
1318
1319 static const char* GarbageCollectionReasonToString(
1320 GarbageCollectionReason gc_reason);
1321
1322 // Calculates the nof entries for the full sized number to string cache.
1323 inline int MaxNumberToStringCacheSize() const;
1324
1325 private:
1326 class SkipStoreBufferScope;
1327
1328 using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
1329 FullObjectSlot pointer);
1330
1331 // External strings table is a place where all external strings are
1332 // registered. We need to keep track of such strings to properly
1333 // finalize them.
1334 class ExternalStringTable {
1335 public:
1336 explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1337
1338 // Registers an external string.
1339 inline void AddString(String string);
1340 bool Contains(String string);
1341
1342 void IterateAll(RootVisitor* v);
1343 void IterateYoung(RootVisitor* v);
1344 void PromoteYoung();
1345
1346 // Restores internal invariant and gets rid of collected strings. Must be
1347 // called after each Iterate*() that modified the strings.
1348 void CleanUpAll();
1349 void CleanUpYoung();
1350
1351 // Finalize all registered external strings and clear tables.
1352 void TearDown();
1353
1354 void UpdateYoungReferences(
1355 Heap::ExternalStringTableUpdaterCallback updater_func);
1356 void UpdateReferences(
1357 Heap::ExternalStringTableUpdaterCallback updater_func);
1358
1359 private:
1360 void Verify();
1361 void VerifyYoung();
1362
1363 Heap* const heap_;
1364
1365 // To speed up scavenge collections young string are kept separate from old
1366 // strings.
1367 std::vector<Object> young_strings_;
1368 std::vector<Object> old_strings_;
1369
1370 DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
1371 };
1372
1373 struct StrongRootsList;
1374
1375 struct StringTypeTable {
1376 InstanceType type;
1377 int size;
1378 RootIndex index;
1379 };
1380
1381 struct ConstantStringTable {
1382 const char* contents;
1383 RootIndex index;
1384 };
1385
1386 struct StructTable {
1387 InstanceType type;
1388 int size;
1389 RootIndex index;
1390 };
1391
1392 struct GCCallbackTuple {
1393 GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
1394 void* data)
1395 : callback(callback), gc_type(gc_type), data(data) {}
1396
1397 bool operator==(const GCCallbackTuple& other) const;
1398 GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT;
1399
1400 v8::Isolate::GCCallbackWithData callback;
1401 GCType gc_type;
1402 void* data;
1403 };
1404
1405 static const int kInitialStringTableSize = StringTable::kMinCapacity;
1406 static const int kInitialEvalCacheSize = 64;
1407 static const int kInitialNumberStringCacheSize = 256;
1408
1409 static const int kRememberedUnmappedPages = 128;
1410
1411 static const StringTypeTable string_type_table[];
1412 static const ConstantStringTable constant_string_table[];
1413 static const StructTable struct_table[];
1414
1415 static const int kYoungSurvivalRateHighThreshold = 90;
1416 static const int kYoungSurvivalRateAllowedDeviation = 15;
1417 static const int kOldSurvivalRateLowThreshold = 10;
1418
1419 static const int kMaxMarkCompactsInIdleRound = 7;
1420 static const int kIdleScavengeThreshold = 5;
1421
1422 static const int kInitialFeedbackCapacity = 256;
1423
1424 Heap();
1425 ~Heap();
1426
1427 static bool IsRegularObjectAllocation(AllocationType allocation) {
1428 return AllocationType::kYoung == allocation ||
1429 AllocationType::kOld == allocation;
1430 }
1431
1432 static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
1433 return 0;
1434 }

#define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type value);
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  StoreBuffer* store_buffer() { return store_buffer_.get(); }

  void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }

  inline bool ShouldReduceMemory() const {
    return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
  }

  int NumberOfScavengeTasks();

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Makes sure there is a filler value behind the top of the new space so
  // that the GC does not confuse uninitialized/stale memory with the
  // allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects. May cause a GC.
  void MakeHeapIterable();

  // Performs garbage collection. Returns whether there is a chance that
  // another major GC could collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();

  bool CreateInitialMaps();
  void CreateInternalAccessorInfoObjects();
  void CreateInitialObjects();

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  V8_EXPORT_PRIVATE bool UncommitFromSpace();

  // Fills in bogus values in from space.
  void ZapFromSpace();

  // Zaps the memory of a code object.
  V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
                                       int size_in_bytes);

  // Deoptimizes all code that contains allocation instructions which are
  // tenured or not tenured. Moreover, it clears the pretenuring allocation
  // site statistics.
  void ResetAllAllocationSitesDependentCode(AllocationType allocation);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Records statistics after garbage collection.
  void ReportStatisticsAfterGC();

  // Flushes the number-to-string cache.
  void FlushNumberStringCache();

  void ConfigureInitialOldGenerationSize();

  bool HasLowYoungGenerationAllocationRate();
  bool HasLowOldGenerationAllocationRate();
  double YoungGenerationMutatorUtilization();
  double OldGenerationMutatorUtilization();

  void ReduceNewSpaceSize();

  GCIdleTimeHeapState ComputeHeapState();

  bool PerformIdleTimeAction(GCIdleTimeAction action,
                             GCIdleTimeHeapState heap_state,
                             double deadline_in_ms);

  void IdleNotificationEpilogue(GCIdleTimeAction action,
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);

  int NextAllocationTimeout(int current_timeout = 0);
  inline void UpdateAllocationsHash(HeapObject object);
  inline void UpdateAllocationsHash(uint32_t value);
  void PrintAllocationsHash();

  void PrintMaxMarkingLimitReached();
  void PrintMaxNewSpaceSizeReached();

  int NextStressMarkingLimit();

  void AddToRingBuffer(const char* string);
  void GetFromRingBuffer(char* buffer);

  void CompactRetainedMaps(WeakArrayList retained_maps);

  void CollectGarbageOnMemoryPressure();

  void EagerlyFreeExternalMemory();

  bool InvokeNearHeapLimitCallback();

  void ComputeFastPromotionMode();

  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
  void FinalizeIncrementalMarkingIncrementally(
      GarbageCollectionReason gc_reason);

  // Returns the timer used for a given GC type.
  // - GCScavenger: young generation GC
  // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
  //   memory reduction
  TimedHistogram* GCTypeTimer(GarbageCollector collector);
  TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);

  // ===========================================================================
  // Pretenuring. ==============================================================
  // ===========================================================================

  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method, objects in old space must not move.
  void ProcessPretenuringFeedback();

  // Removes an entry from the global pretenuring storage.
  void RemoveAllocationSitePretenuringFeedback(AllocationSite site);

  // ===========================================================================
  // Actual GC. ================================================================
  // ===========================================================================

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Performs a major collection in the whole heap.
  void MarkCompact();
  // Performs a minor collection of just the young generation.
  void MinorMarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  // Performs a minor collection in the new generation.
  void Scavenge();
  void EvacuateYoungGeneration();

  void UpdateYoungReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);
  void ProcessWeakListRoots(WeakObjectRetainer* retainer);

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  inline size_t OldGenerationSpaceAvailable() {
    if (old_generation_allocation_limit_ <=
        OldGenerationObjectsAndPromotedExternalMemorySize())
      return 0;
    return old_generation_allocation_limit_ -
           static_cast<size_t>(
               OldGenerationObjectsAndPromotedExternalMemorySize());
  }
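
  // Worked example (illustrative numbers): with a 512 MB allocation limit and
  // 300 MB of old-generation objects plus promoted external memory, 212 MB is
  // available; once usage reaches the limit, the result clamps to zero.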

  // We allow incremental marking to overshoot the allocation limit for
  // performance reasons. If the overshoot is too large then we are more
  // eager to finalize incremental marking.
  inline bool AllocationLimitOvershotByLargeMargin() {
    // This guards against too eager finalization in small heaps.
    // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
    size_t kMarginForSmallHeaps = 32u * MB;
    if (old_generation_allocation_limit_ >=
        OldGenerationObjectsAndPromotedExternalMemorySize())
      return false;
    uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
                         old_generation_allocation_limit_;
    // Overshoot margin is 50% of allocation limit or half-way to the max heap
    // with special handling of small heaps.
    uint64_t margin =
        Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
            (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
    return overshoot >= margin;
  }
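
  // Worked example (illustrative numbers): with a 512 MB allocation limit and
  // a 1 GB max old generation, margin = Min(Max(256 MB, 32 MB), 256 MB) =
  // 256 MB, so marking is finalized eagerly only once the limit is overshot
  // by at least 256 MB. Near the max heap the second operand dominates: with
  // a 40 MB limit and a 64 MB max, margin = Min(Max(20 MB, 32 MB), 12 MB) =
  // 12 MB.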

  void UpdateTotalGCTime(double duration);

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  bool IsIneffectiveMarkCompact(size_t old_generation_size,
                                double mutator_utilization);
  void CheckIneffectiveMarkCompact(size_t old_generation_size,
                                   double mutator_utilization);

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  // ===========================================================================
  // Growing strategy. =========================================================
  // ===========================================================================

  HeapController* heap_controller() { return heap_controller_.get(); }
  MemoryReducer* memory_reducer() { return memory_reducer_.get(); }

  // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
  // This constant limits the effect of load RAIL mode on GC.
  // The value is arbitrary and chosen as the largest load time observed in
  // v8 browsing benchmarks.
  static const int kMaxLoadTimeMs = 7000;

  bool ShouldOptimizeForLoadTime();

  size_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  bool always_allocate() { return always_allocate_scope_count_ != 0; }

  V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);

  bool ShouldExpandOldGenerationOnSlowAllocation();

  enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };

  HeapGrowingMode CurrentHeapGrowingMode();

  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
  IncrementalMarkingLimit IncrementalMarkingLimitReached();

  // ===========================================================================
  // Idle notification. ========================================================
  // ===========================================================================

  bool RecentIdleNotificationHappened();
  void ScheduleIdleScavengeIfNeeded(int bytes_allocated);

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Allocates a JS Map in the heap.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateMap(InstanceType instance_type, int instance_size,
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
              int inobject_properties = 0);

  // Allocates an uninitialized object. The memory is non-executable if the
  // hardware and OS allow. This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationType allocation,
      AllocationAlignment alignment = kWordAligned);
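
  // Caller-side sketch (illustrative, assuming AllocationResult's To()
  // accessor): the result must be checked before the memory is used.
  //
  //   AllocationResult result =
  //       AllocateRaw(size_in_bytes, AllocationType::kYoung);
  //   HeapObject object;
  //   if (!result.To(&object)) {
  //     // Failed; callers typically fall back to a retrying wrapper below.
  //   }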

  // This method will try to perform an allocation of a given size of a given
  // AllocationType. If the allocation fails, a regular full garbage collection
  // is triggered and the allocation is retried. This is performed multiple
  // times. If after that retry procedure the allocation still fails, nullptr
  // is returned.
  HeapObject AllocateRawWithLightRetry(
      int size, AllocationType allocation,
      AllocationAlignment alignment = kWordAligned);

  // This method will try to perform an allocation of a given size of a given
  // AllocationType. If the allocation fails, a regular full garbage collection
  // is triggered and the allocation is retried. This is performed multiple
  // times. If after that retry procedure the allocation still fails, a
  // "hammer" garbage collection is triggered which tries to significantly
  // reduce memory. If the allocation still fails after that, a fatal error is
  // thrown.
  HeapObject AllocateRawWithRetryOrFail(
      int size, AllocationType allocation,
      AllocationAlignment alignment = kWordAligned);
  HeapObject AllocateRawCodeInLargeObjectSpace(int size);
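
  // Illustrative contrast between the two retrying wrappers above (assuming
  // HeapObject's is_null() accessor):
  //
  //   // Light retry: may come back empty, so the caller must check.
  //   HeapObject maybe = AllocateRawWithLightRetry(size, AllocationType::kOld);
  //   if (maybe.is_null()) { /* caller handles the failure */ }
  //
  //   // Retry-or-fail: either succeeds or terminates with an OOM fatal
  //   // error, so the result needs no null check.
  //   HeapObject object =
  //       AllocateRawWithRetryOrFail(size, AllocationType::kOld);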

  // Allocates a heap object based on the map.
  V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
                                                  AllocationType allocation);

  // Takes a code object and checks if it is on memory which is not subject to
  // compaction. This method will return a new code object on an immovable
  // memory location if the original code object was movable.
  HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);

  // Allocates a partial map for bootstrapping.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocatePartialMap(InstanceType instance_type, int instance_size);

  void FinalizePartialMap(Map map);

  // Allocates an empty fixed typed array of the given type.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  void set_force_oom(bool value) { force_oom_ = value; }

  // ===========================================================================
  // Retaining path tracing ====================================================
  // ===========================================================================

  void AddRetainer(HeapObject retainer, HeapObject object);
  void AddEphemeronRetainer(HeapObject retainer, HeapObject object);
  void AddRetainingRoot(Root root, HeapObject object);
  // Returns true if the given object is a target of retaining path tracking.
  // Stores the option corresponding to the object in the provided *option.
  bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
  void PrintRetainingPath(HeapObject object, RetainingPathOption option);

#ifdef DEBUG
  V8_EXPORT_PRIVATE void IncrementObjectCounters();
#endif  // DEBUG

  // The amount of memory that has been freed concurrently.
  std::atomic<intptr_t> external_memory_concurrently_freed_{0};

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_ = nullptr;

  size_t code_range_size_ = 0;
  size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
  size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
  size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
  size_t initial_max_old_generation_size_;
  size_t initial_max_old_generation_size_threshold_;
  size_t initial_old_generation_size_;
  bool old_generation_size_configured_ = false;
  size_t maximum_committed_ = 0;
  size_t old_generation_capacity_after_bootstrap_ = 0;

  // Backing store bytes (array buffers and external strings).
  std::atomic<size_t> backing_store_bytes_{0};

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  size_t survived_since_last_expansion_ = 0;

  // ... and since the last scavenge.
  size_t survived_last_scavenge_ = 0;

  // This is not the depth of nested AlwaysAllocateScope's but rather a single
  // count, as scopes can be acquired from multiple tasks (read: threads).
  std::atomic<size_t> always_allocate_scope_count_{0};

  // Stores the memory pressure level that was set by
  // MemoryPressureNotification and reset by a mark-compact garbage collection.
  std::atomic<MemoryPressureLevel> memory_pressure_level_;

  std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
      near_heap_limit_callbacks_;

  // For keeping track of context disposals.
  int contexts_disposed_ = 0;

  // The length of the retained_maps array at the time of context disposal.
  // This separates maps in the retained_maps array that were created before
  // and after context disposal.
  int number_of_disposed_maps_ = 0;

  ReadOnlyHeap* read_only_heap_ = nullptr;

  NewSpace* new_space_ = nullptr;
  OldSpace* old_space_ = nullptr;
  CodeSpace* code_space_ = nullptr;
  MapSpace* map_space_ = nullptr;
  LargeObjectSpace* lo_space_ = nullptr;
  CodeLargeObjectSpace* code_lo_space_ = nullptr;
  NewLargeObjectSpace* new_lo_space_ = nullptr;
  ReadOnlySpace* read_only_space_ = nullptr;
  // Map from the space id to the space.
  Space* space_[LAST_SPACE + 1];

  // Determines whether code space is write-protected. This is essentially a
  // race-free copy of the {FLAG_write_protect_code_memory} flag.
  bool write_protect_code_memory_ = false;

  // Holds the number of open CodeSpaceMemoryModificationScopes.
  uintptr_t code_space_memory_modification_scope_depth_ = 0;

  HeapState gc_state_ = NOT_IN_GC;

  int gc_post_processing_depth_ = 0;

  // Returns the amount of external memory registered since the last global GC.
  V8_EXPORT_PRIVATE uint64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_ = 0;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_ = 0;

  // Starts marking when stress_marking_percentage_% of the marking start limit
  // is reached.
  int stress_marking_percentage_ = 0;

  // Observer that causes more frequent checks for reached incremental marking
  // limit.
  AllocationObserver* stress_marking_observer_ = nullptr;

  // Observer that can cause early scavenge start.
  StressScavengeObserver* stress_scavenge_observer_ = nullptr;

  bool allocation_step_in_progress_ = false;

  // The maximum percent of the marking limit reached without causing marking.
  // This is tracked when specifying --fuzzer-gc-analysis.
  double max_marking_limit_reached_ = 0.0;

  // How many mark-sweep collections happened.
  unsigned int ms_count_ = 0;

  // How many GCs happened.
  unsigned int gc_count_ = 0;

  // The number of Mark-Compact garbage collections that are considered as
  // ineffective. See IsIneffectiveMarkCompact() predicate.
  int consecutive_ineffective_mark_compacts_ = 0;

  static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
  uintptr_t mmap_region_base_ = 0;

  // For post mortem debugging.
  int remembered_unmapped_pages_index_ = 0;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  size_t old_generation_allocation_limit_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_ = false;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at
  // start.
  Object native_contexts_list_;
  Object allocation_sites_list_;

  std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
  std::vector<GCCallbackTuple> gc_prologue_callbacks_;

  GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;

  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

  size_t promoted_objects_size_ = 0;
  double promotion_ratio_ = 0.0;
  double promotion_rate_ = 0.0;
  size_t semi_space_copied_object_size_ = 0;
  size_t previous_semi_space_copied_object_size_ = 0;
  double semi_space_copied_rate_ = 0.0;
  int nodes_died_in_new_space_ = 0;
  int nodes_copied_in_new_space_ = 0;
  int nodes_promoted_ = 0;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size, we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_ = 0;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Last time an idle notification happened.
  double last_idle_notification_time_ = 0.0;

  // Last time a garbage collection happened.
  double last_gc_time_ = 0.0;

  std::unique_ptr<GCTracer> tracer_;
  std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
  MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
  std::unique_ptr<ScavengerCollector> scavenger_collector_;
  std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
  std::unique_ptr<MemoryAllocator> memory_allocator_;
  std::unique_ptr<StoreBuffer> store_buffer_;
  std::unique_ptr<HeapController> heap_controller_;
  std::unique_ptr<IncrementalMarking> incremental_marking_;
  std::unique_ptr<ConcurrentMarking> concurrent_marking_;
  std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
  std::unique_ptr<MemoryReducer> memory_reducer_;
  std::unique_ptr<ObjectStats> live_object_stats_;
  std::unique_ptr<ObjectStats> dead_object_stats_;
  std::unique_ptr<ScavengeJob> scavenge_job_;
  std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
  std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
  StrongRootsList* strong_roots_list_ = nullptr;

  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
  size_t new_space_allocation_counter_ = 0;

  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_at_last_gc_ = 0;

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_ = 0;

  // The feedback storage is used to store allocation sites (keys) and how
  // often they have been visited (values) by finding a memento behind an
  // object. The storage is only alive temporarily during a GC. The invariant
  // is that all pointers in this map are already fixed, i.e., they do not
  // point to forwarding pointers.
  PretenuringFeedbackMap global_pretenuring_feedback_;

  char trace_ring_buffer_[kTraceRingBufferSize];

  // Used as boolean.
  uint8_t is_marking_flag_ = 0;

  // If it's not full then the data is from 0 to ring_buffer_end_. If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
  bool ring_buffer_full_ = false;
  size_t ring_buffer_end_ = 0;

  // Flag is set when the heap has been configured. The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_ = false;

  // Currently set GC flags that are respected by all GC components.
  int current_gc_flags_ = Heap::kNoGCFlags;

  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
  GCCallbackFlags current_gc_callback_flags_;

  bool is_current_gc_forced_;

  ExternalStringTable external_string_table_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_ = 0;

  bool deserialization_complete_ = false;

  bool fast_promotion_mode_ = false;

  // Used for testing purposes.
  bool force_oom_ = false;
  bool delay_sweeper_tasks_for_testing_ = false;

  HeapObject pending_layout_change_object_;

  base::Mutex unprotected_memory_chunks_mutex_;
  std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
  bool unprotected_memory_chunks_registry_enabled_ = false;

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  // If the --gc-interval flag is set to a positive value, this variable
  // holds the value indicating the number of allocations remaining until the
  // next failure and garbage collection.
  int allocation_timeout_ = 0;
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

  std::map<HeapObject, HeapObject, Object::Comparer> retainer_;
  std::map<HeapObject, Root, Object::Comparer> retaining_root_;
  // If an object is retained by an ephemeron, then the retaining key of the
  // ephemeron is stored in this map.
  std::map<HeapObject, HeapObject, Object::Comparer> ephemeron_retainer_;
  // For each index in the retaining_path_targets_ array this map
  // stores the option of the corresponding target.
  std::map<int, RetainingPathOption> retaining_path_target_option_;

  std::vector<HeapObjectAllocationTracker*> allocation_trackers_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class ArrayBufferCollector;
  friend class ConcurrentMarking;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class MemoryController;
  friend class HeapIterator;
  friend class IdleScavengeObserver;
  friend class IncrementalMarking;
  friend class IncrementalMarkingJob;
  friend class LargeObjectSpace;
  template <FixedArrayVisitationMode fixed_array_mode,
            TraceRetainingPathMode retaining_path_mode, typename MarkingState>
  friend class MarkingVisitor;
  friend class MarkCompactCollector;
  friend class MarkCompactCollectorBase;
  friend class MinorMarkCompactCollector;
  friend class NewLargeObjectSpace;
  friend class NewSpace;
  friend class ObjectStatsCollector;
  friend class Page;
  friend class PagedSpace;
  friend class ReadOnlyRoots;
  friend class Scavenger;
  friend class ScavengerCollector;
  friend class Space;
  friend class StoreBuffer;
  friend class Sweeper;
  friend class heap::TestMemoryAllocatorScope;

  // The allocator interface.
  friend class Factory;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class heap::HeapTester;

  FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
  FRIEND_TEST(HeapTest, ExternalLimitDefault);
  FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
  DISALLOW_COPY_AND_ASSIGN(Heap);
};

class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  intptr_t* start_marker;                  // 0
  size_t* ro_space_size;                   // 1
  size_t* ro_space_capacity;               // 2
  size_t* new_space_size;                  // 3
  size_t* new_space_capacity;              // 4
  size_t* old_space_size;                  // 5
  size_t* old_space_capacity;              // 6
  size_t* code_space_size;                 // 7
  size_t* code_space_capacity;             // 8
  size_t* map_space_size;                  // 9
  size_t* map_space_capacity;              // 10
  size_t* lo_space_size;                   // 11
  size_t* code_lo_space_size;              // 12
  size_t* global_handle_count;             // 13
  size_t* weak_global_handle_count;        // 14
  size_t* pending_global_handle_count;     // 15
  size_t* near_death_global_handle_count;  // 16
  size_t* free_global_handle_count;        // 17
  size_t* memory_allocator_size;           // 18
  size_t* memory_allocator_capacity;       // 19
  size_t* malloced_memory;                 // 20
  size_t* malloced_peak_memory;            // 21
  size_t* objects_per_type;                // 22
  size_t* size_per_type;                   // 23
  int* os_error;                           // 24
  char* last_few_messages;                 // 25
  char* js_stacktrace;                     // 26
  intptr_t* end_marker;                    // 27
};

class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Heap* heap);
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  Heap* heap_;
};
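
// Usage sketch (illustrative): within the scope, allocation requests are
// served even where a GC would normally be triggered first.
//
//   {
//     AlwaysAllocateScope scope(isolate);
//     // ... allocations here must not fall back to a retrying GC path ...
//   }  // normal allocation behavior resumes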

// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class CodeSpaceMemoryModificationScope {
 public:
  explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
  inline ~CodeSpaceMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageCollectionMemoryModificationScope can only be used by the main
// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
// already active.
class CodePageCollectionMemoryModificationScope {
 public:
  explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
  inline ~CodePageCollectionMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageMemoryModificationScope does not check if transitions to
// writable and back to executable are actually allowed, i.e. whether the
// MemoryChunk was registered to be executable. It can be used by concurrent
// threads.
class CodePageMemoryModificationScope {
 public:
  explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
  inline ~CodePageMemoryModificationScope();

 private:
  MemoryChunk* chunk_;
  bool scope_active_;

  // Disallow any GCs inside this scope, as a relocation of the underlying
  // object would change the {MemoryChunk} that this scope targets.
  DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
};
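
// Usage sketch (illustrative): make a single chunk writable for the duration
// of an in-place code patch; protection is restored on destruction.
//
//   {
//     CodePageMemoryModificationScope modification_scope(chunk);
//     // ... patch code residing on |chunk| ...
//   }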

// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
 public:
  explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override;
  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override;
  void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;

 protected:
  V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);

  template <typename TSlot>
  V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);

  virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                              MaybeObjectSlot end);

  Heap* heap_;
};

// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
 public:
  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;
};

// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space, code space and optionally read only space. Returns each
// space in turn, and null when it is done.
class V8_EXPORT_PRIVATE PagedSpaces {
 public:
  enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };

  explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
                                       SpacesSpecifier::kSweepablePagedSpaces)
      : heap_(heap),
        counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
                                                               : OLD_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
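
// The canonical traversal pattern (a sketch derived from the next() contract
// above):
//
//   PagedSpaces spaces(heap);
//   for (PagedSpace* space = spaces.next(); space != nullptr;
//        space = spaces.next()) {
//     // ... visit |space| ...
//   }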

class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool has_next();
  Space* next();

 private:
  Heap* heap_;
  int current_space_;  // from enum AllocationSpace.
};
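
// Usage sketch, mirroring the PagedSpaces pattern above but covering every
// space of the heap:
//
//   SpaceIterator it(heap);
//   while (it.has_next()) {
//     Space* space = it.next();
//     // ... visit |space| ...
//   }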

// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of those can iterate
// over only one space.
//
// HeapIterator ensures there is no allocation during its lifetime (using an
// embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap objects
// that still remain in the heap). As the implementation of free-node
// filtering uses GC marks, it can't be used during MS/MC GC phases. Also, it
// is forbidden to interrupt iteration in this mode, as this will leave heap
// objects marked (and thus, unusable).
class V8_EXPORT_PRIVATE HeapIterator {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap,
                        HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapIterator();

  HeapObject next();

 private:
  HeapObject NextObject();

  DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)

  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  std::unique_ptr<ObjectIterator> object_iterator_;
};
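
// Usage sketch (illustrative, assuming HeapObject's is_null() accessor as the
// end-of-iteration signal):
//
//   HeapIterator iterator(heap);
//   for (HeapObject obj = iterator.next(); !obj.is_null();
//        obj = iterator.next()) {
//     // ... inspect |obj|; no allocation is allowed while iterating ...
//   }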

// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() = default;

  // Return whether this object should be retained. If nullptr is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned as in some GC situations the object has been moved.
  virtual Object RetainAs(Object object) = 0;
};
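
// A minimal subclass sketch (illustrative, not a retainer defined in V8):
// retain every object as-is, i.e. treat every weak reference as strong.
//
//   class RetainEverything final : public WeakObjectRetainer {
//    public:
//     Object RetainAs(Object object) override { return object; }
//   };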

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK_LE(kTaggedSize, step_size);
  }
  virtual ~AllocationObserver() = default;

  // Called each time the observed space does an allocation step. This may
  // happen more frequently than the step_size we are monitoring (e.g. when
  // there are multiple observers, or when a page or space boundary is
  // encountered.)
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size);

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized.) size is the size of the object as
  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
  // of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries.)
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class Space;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
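
// A minimal observer sketch (illustrative, not a class defined in this file):
// accumulate the bytes reported by each allocation step, observing the
// caveats listed for Step() above.
//
//   class ByteCountingObserver final : public AllocationObserver {
//    public:
//     explicit ByteCountingObserver(intptr_t step_size)
//         : AllocationObserver(step_size) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       total_bytes_ += bytes_allocated;  // soon_object may be null (filler)
//     }
//
//    private:
//     size_t total_bytes_ = 0;
//   };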

// -----------------------------------------------------------------------------
// Allows observation of heap object allocations.
class HeapObjectAllocationTracker {
 public:
  virtual void AllocationEvent(Address addr, int size) = 0;
  virtual void MoveEvent(Address from, Address to, int size) {}
  virtual void UpdateObjectSizeEvent(Address addr, int size) {}
  virtual ~HeapObjectAllocationTracker() = default;
};

template <typename T>
T ForwardingAddress(T heap_obj) {
  MapWord map_word = heap_obj->map_word();

  if (map_word.IsForwardingAddress()) {
    return T::cast(map_word.ToForwardingAddress());
  } else if (Heap::InFromPage(heap_obj)) {
    return T();
  } else {
    // TODO(ulan): Support minor mark-compactor here.
    return heap_obj;
  }
}
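
// Summary of the three cases above (illustrative, assuming a String argument
// and its is_null() accessor): a moved object yields its new location, a dead
// from-page object yields a default-constructed (null) T, and anything else
// did not move.
//
//   String relocated = ForwardingAddress(old_string);
//   if (relocated.is_null()) { /* old_string did not survive the scavenge */ }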

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_