1 | // Copyright 2012 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #ifndef V8_HEAP_HEAP_H_ |
6 | #define V8_HEAP_HEAP_H_ |
7 | |
8 | #include <cmath> |
9 | #include <map> |
10 | #include <unordered_map> |
11 | #include <unordered_set> |
12 | #include <vector> |
13 | |
14 | // Clients of this interface shouldn't depend on lots of heap internals. |
15 | // Do not include anything from src/heap here! |
16 | #include "include/v8-internal.h" |
17 | #include "include/v8.h" |
18 | #include "src/accessors.h" |
19 | #include "src/allocation.h" |
20 | #include "src/assert-scope.h" |
21 | #include "src/base/atomic-utils.h" |
22 | #include "src/globals.h" |
23 | #include "src/heap-symbols.h" |
24 | #include "src/objects.h" |
25 | #include "src/objects/allocation-site.h" |
26 | #include "src/objects/fixed-array.h" |
27 | #include "src/objects/heap-object.h" |
28 | #include "src/objects/smi.h" |
29 | #include "src/objects/string-table.h" |
30 | #include "src/roots.h" |
31 | #include "src/visitors.h" |
32 | #include "testing/gtest/include/gtest/gtest_prod.h" |
33 | |
34 | namespace v8 { |
35 | |
36 | namespace debug { |
37 | using OutOfMemoryCallback = void (*)(void* data); |
38 | } // namespace debug |
39 | |
40 | namespace internal { |
41 | |
42 | namespace heap { |
43 | class HeapTester; |
44 | class TestMemoryAllocatorScope; |
45 | } // namespace heap |
46 | |
47 | class ObjectBoilerplateDescription; |
48 | class BytecodeArray; |
49 | class CodeDataContainer; |
50 | class DeoptimizationData; |
51 | class HandlerTable; |
52 | class IncrementalMarking; |
53 | class JSArrayBuffer; |
54 | class ExternalString; |
55 | using v8::MemoryPressureLevel; |
56 | |
57 | class AllocationObserver; |
58 | class ArrayBufferCollector; |
59 | class ArrayBufferTracker; |
60 | class CodeLargeObjectSpace; |
61 | class ConcurrentMarking; |
62 | class GCIdleTimeHandler; |
63 | class GCIdleTimeHeapState; |
64 | class GCTracer; |
65 | class HeapController; |
66 | class HeapObjectAllocationTracker; |
67 | class HeapObjectsFilter; |
68 | class HeapStats; |
69 | class HistogramTimer; |
70 | class Isolate; |
71 | class JSFinalizationGroup; |
72 | class LocalEmbedderHeapTracer; |
73 | class MemoryAllocator; |
74 | class MemoryReducer; |
75 | class MinorMarkCompactCollector; |
76 | class ObjectIterator; |
77 | class ObjectStats; |
78 | class Page; |
79 | class PagedSpace; |
80 | class ReadOnlyHeap; |
81 | class RootVisitor; |
82 | class ScavengeJob; |
83 | class Scavenger; |
84 | class ScavengerCollector; |
85 | class Space; |
86 | class StoreBuffer; |
87 | class StressScavengeObserver; |
88 | class TimedHistogram; |
89 | class TracePossibleWrapperReporter; |
90 | class WeakObjectRetainer; |
91 | |
92 | enum ArrayStorageAllocationMode { |
93 | DONT_INITIALIZE_ARRAY_ELEMENTS, |
94 | INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE |
95 | }; |
96 | |
97 | enum class ClearRecordedSlots { kYes, kNo }; |
98 | |
99 | enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory }; |
100 | |
101 | enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes }; |
102 | |
103 | enum class FixedArrayVisitationMode { kRegular, kIncremental }; |
104 | |
105 | enum class TraceRetainingPathMode { kEnabled, kDisabled }; |
106 | |
107 | enum class RetainingPathOption { kDefault, kTrackEphemeronPath }; |
108 | |
109 | enum class GarbageCollectionReason { |
110 | kUnknown = 0, |
111 | kAllocationFailure = 1, |
112 | kAllocationLimit = 2, |
113 | kContextDisposal = 3, |
114 | kCountersExtension = 4, |
115 | kDebugger = 5, |
116 | kDeserializer = 6, |
117 | kExternalMemoryPressure = 7, |
118 | kFinalizeMarkingViaStackGuard = 8, |
119 | kFinalizeMarkingViaTask = 9, |
120 | kFullHashtable = 10, |
121 | kHeapProfiler = 11, |
122 | kIdleTask = 12, |
123 | kLastResort = 13, |
124 | kLowMemoryNotification = 14, |
125 | kMakeHeapIterable = 15, |
126 | kMemoryPressure = 16, |
127 | kMemoryReducer = 17, |
128 | kRuntime = 18, |
129 | kSamplingProfiler = 19, |
130 | kSnapshotCreator = 20, |
131 | kTesting = 21, |
132 | kExternalFinalize = 22 |
133 | // If you add new items here, then update the incremental_marking_reason, |
134 | // mark_compact_reason, and scavenge_reason counters in counters.h. |
135 | // Also update src/tools/metrics/histograms/histograms.xml in chromium. |
136 | }; |
137 | |
138 | enum class YoungGenerationHandling { |
139 | kRegularScavenge = 0, |
140 | kFastPromotionDuringScavenge = 1, |
141 | // Histogram::InspectConstructionArguments in chromium requires us to have at |
142 | // least three buckets. |
143 | kUnusedBucket = 2, |
144 | // If you add new items here, then update the young_generation_handling in |
145 | // counters.h. |
146 | // Also update src/tools/metrics/histograms/histograms.xml in chromium. |
147 | }; |
148 | |
149 | enum class GCIdleTimeAction : uint8_t; |
150 | |
151 | class AllocationResult { |
152 | public: |
153 | static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) { |
154 | return AllocationResult(space); |
155 | } |
156 | |
157 | // Implicit constructor from Object. |
158 | AllocationResult(Object object) // NOLINT |
159 | : object_(object) { |
160 | // AllocationResults can't return Smis, which are used to represent |
161 | // failure and the space to retry in. |
162 | CHECK(!object->IsSmi()); |
163 | } |
164 | |
165 | AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {} |
166 | |
167 | inline bool IsRetry() { return object_->IsSmi(); } |
168 | inline HeapObject ToObjectChecked(); |
169 | inline AllocationSpace RetrySpace(); |
170 | |
171 | template <typename T> |
172 | bool To(T* obj) { |
173 | if (IsRetry()) return false; |
174 | *obj = T::cast(object_); |
175 | return true; |
176 | } |
177 | |
178 | private: |
179 | explicit AllocationResult(AllocationSpace space) |
180 | : object_(Smi::FromInt(static_cast<int>(space))) {} |
181 | |
182 | Object object_; |
183 | }; |
184 | |
185 | STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize); |
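// A minimal usage sketch of AllocationResult (illustrative only; Allocate()
// below is a hypothetical allocation routine, not part of this header):
//
//   AllocationResult result = Allocate(size_in_bytes, AllocationType::kYoung);
//   HeapObject obj;
//   if (!result.To(&obj)) {
//     // Allocation failed; RetrySpace() names the space to collect before
//     // retrying.
//     heap->CollectGarbage(result.RetrySpace(),
//                          GarbageCollectionReason::kAllocationFailure);
//   }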
186 | |
187 | #ifdef DEBUG |
// CommentStatistic accumulates the size and count of code comments that
// share the same comment text (DEBUG builds only).
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = nullptr;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
200 | #endif |
201 | |
202 | using EphemeronRememberedSet = |
203 | std::unordered_map<EphemeronHashTable, std::unordered_set<int>, |
204 | Object::Hasher>; |
205 | |
206 | class Heap { |
207 | public: |
208 | // Stores ephemeron entries where the EphemeronHashTable is in old-space, |
209 | // and the key of the entry is in new-space. Such keys do not appear in the |
210 | // usual OLD_TO_NEW remembered set. |
211 | EphemeronRememberedSet ephemeron_remembered_set_; |
212 | enum FindMementoMode { kForRuntime, kForGC }; |
213 | |
214 | enum HeapState { |
215 | NOT_IN_GC, |
216 | SCAVENGE, |
217 | MARK_COMPACT, |
218 | MINOR_MARK_COMPACT, |
219 | TEAR_DOWN |
220 | }; |
221 | |
222 | using PretenuringFeedbackMap = |
223 | std::unordered_map<AllocationSite, size_t, Object::Hasher>; |
224 | |
225 | // Taking this mutex prevents the GC from entering a phase that relocates |
226 | // object references. |
227 | base::Mutex* relocation_mutex() { return &relocation_mutex_; } |
228 | |
229 | // Support for partial snapshots. After calling this we have a linear |
230 | // space to write objects in each space. |
231 | struct Chunk { |
232 | uint32_t size; |
233 | Address start; |
234 | Address end; |
235 | }; |
236 | using Reservation = std::vector<Chunk>; |
237 | |
238 | static const int kInitalOldGenerationLimitFactor = 2; |
239 | |
240 | #if V8_OS_ANDROID |
241 | // Don't apply pointer multiplier on Android since it has no swap space and |
// should instead adapt its heap size based on available physical memory.
243 | static const int kPointerMultiplier = 1; |
244 | #else |
// TODO(ishell): kSystemPointerMultiplier?
246 | static const int kPointerMultiplier = i::kSystemPointerSize / 4; |
247 | #endif |
248 | |
249 | // Semi-space size needs to be a multiple of page size. |
250 | static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier; |
251 | static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier; |
252 | |
253 | STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0); |
254 | STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0); |
255 | |
256 | static const int kTraceRingBufferSize = 512; |
257 | static const int kStacktraceBufferSize = 512; |
258 | |
259 | static const int kNoGCFlags = 0; |
static const int kReduceMemoryFootprintMask = 1;
261 | |
262 | // The minimum size of a HeapObject on the heap. |
263 | static const int kMinObjectSizeInTaggedWords = 2; |
264 | |
265 | static const int kMinPromotedPercentForFastPromotionMode = 90; |
266 | |
267 | STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) == |
268 | Internals::kUndefinedValueRootIndex); |
269 | STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) == |
270 | Internals::kTheHoleValueRootIndex); |
271 | STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) == |
272 | Internals::kNullValueRootIndex); |
273 | STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) == |
274 | Internals::kTrueValueRootIndex); |
275 | STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) == |
276 | Internals::kFalseValueRootIndex); |
277 | STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) == |
278 | Internals::kEmptyStringRootIndex); |
279 | |
280 | // Calculates the maximum amount of filler that could be required by the |
281 | // given alignment. |
282 | V8_EXPORT_PRIVATE static int GetMaximumFillToAlign( |
283 | AllocationAlignment alignment); |
284 | // Calculates the actual amount of filler required for a given address at the |
285 | // given alignment. |
286 | V8_EXPORT_PRIVATE static int GetFillToAlign(Address address, |
287 | AllocationAlignment alignment); |
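// Illustrative sketch of how these two helpers are typically combined
// (kDoubleAligned is just an example alignment; the names are made up):
//
//   int filler_budget = Heap::GetMaximumFillToAlign(kDoubleAligned);
//   int reserved_size = object_size + filler_budget;
//   ...
//   int fill = Heap::GetFillToAlign(candidate_address, kDoubleAligned);
//   Address aligned_start = candidate_address + fill;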
288 | |
289 | // Returns the size of the initial area of a code-range, which is marked |
290 | // writable and reserved to contain unwind information. |
291 | static size_t GetCodeRangeReservedAreaSize(); |
292 | |
293 | void FatalProcessOutOfMemory(const char* location); |
294 | |
295 | // Checks whether the space is valid. |
296 | static bool IsValidAllocationSpace(AllocationSpace space); |
297 | |
// Zapping is needed for heap verification and is always done in debug builds.
299 | static inline bool ShouldZapGarbage() { |
300 | #ifdef DEBUG |
301 | return true; |
302 | #else |
303 | #ifdef VERIFY_HEAP |
304 | return FLAG_verify_heap; |
305 | #else |
306 | return false; |
307 | #endif |
308 | #endif |
309 | } |
310 | |
311 | // Helper function to get the bytecode flushing mode based on the flags. This |
// is required because it is not safe to access flags in the concurrent marker.
313 | static inline BytecodeFlushMode GetBytecodeFlushMode() { |
314 | if (FLAG_stress_flush_bytecode) { |
315 | return BytecodeFlushMode::kStressFlushBytecode; |
316 | } else if (FLAG_flush_bytecode) { |
317 | return BytecodeFlushMode::kFlushBytecode; |
318 | } |
319 | return BytecodeFlushMode::kDoNotFlushBytecode; |
320 | } |
321 | |
322 | static uintptr_t ZapValue() { |
323 | return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue; |
324 | } |
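// Illustrative sketch (hypothetical helper, not part of this header) of how
// ShouldZapGarbage() and ZapValue() are typically combined when releasing a
// memory region:
//
//   void MaybeZapRegion(Address start, size_t size_in_bytes) {
//     if (!Heap::ShouldZapGarbage()) return;
//     for (size_t offset = 0; offset + kTaggedSize <= size_in_bytes;
//          offset += kTaggedSize) {
//       *reinterpret_cast<Address*>(start + offset) = Heap::ZapValue();
//     }
//   }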
325 | |
326 | static inline bool IsYoungGenerationCollector(GarbageCollector collector) { |
327 | return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR; |
328 | } |
329 | |
330 | static inline GarbageCollector YoungGenerationCollector() { |
331 | #if ENABLE_MINOR_MC |
332 | return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER; |
333 | #else |
334 | return SCAVENGER; |
335 | #endif // ENABLE_MINOR_MC |
336 | } |
337 | |
338 | static inline const char* CollectorName(GarbageCollector collector) { |
339 | switch (collector) { |
340 | case SCAVENGER: |
341 | return "Scavenger" ; |
342 | case MARK_COMPACTOR: |
343 | return "Mark-Compact" ; |
344 | case MINOR_MARK_COMPACTOR: |
345 | return "Minor Mark-Compact" ; |
346 | } |
347 | return "Unknown collector" ; |
348 | } |
349 | |
350 | // Copy block of memory from src to dst. Size of block should be aligned |
351 | // by pointer size. |
352 | static inline void CopyBlock(Address dst, Address src, int byte_size); |
353 | |
354 | V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host); |
355 | V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object, |
356 | Address slot, |
357 | HeapObject value); |
358 | V8_EXPORT_PRIVATE void RecordEphemeronKeyWrite(EphemeronHashTable table, |
359 | Address key_slot); |
360 | V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode( |
361 | Address raw_object, Address address, Isolate* isolate); |
362 | V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow( |
363 | Heap* heap, FixedArray array, int offset, int length); |
364 | V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow( |
365 | Code host, RelocInfo* rinfo, HeapObject value); |
366 | V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object, |
367 | Address slot, |
368 | HeapObject value); |
369 | V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow( |
370 | Heap* heap, HeapObject object); |
371 | V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host, |
372 | RelocInfo* rinfo, |
373 | HeapObject value); |
374 | V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow( |
375 | Heap* heap, HeapObject host, HeapObject descriptor_array, |
376 | int number_of_own_descriptors); |
377 | V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object); |
378 | |
// Notifies the heap that it is ok to start marking or other activities that
380 | // should not happen during deserialization. |
381 | void NotifyDeserializationComplete(); |
382 | |
383 | void NotifyBootstrapComplete(); |
384 | |
385 | void NotifyOldGenerationExpansion(); |
386 | |
387 | inline Address* NewSpaceAllocationTopAddress(); |
388 | inline Address* NewSpaceAllocationLimitAddress(); |
389 | inline Address* OldSpaceAllocationTopAddress(); |
390 | inline Address* OldSpaceAllocationLimitAddress(); |
391 | |
// Move len elements within a given array from src_index to dst_index.
394 | void MoveElements(FixedArray array, int dst_index, int src_index, int len, |
395 | WriteBarrierMode mode = UPDATE_WRITE_BARRIER); |
396 | |
397 | // Copy len elements from src_index of src array to dst_index of dst array. |
398 | void CopyElements(FixedArray dst, FixedArray src, int dst_index, |
399 | int src_index, int len, WriteBarrierMode mode); |
400 | |
401 | // Initialize a filler object to keep the ability to iterate over the heap |
402 | // when introducing gaps within pages. If slots could have been recorded in |
403 | // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise, |
404 | // pass ClearRecordedSlots::kNo. If the memory after the object header of |
405 | // the filler should be cleared, pass in kClearFreedMemory. The default is |
406 | // kDontClearFreedMemory. |
407 | V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt( |
408 | Address addr, int size, ClearRecordedSlots clear_slots_mode, |
409 | ClearFreedMemoryMode clear_memory_mode = |
410 | ClearFreedMemoryMode::kDontClearFreedMemory); |
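// Illustrative call (sketch): punching an iterable gap over a freed range
// whose slots may have been recorded:
//
//   heap->CreateFillerObjectAt(free_start, static_cast<int>(free_size),
//                              ClearRecordedSlots::kYes);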
411 | |
412 | template <typename T> |
413 | void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim); |
414 | |
415 | bool CanMoveObjectStart(HeapObject object); |
416 | |
417 | bool IsImmovable(HeapObject object); |
418 | |
419 | static bool IsLargeObject(HeapObject object); |
420 | |
421 | // Trim the given array from the left. Note that this relocates the object |
422 | // start and hence is only valid if there is only a single reference to it. |
423 | V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj, |
424 | int elements_to_trim); |
425 | |
426 | // Trim the given array from the right. |
427 | V8_EXPORT_PRIVATE void RightTrimFixedArray(FixedArrayBase obj, |
428 | int elements_to_trim); |
429 | void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim); |
430 | |
431 | // Converts the given boolean condition to JavaScript boolean value. |
432 | inline Oddball ToBoolean(bool condition); |
433 | |
434 | // Notify the heap that a context has been disposed. |
435 | V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context); |
436 | |
437 | void set_native_contexts_list(Object object) { |
438 | native_contexts_list_ = object; |
439 | } |
440 | Object native_contexts_list() const { return native_contexts_list_; } |
441 | |
442 | void set_allocation_sites_list(Object object) { |
443 | allocation_sites_list_ = object; |
444 | } |
445 | Object allocation_sites_list() { return allocation_sites_list_; } |
446 | |
447 | // Used in CreateAllocationSiteStub and the (de)serializer. |
448 | Address allocation_sites_list_address() { |
449 | return reinterpret_cast<Address>(&allocation_sites_list_); |
450 | } |
451 | |
// Traverse all the allocation sites [nested_site and weak_next] in the list
// and call the visitor for each of them.
454 | void ForeachAllocationSite( |
455 | Object list, const std::function<void(AllocationSite)>& visitor); |
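// Illustrative call (sketch): walking the allocation-site list with a lambda:
//
//   heap->ForeachAllocationSite(
//       heap->allocation_sites_list(),
//       [](AllocationSite site) { /* inspect the site */ });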
456 | |
457 | // Number of mark-sweeps. |
458 | int ms_count() const { return ms_count_; } |
459 | |
// Checks whether the given object is allowed to be migrated from its
461 | // current space into the given destination space. Used for debugging. |
462 | bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest); |
463 | |
464 | void CheckHandleCount(); |
465 | |
466 | // Number of "runtime allocations" done so far. |
467 | uint32_t allocations_count() { return allocations_count_; } |
468 | |
469 | // Print short heap statistics. |
470 | void PrintShortHeapStatistics(); |
471 | |
472 | bool write_protect_code_memory() const { return write_protect_code_memory_; } |
473 | |
474 | uintptr_t code_space_memory_modification_scope_depth() { |
475 | return code_space_memory_modification_scope_depth_; |
476 | } |
477 | |
478 | void increment_code_space_memory_modification_scope_depth() { |
479 | code_space_memory_modification_scope_depth_++; |
480 | } |
481 | |
482 | void decrement_code_space_memory_modification_scope_depth() { |
483 | code_space_memory_modification_scope_depth_--; |
484 | } |
485 | |
486 | void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk); |
487 | V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(HeapObject object); |
488 | void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk); |
489 | V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks(); |
490 | |
491 | void EnableUnprotectedMemoryChunksRegistry() { |
492 | unprotected_memory_chunks_registry_enabled_ = true; |
493 | } |
494 | |
495 | void DisableUnprotectedMemoryChunksRegistry() { |
496 | unprotected_memory_chunks_registry_enabled_ = false; |
497 | } |
498 | |
499 | bool unprotected_memory_chunks_registry_enabled() { |
500 | return unprotected_memory_chunks_registry_enabled_; |
501 | } |
502 | |
503 | inline HeapState gc_state() { return gc_state_; } |
504 | void SetGCState(HeapState state); |
505 | bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; } |
506 | |
507 | inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |
508 | |
509 | // If an object has an AllocationMemento trailing it, return it, otherwise |
510 | // return a null AllocationMemento. |
511 | template <FindMementoMode mode> |
512 | inline AllocationMemento FindAllocationMemento(Map map, HeapObject object); |
513 | |
514 | // Returns false if not able to reserve. |
515 | bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps); |
516 | |
517 | // |
518 | // Support for the API. |
519 | // |
520 | |
521 | void CreateApiObjects(); |
522 | |
523 | // Implements the corresponding V8 API function. |
524 | bool IdleNotification(double deadline_in_seconds); |
525 | bool IdleNotification(int idle_time_in_ms); |
526 | |
527 | V8_EXPORT_PRIVATE void MemoryPressureNotification(MemoryPressureLevel level, |
528 | bool is_isolate_locked); |
529 | void CheckMemoryPressure(); |
530 | |
531 | V8_EXPORT_PRIVATE void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, |
532 | void* data); |
533 | V8_EXPORT_PRIVATE void RemoveNearHeapLimitCallback( |
534 | v8::NearHeapLimitCallback callback, size_t heap_limit); |
535 | V8_EXPORT_PRIVATE void AutomaticallyRestoreInitialHeapLimit( |
536 | double threshold_percent); |
537 | |
538 | V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs(); |
539 | |
540 | void RecordStats(HeapStats* stats, bool take_snapshot = false); |
541 | |
// Check the new space expansion criteria and expand semispaces if they were
// hit.
543 | void CheckNewSpaceExpansionCriteria(); |
544 | |
545 | void VisitExternalResources(v8::ExternalResourceVisitor* visitor); |
546 | |
// An object should be promoted if it has survived a scavenge operation.
549 | inline bool ShouldBePromoted(Address old_address); |
550 | |
551 | void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature); |
552 | |
553 | inline int NextScriptId(); |
554 | inline int NextDebuggingId(); |
555 | inline int GetNextTemplateSerialNumber(); |
556 | |
557 | void SetSerializedObjects(FixedArray objects); |
558 | void SetSerializedGlobalProxySizes(FixedArray sizes); |
559 | |
560 | // For post mortem debugging. |
561 | void RememberUnmappedPage(Address page, bool compacted); |
562 | |
563 | int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; } |
564 | |
565 | V8_INLINE int64_t external_memory(); |
566 | V8_INLINE void update_external_memory(int64_t delta); |
567 | V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed); |
568 | V8_INLINE void account_external_memory_concurrently_freed(); |
569 | |
570 | size_t backing_store_bytes() const { return backing_store_bytes_; } |
571 | |
572 | void CompactWeakArrayLists(AllocationType allocation); |
573 | |
574 | V8_EXPORT_PRIVATE void AddRetainedMap(Handle<Map> map); |
575 | |
576 | // This event is triggered after successful allocation of a new object made |
577 | // by runtime. Allocations of target space for object evacuation do not |
578 | // trigger the event. In order to track ALL allocations one must turn off |
579 | // FLAG_inline_new. |
580 | inline void OnAllocationEvent(HeapObject object, int size_in_bytes); |
581 | |
582 | // This event is triggered after object is moved to a new place. |
583 | void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes); |
584 | |
585 | inline bool CanAllocateInReadOnlySpace(); |
586 | bool deserialization_complete() const { return deserialization_complete_; } |
587 | |
588 | bool HasLowAllocationRate(); |
589 | bool HasHighFragmentation(); |
590 | bool HasHighFragmentation(size_t used, size_t committed); |
591 | |
592 | void ActivateMemoryReducerIfNeeded(); |
593 | |
594 | V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage(); |
595 | |
596 | bool HighMemoryPressure() { |
597 | return memory_pressure_level_ != MemoryPressureLevel::kNone; |
598 | } |
599 | |
600 | void RestoreHeapLimit(size_t heap_limit) { |
601 | // Do not set the limit lower than the live size + some slack. |
602 | size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4; |
603 | max_old_generation_size_ = |
604 | Min(max_old_generation_size_, Max(heap_limit, min_limit)); |
605 | } |
606 | |
607 | // =========================================================================== |
608 | // Initialization. =========================================================== |
609 | // =========================================================================== |
610 | |
611 | // Configure heap sizes |
612 | // max_semi_space_size_in_kb: maximum semi-space size in KB |
613 | // max_old_generation_size_in_mb: maximum old generation size in MB |
614 | // code_range_size_in_mb: code range size in MB |
615 | void ConfigureHeap(size_t max_semi_space_size_in_kb, |
616 | size_t max_old_generation_size_in_mb, |
617 | size_t code_range_size_in_mb); |
618 | void ConfigureHeapDefault(); |
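// Illustrative call to ConfigureHeap (sketch; the sizes below are example
// values only): a 16 MB semi-space, a 512 MB old generation and a 128 MB
// code range.
//
//   heap->ConfigureHeap(16 * 1024 /* KB */, 512 /* MB */, 128 /* MB */);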
619 | |
620 | // Prepares the heap, setting up for deserialization. |
621 | void SetUp(); |
622 | |
623 | // Sets read-only heap and space. |
624 | void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap); |
625 | |
626 | // Sets up the heap memory without creating any objects. |
627 | void SetUpSpaces(); |
628 | |
629 | // (Re-)Initialize hash seed from flag or RNG. |
630 | void InitializeHashSeed(); |
631 | |
632 | // Bootstraps the object heap with the core set of objects required to run. |
633 | // Returns whether it succeeded. |
634 | bool CreateHeapObjects(); |
635 | |
636 | // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr. |
637 | void CreateObjectStats(); |
638 | |
639 | // Sets the TearDown state, so no new GC tasks get posted. |
640 | void StartTearDown(); |
641 | |
642 | // Destroys all memory allocated by the heap. |
643 | void TearDown(); |
644 | |
645 | // Returns whether SetUp has been called. |
646 | bool HasBeenSetUp(); |
647 | |
648 | // =========================================================================== |
649 | // Getters for spaces. ======================================================= |
650 | // =========================================================================== |
651 | |
652 | inline Address NewSpaceTop(); |
653 | |
654 | NewSpace* new_space() { return new_space_; } |
655 | OldSpace* old_space() { return old_space_; } |
656 | CodeSpace* code_space() { return code_space_; } |
657 | MapSpace* map_space() { return map_space_; } |
658 | LargeObjectSpace* lo_space() { return lo_space_; } |
659 | CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; } |
660 | NewLargeObjectSpace* new_lo_space() { return new_lo_space_; } |
661 | ReadOnlySpace* read_only_space() { return read_only_space_; } |
662 | |
663 | inline PagedSpace* paged_space(int idx); |
664 | inline Space* space(int idx); |
665 | |
666 | // Returns name of the space. |
667 | V8_EXPORT_PRIVATE static const char* GetSpaceName(AllocationSpace space); |
668 | |
669 | // =========================================================================== |
670 | // Getters to other components. ============================================== |
671 | // =========================================================================== |
672 | |
673 | ReadOnlyHeap* read_only_heap() const { return read_only_heap_; } |
674 | |
675 | GCTracer* tracer() { return tracer_.get(); } |
676 | |
677 | MemoryAllocator* memory_allocator() { return memory_allocator_.get(); } |
678 | |
679 | inline Isolate* isolate(); |
680 | |
681 | MarkCompactCollector* mark_compact_collector() { |
682 | return mark_compact_collector_.get(); |
683 | } |
684 | |
685 | MinorMarkCompactCollector* minor_mark_compact_collector() { |
686 | return minor_mark_compact_collector_; |
687 | } |
688 | |
689 | ArrayBufferCollector* array_buffer_collector() { |
690 | return array_buffer_collector_.get(); |
691 | } |
692 | |
693 | // =========================================================================== |
694 | // Root set access. ========================================================== |
695 | // =========================================================================== |
696 | |
697 | // Shortcut to the roots table stored in the Isolate. |
698 | V8_INLINE RootsTable& roots_table(); |
699 | |
700 | // Heap root getters. |
701 | #define ROOT_ACCESSOR(type, name, CamelName) inline type name(); |
702 | MUTABLE_ROOT_LIST(ROOT_ACCESSOR) |
703 | #undef ROOT_ACCESSOR |
704 | |
705 | V8_INLINE void SetRootMaterializedObjects(FixedArray objects); |
706 | V8_INLINE void SetRootScriptList(Object value); |
707 | V8_INLINE void SetRootStringTable(StringTable value); |
708 | V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value); |
709 | V8_INLINE void SetMessageListeners(TemplateList value); |
710 | V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode); |
711 | |
712 | // Set the stack limit in the roots table. Some architectures generate |
713 | // code that looks here, because it is faster than loading from the static |
714 | // jslimit_/real_jslimit_ variable in the StackGuard. |
715 | void SetStackLimits(); |
716 | |
717 | // The stack limit is thread-dependent. To be able to reproduce the same |
718 | // snapshot blob, we need to reset it before serializing. |
719 | void ClearStackLimits(); |
720 | |
721 | void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end); |
722 | void UnregisterStrongRoots(FullObjectSlot start); |
723 | |
724 | void SetBuiltinsConstantsTable(FixedArray cache); |
725 | |
726 | // A full copy of the interpreter entry trampoline, used as a template to |
727 | // create copies of the builtin at runtime. The copies are used to create |
728 | // better profiling information for ticks in bytecode execution. Note that |
729 | // this is always a copy of the full builtin, i.e. not the off-heap |
730 | // trampoline. |
731 | // See also: FLAG_interpreted_frames_native_stack. |
732 | void SetInterpreterEntryTrampolineForProfiling(Code code); |
733 | |
734 | // Add finalization_group into the dirty_js_finalization_groups list. |
735 | void AddDirtyJSFinalizationGroup( |
736 | JSFinalizationGroup finalization_group, |
737 | std::function<void(HeapObject object, ObjectSlot slot, Object target)> |
738 | gc_notify_updated_slot); |
739 | |
740 | V8_EXPORT_PRIVATE void AddKeepDuringJobTarget(Handle<JSReceiver> target); |
741 | void ClearKeepDuringJobSet(); |
742 | |
743 | // =========================================================================== |
744 | // Inline allocation. ======================================================== |
745 | // =========================================================================== |
746 | |
747 | // Indicates whether inline bump-pointer allocation has been disabled. |
748 | bool inline_allocation_disabled() { return inline_allocation_disabled_; } |
749 | |
750 | // Switch whether inline bump-pointer allocation should be used. |
751 | V8_EXPORT_PRIVATE void EnableInlineAllocation(); |
752 | V8_EXPORT_PRIVATE void DisableInlineAllocation(); |
753 | |
754 | // =========================================================================== |
755 | // Methods triggering GCs. =================================================== |
756 | // =========================================================================== |
757 | |
758 | // Performs garbage collection operation. |
759 | // Returns whether there is a chance that another major GC could |
760 | // collect more garbage. |
761 | V8_EXPORT_PRIVATE bool CollectGarbage( |
762 | AllocationSpace space, GarbageCollectionReason gc_reason, |
763 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
764 | |
765 | // Performs a full garbage collection. |
766 | V8_EXPORT_PRIVATE void CollectAllGarbage( |
767 | int flags, GarbageCollectionReason gc_reason, |
768 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
769 | |
770 | // Last hope GC, should try to squeeze as much as possible. |
771 | V8_EXPORT_PRIVATE void CollectAllAvailableGarbage( |
772 | GarbageCollectionReason gc_reason); |
773 | |
774 | // Precise garbage collection that potentially finalizes already running |
775 | // incremental marking before performing an atomic garbage collection. |
776 | // Only use if absolutely necessary or in tests to avoid floating garbage! |
777 | V8_EXPORT_PRIVATE void PreciseCollectAllGarbage( |
778 | int flags, GarbageCollectionReason gc_reason, |
779 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
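// Illustrative call (sketch): a test forcing a precise full GC with memory
// reduction:
//
//   heap->PreciseCollectAllGarbage(Heap::kReduceMemoryFootprintMask,
//                                  GarbageCollectionReason::kTesting);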
780 | |
// Reports an external memory pressure event; either performs a major GC or
782 | // completes incremental marking in order to free external resources. |
783 | void ReportExternalMemoryPressure(); |
784 | |
785 | using GetExternallyAllocatedMemoryInBytesCallback = |
786 | v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback; |
787 | |
788 | void SetGetExternallyAllocatedMemoryInBytesCallback( |
789 | GetExternallyAllocatedMemoryInBytesCallback callback) { |
790 | external_memory_callback_ = callback; |
791 | } |
792 | |
793 | // Invoked when GC was requested via the stack guard. |
794 | void HandleGCRequest(); |
795 | |
796 | // =========================================================================== |
797 | // Builtins. ================================================================= |
798 | // =========================================================================== |
799 | |
800 | V8_EXPORT_PRIVATE Code builtin(int index); |
801 | Address builtin_address(int index); |
802 | void set_builtin(int index, Code builtin); |
803 | |
804 | // =========================================================================== |
805 | // Iterators. ================================================================ |
806 | // =========================================================================== |
807 | |
808 | // None of these methods iterate over the read-only roots. To do this use |
809 | // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for |
810 | // garbage collection and is usually only performed as part of |
811 | // (de)serialization or heap verification. |
812 | |
813 | // Iterates over the strong roots and the weak roots. |
814 | void IterateRoots(RootVisitor* v, VisitMode mode); |
815 | // Iterates over the strong roots. |
816 | void IterateStrongRoots(RootVisitor* v, VisitMode mode); |
817 | // Iterates over entries in the smi roots list. Only interesting to the |
818 | // serializer/deserializer, since GC does not care about smis. |
819 | void IterateSmiRoots(RootVisitor* v); |
820 | // Iterates over weak string tables. |
821 | void IterateWeakRoots(RootVisitor* v, VisitMode mode); |
822 | // Iterates over weak global handles. |
823 | void IterateWeakGlobalHandles(RootVisitor* v); |
824 | // Iterates over builtins. |
825 | void IterateBuiltins(RootVisitor* v); |
826 | |
827 | // =========================================================================== |
828 | // Store buffer API. ========================================================= |
829 | // =========================================================================== |
830 | |
// Used to query the incremental marking status from generated code.
832 | Address* IsMarkingFlagAddress() { |
833 | return reinterpret_cast<Address*>(&is_marking_flag_); |
834 | } |
835 | |
836 | void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; } |
837 | |
838 | Address* store_buffer_top_address(); |
839 | static intptr_t store_buffer_mask_constant(); |
840 | static Address store_buffer_overflow_function_address(); |
841 | |
842 | void ClearRecordedSlot(HeapObject object, ObjectSlot slot); |
843 | void ClearRecordedSlotRange(Address start, Address end); |
844 | |
845 | #ifdef DEBUG |
846 | void VerifyClearedSlot(HeapObject object, ObjectSlot slot); |
847 | #endif |
848 | |
849 | // =========================================================================== |
850 | // Incremental marking API. ================================================== |
851 | // =========================================================================== |
852 | |
853 | int GCFlagsForIncrementalMarking() { |
854 | return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask |
855 | : kNoGCFlags; |
856 | } |
857 | |
858 | // Start incremental marking and ensure that idle time handler can perform |
859 | // incremental steps. |
860 | V8_EXPORT_PRIVATE void StartIdleIncrementalMarking( |
861 | GarbageCollectionReason gc_reason, |
862 | GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags); |
863 | |
864 | // Starts incremental marking assuming incremental marking is currently |
865 | // stopped. |
866 | V8_EXPORT_PRIVATE void StartIncrementalMarking( |
867 | int gc_flags, GarbageCollectionReason gc_reason, |
868 | GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags); |
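// Illustrative call (sketch): starting incremental marking with the flags
// suggested by GCFlagsForIncrementalMarking():
//
//   heap->StartIncrementalMarking(heap->GCFlagsForIncrementalMarking(),
//                                 GarbageCollectionReason::kRuntime);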
869 | |
870 | void StartIncrementalMarkingIfAllocationLimitIsReached( |
871 | int gc_flags, |
872 | GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags); |
873 | |
874 | void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason); |
875 | // Synchronously finalizes incremental marking. |
876 | void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason); |
877 | |
878 | void RegisterDeserializedObjectsForBlackAllocation( |
879 | Reservation* reservations, const std::vector<HeapObject>& large_objects, |
880 | const std::vector<Address>& maps); |
881 | |
882 | IncrementalMarking* incremental_marking() { |
883 | return incremental_marking_.get(); |
884 | } |
885 | |
886 | // =========================================================================== |
887 | // Concurrent marking API. =================================================== |
888 | // =========================================================================== |
889 | |
890 | ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); } |
891 | |
// The runtime uses this function to inform the GC of potentially unsafe
// object layout changes that require special synchronization with the
// concurrent marker. The old size is the size of the object before the
// layout change.
895 | void NotifyObjectLayoutChange(HeapObject object, int old_size, |
896 | const DisallowHeapAllocation&); |
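// Illustrative usage (sketch): notify the GC before changing an object's
// layout in place:
//
//   DisallowHeapAllocation no_gc;
//   heap->NotifyObjectLayoutChange(object, old_size, no_gc);
//   // ... now it is safe to rewrite the object's map/fields ...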
897 | |
898 | #ifdef VERIFY_HEAP |
899 | // This function checks that either |
900 | // - the map transition is safe, |
901 | // - or it was communicated to GC using NotifyObjectLayoutChange. |
902 | V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object, |
903 | Map new_map); |
904 | #endif |
905 | |
906 | // =========================================================================== |
907 | // Deoptimization support API. =============================================== |
908 | // =========================================================================== |
909 | |
910 | // Setters for code offsets of well-known deoptimization targets. |
911 | void SetArgumentsAdaptorDeoptPCOffset(int pc_offset); |
912 | void SetConstructStubCreateDeoptPCOffset(int pc_offset); |
913 | void SetConstructStubInvokeDeoptPCOffset(int pc_offset); |
914 | void SetInterpreterEntryReturnPCOffset(int pc_offset); |
915 | |
916 | // Invalidates references in the given {code} object that are referenced |
917 | // transitively from the deoptimization data. Mutates write-protected code. |
918 | void InvalidateCodeDeoptimizationData(Code code); |
919 | |
920 | void DeoptMarkedAllocationSites(); |
921 | |
922 | bool DeoptMaybeTenuredAllocationSites(); |
923 | |
924 | // =========================================================================== |
925 | // Embedder heap tracer support. ============================================= |
926 | // =========================================================================== |
927 | |
928 | LocalEmbedderHeapTracer* local_embedder_heap_tracer() const { |
929 | return local_embedder_heap_tracer_.get(); |
930 | } |
931 | |
932 | void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer); |
933 | EmbedderHeapTracer* GetEmbedderHeapTracer() const; |
934 | |
935 | void RegisterExternallyReferencedObject(Address* location); |
936 | void SetEmbedderStackStateForNextFinalizaton( |
937 | EmbedderHeapTracer::EmbedderStackState stack_state); |
938 | |
939 | // =========================================================================== |
940 | // External string table API. ================================================ |
941 | // =========================================================================== |
942 | |
943 | // Registers an external string. |
944 | inline void RegisterExternalString(String string); |
945 | |
// Called when a string's resource is changed. The old and new payload sizes
// are passed as arguments.
948 | V8_EXPORT_PRIVATE void UpdateExternalString(String string, size_t old_payload, |
949 | size_t new_payload); |
950 | |
951 | // Finalizes an external string by deleting the associated external |
952 | // data and clearing the resource pointer. |
953 | inline void FinalizeExternalString(String string); |
954 | |
955 | static String UpdateYoungReferenceInExternalStringTableEntry( |
956 | Heap* heap, FullObjectSlot pointer); |
957 | |
958 | // =========================================================================== |
959 | // Methods checking/returning the space of a given object/address. =========== |
960 | // =========================================================================== |
961 | |
962 | // Returns whether the object resides in new space. |
963 | static inline bool InYoungGeneration(Object object); |
964 | static inline bool InYoungGeneration(MaybeObject object); |
965 | static inline bool InYoungGeneration(HeapObject heap_object); |
966 | static inline bool InFromPage(Object object); |
967 | static inline bool InFromPage(MaybeObject object); |
968 | static inline bool InFromPage(HeapObject heap_object); |
969 | static inline bool InToPage(Object object); |
970 | static inline bool InToPage(MaybeObject object); |
971 | static inline bool InToPage(HeapObject heap_object); |
972 | |
973 | // Returns whether the object resides in old space. |
974 | inline bool InOldSpace(Object object); |
975 | |
// Checks whether an address/object is in the heap (including the auxiliary
// area and unused area).
978 | V8_EXPORT_PRIVATE bool Contains(HeapObject value); |
979 | |
// Checks whether an address/object is in a given space.
981 | // Currently used by tests, serialization and heap verification only. |
982 | V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space); |
983 | |
984 | // Slow methods that can be used for verification as they can also be used |
985 | // with off-heap Addresses. |
986 | bool InSpaceSlow(Address addr, AllocationSpace space); |
987 | |
988 | static inline Heap* FromWritableHeapObject(const HeapObject obj); |
989 | |
990 | // =========================================================================== |
991 | // Object statistics tracking. =============================================== |
992 | // =========================================================================== |
993 | |
994 | // Returns the number of buckets used by object statistics tracking during a |
995 | // major GC. Note that the following methods fail gracefully when the bounds |
// are exceeded.
997 | size_t NumberOfTrackedHeapObjectTypes(); |
998 | |
999 | // Returns object statistics about count and size at the last major GC. |
1000 | // Objects are being grouped into buckets that roughly resemble existing |
1001 | // instance types. |
1002 | size_t ObjectCountAtLastGC(size_t index); |
1003 | size_t ObjectSizeAtLastGC(size_t index); |
1004 | |
1005 | // Retrieves names of buckets used by object statistics tracking. |
1006 | bool GetObjectTypeName(size_t index, const char** object_type, |
1007 | const char** object_sub_type); |
1008 | |
// The total number of native context objects on the heap.
1010 | size_t NumberOfNativeContexts(); |
1011 | // The total number of native contexts that were detached but were not |
1012 | // garbage collected yet. |
1013 | size_t NumberOfDetachedContexts(); |
1014 | |
1015 | // =========================================================================== |
1016 | // Code statistics. ========================================================== |
1017 | // =========================================================================== |
1018 | |
1019 | // Collect code (Code and BytecodeArray objects) statistics. |
1020 | void CollectCodeStatistics(); |
1021 | |
1022 | // =========================================================================== |
1023 | // GC statistics. ============================================================ |
1024 | // =========================================================================== |
1025 | |
1026 | // Returns the maximum amount of memory reserved for the heap. |
1027 | V8_EXPORT_PRIVATE size_t MaxReserved(); |
1028 | size_t MaxSemiSpaceSize() { return max_semi_space_size_; } |
1029 | size_t InitialSemiSpaceSize() { return initial_semispace_size_; } |
1030 | size_t MaxOldGenerationSize() { return max_old_generation_size_; } |
1031 | |
1032 | V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize( |
1033 | uint64_t physical_memory); |
1034 | |
1035 | static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) { |
1036 | const uint64_t min_physical_memory = 512 * MB; |
1037 | const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB); |
1038 | |
1039 | uint64_t capped_physical_memory = |
1040 | Max(Min(physical_memory, max_physical_memory), min_physical_memory); |
1041 | // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C |
1042 | size_t semi_space_size_in_kb = |
1043 | static_cast<size_t>(((capped_physical_memory - min_physical_memory) * |
1044 | (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) / |
1045 | (max_physical_memory - min_physical_memory) + |
1046 | kMinSemiSpaceSizeInKB); |
1047 | return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB); |
1048 | } |
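// Worked example for the interpolation above (illustrative; assumes a 64-bit
// build where kPointerMultiplier == 2, i.e. bounds of 1024 KB and 16384 KB):
// for physical_memory == 1 GB,
//   (1024 MB - 512 MB) / (3072 MB - 512 MB) * (16384 KB - 1024 KB) + 1024 KB
//     = 0.2 * 15360 KB + 1024 KB = 4096 KB,
// which is then rounded up to a multiple of the page size.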
1049 | |
1050 | // Returns the capacity of the heap in bytes w/o growing. Heap grows when |
1051 | // more spaces are needed until it reaches the limit. |
1052 | size_t Capacity(); |
1053 | |
1054 | // Returns the capacity of the old generation. |
1055 | V8_EXPORT_PRIVATE size_t OldGenerationCapacity(); |
1056 | |
1057 | // Returns the amount of memory currently held alive by the unmapper. |
1058 | size_t CommittedMemoryOfUnmapper(); |
1059 | |
1060 | // Returns the amount of memory currently committed for the heap. |
1061 | size_t CommittedMemory(); |
1062 | |
1063 | // Returns the amount of memory currently committed for the old space. |
1064 | size_t CommittedOldGenerationMemory(); |
1065 | |
1066 | // Returns the amount of executable memory currently committed for the heap. |
1067 | size_t CommittedMemoryExecutable(); |
1068 | |
// Returns the amount of physical memory currently committed for the heap.
1070 | size_t CommittedPhysicalMemory(); |
1071 | |
1072 | // Returns the maximum amount of memory ever committed for the heap. |
1073 | size_t MaximumCommittedMemory() { return maximum_committed_; } |
1074 | |
1075 | // Updates the maximum committed memory for the heap. Should be called |
1076 | // whenever a space grows. |
1077 | void UpdateMaximumCommitted(); |
1078 | |
1079 | // Returns the available bytes in space w/o growing. |
1080 | // Heap doesn't guarantee that it can allocate an object that requires |
1081 | // all available bytes. Check MaxHeapObjectSize() instead. |
1082 | size_t Available(); |
1083 | |
// Returns the size of all objects residing in the heap.
1085 | V8_EXPORT_PRIVATE size_t SizeOfObjects(); |
1086 | |
1087 | void UpdateSurvivalStatistics(int start_new_space_size); |
1088 | |
1089 | inline void IncrementPromotedObjectsSize(size_t object_size) { |
1090 | promoted_objects_size_ += object_size; |
1091 | } |
1092 | inline size_t promoted_objects_size() { return promoted_objects_size_; } |
1093 | |
1094 | inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) { |
1095 | semi_space_copied_object_size_ += object_size; |
1096 | } |
1097 | inline size_t semi_space_copied_object_size() { |
1098 | return semi_space_copied_object_size_; |
1099 | } |
1100 | |
1101 | inline size_t SurvivedYoungObjectSize() { |
1102 | return promoted_objects_size_ + semi_space_copied_object_size_; |
1103 | } |
1104 | |
1105 | inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } |
1106 | |
1107 | inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } |
1108 | |
1109 | inline void IncrementNodesPromoted() { nodes_promoted_++; } |
1110 | |
1111 | inline void IncrementYoungSurvivorsCounter(size_t survived) { |
1112 | survived_last_scavenge_ = survived; |
1113 | survived_since_last_expansion_ += survived; |
1114 | } |
1115 | |
1116 | inline uint64_t OldGenerationObjectsAndPromotedExternalMemorySize() { |
1117 | return OldGenerationSizeOfObjects() + PromotedExternalMemorySize(); |
1118 | } |
1119 | |
1120 | inline void UpdateNewSpaceAllocationCounter(); |
1121 | |
1122 | inline size_t NewSpaceAllocationCounter(); |
1123 | |
1124 | // This should be used only for testing. |
1125 | void set_new_space_allocation_counter(size_t new_value) { |
1126 | new_space_allocation_counter_ = new_value; |
1127 | } |
1128 | |
1129 | void UpdateOldGenerationAllocationCounter() { |
1130 | old_generation_allocation_counter_at_last_gc_ = |
1131 | OldGenerationAllocationCounter(); |
1132 | old_generation_size_at_last_gc_ = 0; |
1133 | } |
1134 | |
1135 | size_t OldGenerationAllocationCounter() { |
1136 | return old_generation_allocation_counter_at_last_gc_ + |
1137 | PromotedSinceLastGC(); |
1138 | } |
1139 | |
1140 | // This should be used only for testing. |
1141 | void set_old_generation_allocation_counter_at_last_gc(size_t new_value) { |
1142 | old_generation_allocation_counter_at_last_gc_ = new_value; |
1143 | } |
1144 | |
1145 | size_t PromotedSinceLastGC() { |
1146 | size_t old_generation_size = OldGenerationSizeOfObjects(); |
1147 | DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_); |
1148 | return old_generation_size - old_generation_size_at_last_gc_; |
1149 | } |
1150 | |
1151 | // This is called by the sweeper when it discovers more free space |
1152 | // than expected at the end of the preceding GC. |
1153 | void NotifyRefinedOldGenerationSize(size_t decreased_bytes) { |
1154 | if (old_generation_size_at_last_gc_ != 0) { |
1155 | // OldGenerationSizeOfObjects() is now smaller by |decreased_bytes|. |
1156 | // Adjust old_generation_size_at_last_gc_ too, so that PromotedSinceLastGC |
1157 | // continues to increase monotonically, rather than decreasing here. |
1158 | DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes); |
1159 | old_generation_size_at_last_gc_ -= decreased_bytes; |
1160 | } |
1161 | } |
1162 | |
1163 | int gc_count() const { return gc_count_; } |
1164 | |
1165 | bool is_current_gc_forced() const { return is_current_gc_forced_; } |
1166 | |
1167 | // Returns the size of objects residing in non-new spaces. |
1168 | // Excludes external memory held by those objects. |
1169 | V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects(); |
1170 | |
1171 | // =========================================================================== |
1172 | // Prologue/epilogue callback methods.======================================== |
1173 | // =========================================================================== |
1174 | |
1175 | void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback, |
1176 | GCType gc_type_filter, void* data); |
1177 | void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback, |
1178 | void* data); |
1179 | |
1180 | void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback, |
1181 | GCType gc_type_filter, void* data); |
1182 | void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback, |
1183 | void* data); |
1184 | |
1185 | void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); |
1186 | void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); |
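// Illustrative usage of AddGCPrologueCallback (sketch; OnMajorGCStart is a
// hypothetical callback, not part of this header):
//
//   void OnMajorGCStart(v8::Isolate* isolate, v8::GCType type,
//                       v8::GCCallbackFlags flags, void* data) { /* ... */ }
//   ...
//   heap->AddGCPrologueCallback(OnMajorGCStart, kGCTypeMarkSweepCompact,
//                               nullptr);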
1187 | |
1188 | // =========================================================================== |
1189 | // Allocation methods. ======================================================= |
1190 | // =========================================================================== |
1191 | |
1192 | // Creates a filler object and returns a heap object immediately after it. |
1193 | V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT HeapObject |
1194 | PrecedeWithFiller(HeapObject object, int filler_size); |
1195 | |
1196 | // Creates a filler object if needed for alignment and returns a heap object |
1197 | // immediately after it. If any space is left after the returned object, |
1198 | // another filler object is created so the over allocated memory is iterable. |
1199 | V8_WARN_UNUSED_RESULT HeapObject |
1200 | AlignWithFiller(HeapObject object, int object_size, int allocation_size, |
1201 | AllocationAlignment alignment); |
1202 | |
1203 | // =========================================================================== |
1204 | // ArrayBuffer tracking. ===================================================== |
1205 | // =========================================================================== |
1206 | |
1207 | // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external |
1208 | // in the registration/unregistration APIs. Consider dropping the "New" from |
1209 | // "RegisterNewArrayBuffer" because one can re-register a previously |
1210 | // unregistered buffer, too, and the name is confusing. |
1211 | void RegisterNewArrayBuffer(JSArrayBuffer buffer); |
1212 | void UnregisterArrayBuffer(JSArrayBuffer buffer); |
1213 | |
1214 | // =========================================================================== |
1215 | // Allocation site tracking. ================================================= |
1216 | // =========================================================================== |
1217 | |
1218 | // Updates the AllocationSite of a given {object}. The entry (including the |
1219 | // count) is cached on the local pretenuring feedback. |
1220 | inline void UpdateAllocationSite( |
1221 | Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback); |
1222 | |
1223 | // Merges local pretenuring feedback into the global one. Note that this |
1224 | // method needs to be called after evacuation, as allocation sites may be |
1225 | // evacuated and this method resolves forward pointers accordingly. |
1226 | void MergeAllocationSitePretenuringFeedback( |
1227 | const PretenuringFeedbackMap& local_pretenuring_feedback); |
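// Illustrative workflow (sketch): a collector accumulates feedback into a
// local map during evacuation and merges it back afterwards:
//
//   Heap::PretenuringFeedbackMap local_feedback;
//   ...
//   heap->UpdateAllocationSite(map, object, &local_feedback);
//   ...
//   heap->MergeAllocationSitePretenuringFeedback(local_feedback);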
1228 | |
1229 | // =========================================================================== |
1230 | // Allocation tracking. ====================================================== |
1231 | // =========================================================================== |
1232 | |
1233 | // Adds {new_space_observer} to new space and {observer} to any other space. |
1234 | void AddAllocationObserversToAllSpaces( |
1235 | AllocationObserver* observer, AllocationObserver* new_space_observer); |
1236 | |
1237 | // Removes {new_space_observer} from new space and {observer} from any other |
1238 | // space. |
1239 | void RemoveAllocationObserversFromAllSpaces( |
1240 | AllocationObserver* observer, AllocationObserver* new_space_observer); |
1241 | |
1242 | bool allocation_step_in_progress() { return allocation_step_in_progress_; } |
1243 | void set_allocation_step_in_progress(bool val) { |
1244 | allocation_step_in_progress_ = val; |
1245 | } |
1246 | |
1247 | // =========================================================================== |
1248 | // Heap object allocation tracking. ========================================== |
1249 | // =========================================================================== |
1250 | |
1251 | void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker); |
1252 | void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker); |
1253 | bool has_heap_object_allocation_tracker() const { |
1254 | return !allocation_trackers_.empty(); |
1255 | } |
1256 | |
1257 | // =========================================================================== |
1258 | // Retaining path tracking. ================================================== |
1259 | // =========================================================================== |
1260 | |
1261 | // Adds the given object to the weak table of retaining path targets. |
1262 | // On each GC if the marker discovers the object, it will print the retaining |
// path. This requires the --track-retaining-path flag.
1264 | void AddRetainingPathTarget(Handle<HeapObject> object, |
1265 | RetainingPathOption option); |
1266 | |
1267 | // =========================================================================== |
1268 | // Stack frame support. ====================================================== |
1269 | // =========================================================================== |
1270 | |
1271 | // Returns the Code object for a given interior pointer. |
1272 | Code GcSafeFindCodeForInnerPointer(Address inner_pointer); |
1273 | |
1274 | // Returns true if {addr} is contained within {code} and false otherwise. |
1275 | // Mostly useful for debugging. |
1276 | bool GcSafeCodeContains(Code code, Address addr); |
1277 | |
1278 | // ============================================================================= |
1279 | #ifdef VERIFY_HEAP |
1280 | // Verify the heap is in its normal state before or after a GC. |
1281 | V8_EXPORT_PRIVATE void Verify(); |
1282 | void VerifyRememberedSetFor(HeapObject object); |
1283 | #endif |
1284 | |
1285 | #ifdef V8_ENABLE_ALLOCATION_TIMEOUT |
1286 | void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } |
1287 | #endif |
1288 | |
1289 | #ifdef DEBUG |
1290 | void VerifyCountersAfterSweeping(); |
1291 | void VerifyCountersBeforeConcurrentSweeping(); |
1292 | |
1293 | void Print(); |
1294 | void PrintHandles(); |
1295 | |
1296 | // Report code statistics. |
1297 | void ReportCodeStatistics(const char* title); |
1298 | #endif |
1299 | void* GetRandomMmapAddr() { |
1300 | void* result = v8::internal::GetRandomMmapAddr(); |
1301 | #if V8_TARGET_ARCH_X64 |
1302 | #if V8_OS_MACOSX |
1303 | // The Darwin kernel [as of macOS 10.12.5] does not clean up page |
1304 | // directory entries [PDE] created from mmap or mach_vm_allocate, even |
1305 | // after the region is destroyed. Using a virtual address space that is |
1306 | // too large causes a leak of about 1 wired [can never be paged out] page |
1307 | // per call to mmap(). The page is only reclaimed when the process is |
1308 | // killed. Confine the hint to a 32-bit section of the virtual address |
1309 | // space. See crbug.com/700928. |
1310 | uintptr_t offset = |
1311 | reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) & |
1312 | kMmapRegionMask; |
1313 | result = reinterpret_cast<void*>(mmap_region_base_ + offset); |
1314 | #endif // V8_OS_MACOSX |
1315 | #endif // V8_TARGET_ARCH_X64 |
1316 | return result; |
1317 | } |
1318 | |
1319 | static const char* GarbageCollectionReasonToString( |
1320 | GarbageCollectionReason gc_reason); |
1321 | |
// Calculates the number of entries for the full-sized number-to-string cache.
1323 | inline int MaxNumberToStringCacheSize() const; |
1324 | |
1325 | private: |
1326 | class SkipStoreBufferScope; |
1327 | |
1328 | using ExternalStringTableUpdaterCallback = String (*)(Heap* heap, |
1329 | FullObjectSlot pointer); |
1330 | |
// The external string table is where all external strings are registered.
// We need to keep track of such strings in order to finalize them properly.
1334 | class ExternalStringTable { |
1335 | public: |
1336 | explicit ExternalStringTable(Heap* heap) : heap_(heap) {} |
1337 | |
1338 | // Registers an external string. |
1339 | inline void AddString(String string); |
1340 | bool Contains(String string); |
1341 | |
1342 | void IterateAll(RootVisitor* v); |
1343 | void IterateYoung(RootVisitor* v); |
1344 | void PromoteYoung(); |
1345 | |
1346 | // Restores internal invariant and gets rid of collected strings. Must be |
1347 | // called after each Iterate*() that modified the strings. |
1348 | void CleanUpAll(); |
1349 | void CleanUpYoung(); |
1350 | |
1351 | // Finalize all registered external strings and clear tables. |
1352 | void TearDown(); |
1353 | |
1354 | void UpdateYoungReferences( |
1355 | Heap::ExternalStringTableUpdaterCallback updater_func); |
1356 | void UpdateReferences( |
1357 | Heap::ExternalStringTableUpdaterCallback updater_func); |
1358 | |
1359 | private: |
1360 | void Verify(); |
1361 | void VerifyYoung(); |
1362 | |
1363 | Heap* const heap_; |
1364 | |
// To speed up scavenge collections, young strings are kept separate from old
1366 | // strings. |
1367 | std::vector<Object> young_strings_; |
1368 | std::vector<Object> old_strings_; |
1369 | |
1370 | DISALLOW_COPY_AND_ASSIGN(ExternalStringTable); |
1371 | }; |
1372 | |
1373 | struct StrongRootsList; |
1374 | |
1375 | struct StringTypeTable { |
1376 | InstanceType type; |
1377 | int size; |
1378 | RootIndex index; |
1379 | }; |
1380 | |
1381 | struct ConstantStringTable { |
1382 | const char* contents; |
1383 | RootIndex index; |
1384 | }; |
1385 | |
1386 | struct StructTable { |
1387 | InstanceType type; |
1388 | int size; |
1389 | RootIndex index; |
1390 | }; |
1391 | |
1392 | struct GCCallbackTuple { |
1393 | GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type, |
1394 | void* data) |
1395 | : callback(callback), gc_type(gc_type), data(data) {} |
1396 | |
1397 | bool operator==(const GCCallbackTuple& other) const; |
1398 | GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT; |
1399 | |
1400 | v8::Isolate::GCCallbackWithData callback; |
1401 | GCType gc_type; |
1402 | void* data; |
1403 | }; |
1404 | |
1405 | static const int kInitialStringTableSize = StringTable::kMinCapacity; |
1406 | static const int kInitialEvalCacheSize = 64; |
1407 | static const int kInitialNumberStringCacheSize = 256; |
1408 | |
1409 | static const int kRememberedUnmappedPages = 128; |
1410 | |
1411 | static const StringTypeTable string_type_table[]; |
1412 | static const ConstantStringTable constant_string_table[]; |
1413 | static const StructTable struct_table[]; |
1414 | |
1415 | static const int kYoungSurvivalRateHighThreshold = 90; |
1416 | static const int kYoungSurvivalRateAllowedDeviation = 15; |
1417 | static const int kOldSurvivalRateLowThreshold = 10; |
1418 | |
1419 | static const int kMaxMarkCompactsInIdleRound = 7; |
1420 | static const int kIdleScavengeThreshold = 5; |
1421 | |
1422 | static const int kInitialFeedbackCapacity = 256; |
1423 | |
1424 | Heap(); |
1425 | ~Heap(); |
1426 | |
1427 | static bool IsRegularObjectAllocation(AllocationType allocation) { |
1428 | return AllocationType::kYoung == allocation || |
1429 | AllocationType::kOld == allocation; |
1430 | } |
1431 | |
1432 | static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() { |
1433 | return 0; |
1434 | } |
1435 | |
1436 | #define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type |