// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/base/template-utils.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      : memory_chunk_data_(memory_chunk_data) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

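  // Live bytes are accumulated in a per-task map keyed by MemoryChunk rather
  // than written to the chunks directly; the main thread merges them in
  // ConcurrentMarking::FlushMemoryChunkData once the tasks have finished.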
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*memory_chunk_data_)[chunk].live_bytes += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  MemoryChunkDataMap* memory_chunk_data_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object value) {
    snapshot_[number_of_slots_++] = {slot, value};
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

class ConcurrentMarkingVisitor final
    : public HeapVisitor<int, ConcurrentMarkingVisitor> {
 public:
  using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;

  explicit ConcurrentMarkingVisitor(
      ConcurrentMarking::MarkingWorklist* shared,
      MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
      ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
      bool embedder_tracing_enabled, unsigned mark_compact_epoch,
      bool is_forced_gc)
      : shared_(shared, task_id),
        weak_objects_(weak_objects),
        embedder_objects_(embedder_objects, task_id),
        marking_state_(memory_chunk_data),
        memory_chunk_data_(memory_chunk_data),
        task_id_(task_id),
        embedder_tracing_enabled_(embedder_tracing_enabled),
        mark_compact_epoch_(mark_compact_epoch),
        is_forced_gc_(is_forced_gc) {
    // It is not safe to access flags from the concurrent marking visitor, so
    // cache the bytecode flush mode based on the flags here.
    bytecode_flush_mode_ = Heap::GetBytecodeFlushMode();
  }

  template <typename T>
  static V8_INLINE T Cast(HeapObject object) {
    return T::cast(object);
  }

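  // Only the task that wins the grey-to-black transition below gets a true
  // return value, which guarantees that each object is visited by at most one
  // marking task even though several tasks pop from the shared worklist.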
  bool ShouldVisit(HeapObject object) {
    return marking_state_.GreyToBlack(object);
  }

  bool AllowDefaultJSObjectVisit() { return false; }

  template <typename THeapObjectSlot>
  void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
                               HeapObject heap_object) {
    MarkObject(heap_object);
    MarkCompactCollector::RecordSlot(host, slot, heap_object);
  }

  template <typename THeapObjectSlot>
  void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
                             HeapObject heap_object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.IsBlackOrGrey(heap_object)) {
      // Weak references with live values are directly processed here to
      // reduce the processing time of weak cells during the main GC
      // pause.
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    } else {
      // If we do not know about the liveness of the value, we have to process
      // the reference when we know the liveness of the whole transitive
      // closure.
      weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
    }
  }

  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override {
    VisitPointersImpl(host, start, end);
  }

  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    VisitPointersImpl(host, start, end);
  }

  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = slot.Relaxed_Load();
      HeapObject heap_object;
      if (object.GetHeapObjectIfStrong(&heap_object)) {
        // If the reference changes concurrently from strong to weak, the
        // write barrier will treat the weak reference as strong, so we won't
        // miss the weak reference.
        ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
      } else if (TSlot::kCanBeWeak &&
                 object.GetHeapObjectIfWeak(&heap_object)) {
        ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  // Weak list pointers should be ignored during marking. The lists are
  // reconstructed after GC.
  void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {}

  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    HeapObject object = rinfo->target_object();
    RecordRelocSlot(host, rinfo, object);
    if (!marking_state_.IsBlackOrGrey(object)) {
      if (host->IsWeakObject(object)) {
        weak_objects_->weak_objects_in_code.Push(task_id_,
                                                 std::make_pair(object, host));
      } else {
        MarkObject(object);
      }
    }
  }

  void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    RecordRelocSlot(host, rinfo, target);
    MarkObject(target);
  }

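  // Marks and records the slots previously captured in a SlotSnapshot. The
  // snapshot was filled with relaxed loads while the mutator may still have
  // been modifying the object, so the object itself is not touched again here.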
  void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object->IsHeapObject()) continue;
      HeapObject heap_object = HeapObject::cast(object);
      MarkObject(heap_object);
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    }
  }

  // ===========================================================================
  // JS object =================================================================
  // ===========================================================================

  int VisitJSObject(Map map, JSObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject object) {
    return VisitJSObjectSubclassFast(map, object);
  }

  int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
    int size = VisitJSObjectSubclass(map, weak_ref);
    if (size == 0) {
      return 0;
    }
    if (weak_ref->target()->IsHeapObject()) {
      HeapObject target = HeapObject::cast(weak_ref->target());
      if (marking_state_.IsBlackOrGrey(target)) {
        // Record the slot inside the JSWeakRef, since the
        // VisitJSObjectSubclass above didn't visit it.
        ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
        MarkCompactCollector::RecordSlot(weak_ref, slot, target);
      } else {
        // The JSWeakRef points to a potentially dead object. We have to
        // process it when we know the liveness of the whole transitive
        // closure.
        weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
      }
    }
    return size;
  }

  int VisitWeakCell(Map map, WeakCell weak_cell) {
    if (!ShouldVisit(weak_cell)) return 0;

    int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
    VisitMapPointer(weak_cell, weak_cell->map_slot());
    WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
    if (weak_cell->target()->IsHeapObject()) {
      HeapObject target = HeapObject::cast(weak_cell->target());
      if (marking_state_.IsBlackOrGrey(target)) {
        // Record the slot inside the WeakCell, since the IterateBody above
        // didn't visit it.
        ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
        MarkCompactCollector::RecordSlot(weak_cell, slot, target);
      } else {
        // The WeakCell points to a potentially dead object. We have to
        // process it when we know the liveness of the whole transitive
        // closure.
        weak_objects_->weak_cells.Push(task_id_, weak_cell);
      }
    }
    return size;
  }

  // Some JS objects can carry back links to embedders that contain
  // information relevant to the garbage collector.

  int VisitJSApiObject(Map map, JSObject object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSDataView(Map map, JSDataView object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSTypedArray(Map map, JSTypedArray object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  // ===========================================================================
  // Strings with pointers =====================================================
  // ===========================================================================

  int VisitConsString(Map map, ConsString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitThinString(Map map, ThinString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  // ===========================================================================
  // Strings without pointers ==================================================
  // ===========================================================================

  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return SeqOneByteString::SizeFor(object->synchronized_length());
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return SeqTwoByteString::SizeFor(object->synchronized_length());
  }

  // ===========================================================================
  // Fixed array object ========================================================
  // ===========================================================================

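  // Large FixedArrays are scanned in fixed-size chunks. The scan offset is
  // kept in the MemoryChunk's progress bar, and the array is pushed back onto
  // the worklist after each chunk until the whole body has been visited.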
  int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
                                     MemoryChunk* chunk) {
    // The concurrent marker can process larger chunks than the main thread
    // marker.
    const int kProgressBarScanningChunk =
        RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
    DCHECK(marking_state_.IsBlackOrGrey(object));
    marking_state_.GreyToBlack(object);
    int size = FixedArray::BodyDescriptor::SizeOf(map, object);
    size_t current_progress_bar = chunk->ProgressBar();
    if (current_progress_bar == 0) {
      // Try to move the progress bar forward to the start offset. This solves
      // the problem of not being able to observe a progress bar reset when
      // processing the first kProgressBarScanningChunk.
      if (!chunk->TrySetProgressBar(0,
                                    FixedArray::BodyDescriptor::kStartOffset))
        return 0;
      current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
    }
    int start = static_cast<int>(current_progress_bar);
    int end = Min(size, start + kProgressBarScanningChunk);
    if (start < end) {
      VisitPointers(object, object.RawField(start), object.RawField(end));
      // Setting the progress bar can fail if the object that is currently
      // scanned is also revisited. In this case, there may be two tasks racing
      // on the progress counter. The loser can bail out because the progress
      // bar is reset before the tasks race on the object.
      if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
        // The object can be pushed back onto the marking worklist only after
        // the progress bar was updated.
        shared_.Push(object);
      }
    }
    return end - start;
  }

  int VisitFixedArray(Map map, FixedArray object) {
    // Arrays with the progress bar are not left-trimmable because they reside
    // in the large object space.
    MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
    return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
               ? VisitFixedArrayWithProgressBar(map, object, chunk)
               : VisitLeftTrimmableArray(map, object);
  }

  int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
    return VisitLeftTrimmableArray(map, object);
  }

  // ===========================================================================
  // Side-effectful visitation.
  // ===========================================================================

  int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
    if (!ShouldVisit(shared_info)) return 0;

    int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
    VisitMapPointer(shared_info, shared_info->map_slot());
    SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
                                                    this);

    // If the SharedFunctionInfo has old bytecode, mark it as flushable,
    // otherwise visit the function data field strongly.
    if (shared_info->ShouldFlushBytecode(bytecode_flush_mode_)) {
      weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
    } else {
      VisitPointer(shared_info, shared_info->RawField(
                                    SharedFunctionInfo::kFunctionDataOffset));
    }
    return size;
  }

  int VisitBytecodeArray(Map map, BytecodeArray object) {
    if (!ShouldVisit(object)) return 0;
    int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
    if (!is_forced_gc_) {
      object->MakeOlder();
    }
    return size;
  }

  int VisitJSFunction(Map map, JSFunction object) {
    int size = VisitJSObjectSubclass(map, object);

    // Check if the JSFunction needs reset due to bytecode being flushed.
    if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
        object->NeedsResetDueToFlushedBytecode()) {
      weak_objects_->flushed_js_functions.Push(task_id_, object);
    }

    return size;
  }

  int VisitMap(Map meta_map, Map map) {
    if (!ShouldVisit(map)) return 0;
    int size = Map::BodyDescriptor::SizeOf(meta_map, map);
    if (map->CanTransition()) {
      // Maps that can transition share their descriptor arrays and require
      // special visiting logic to avoid memory leaks.
      // Since descriptor arrays are potentially shared, ensure that only the
      // descriptors that belong to this map are marked. The first time a
      // non-empty descriptor array is marked, its header is also visited. The
      // slot holding the descriptor array will be implicitly recorded when the
      // pointer fields of this map are visited.
      DescriptorArray descriptors = map->synchronized_instance_descriptors();
      MarkDescriptorArrayBlack(descriptors);
      int number_of_own_descriptors = map->NumberOfOwnDescriptors();
      if (number_of_own_descriptors) {
        // It is possible that the concurrent marker observes the
        // number_of_own_descriptors out of sync with the descriptors. In that
        // case the marking write barrier for the descriptor array will ensure
        // that all required descriptors are marked. The concurrent marker
        // just needs to avoid crashing in that case. That's why we need the
        // std::min<int>() below.
        VisitDescriptors(descriptors,
                         std::min<int>(number_of_own_descriptors,
                                       descriptors->number_of_descriptors()));
      }
      // Mark the pointer fields of the Map. Since the transitions array has
      // been marked already, it is fine that one of these fields contains a
      // pointer to it.
    }
    Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
    return size;
  }

  void VisitDescriptors(DescriptorArray descriptor_array,
                        int number_of_own_descriptors) {
    int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
    int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
        mark_compact_epoch_, new_marked);
    if (old_marked < new_marked) {
      VisitPointers(
          descriptor_array,
          MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
          MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
    }
  }

  int VisitDescriptorArray(Map map, DescriptorArray array) {
    if (!ShouldVisit(array)) return 0;
    VisitMapPointer(array, array->map_slot());
    int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
    VisitPointers(array, array->GetFirstPointerSlot(),
                  array->GetDescriptorSlot(0));
    VisitDescriptors(array, array->number_of_descriptors());
    return size;
  }

  int VisitTransitionArray(Map map, TransitionArray array) {
    if (!ShouldVisit(array)) return 0;
    VisitMapPointer(array, array->map_slot());
    int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
    TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
    weak_objects_->transition_arrays.Push(task_id_, array);
    return size;
  }

  int VisitJSWeakCollection(Map map, JSWeakCollection object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
    if (!ShouldVisit(table)) return 0;
    weak_objects_->ephemeron_hash_tables.Push(task_id_, table);

    for (int i = 0; i < table->Capacity(); i++) {
      ObjectSlot key_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
      HeapObject key = HeapObject::cast(table->KeyAt(i));
      MarkCompactCollector::RecordSlot(table, key_slot, key);

      ObjectSlot value_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));

      if (marking_state_.IsBlackOrGrey(key)) {
        VisitPointer(table, value_slot);
      } else {
        Object value_obj = table->ValueAt(i);

        if (value_obj->IsHeapObject()) {
          HeapObject value = HeapObject::cast(value_obj);
          MarkCompactCollector::RecordSlot(table, value_slot, value);

          // Revisit ephemerons with both key and value unreachable at the end
          // of the concurrent marking cycle.
          if (marking_state_.IsWhite(value)) {
            weak_objects_->discovered_ephemerons.Push(task_id_,
                                                      Ephemeron{key, value});
          }
        }
      }
    }

    return table->SizeFromMap(map);
  }

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        shared_.Push(value);
        return true;
      }
    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }

    return false;
  }

  void MarkObject(HeapObject object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.WhiteToGrey(object)) {
      shared_.Push(object);
    }
  }

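  // Descriptor arrays are marked straight to black and only their header
  // slots are visited here; the descriptor entries themselves are visited by
  // VisitDescriptors, bounded by the owning map's live descriptor count.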
  void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
    marking_state_.WhiteToGrey(descriptors);
    if (marking_state_.GreyToBlack(descriptors)) {
      VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
                    descriptors->GetDescriptorSlot(0));
    }
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object object = p.Relaxed_Load();
        slot_snapshot_->add(p, object);
      }
    }

    void VisitPointers(HeapObject host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
      // This should never happen, because we don't use snapshotting for
      // objects which contain weak references.
      UNREACHABLE();
    }

    void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                                 ObjectSlot end) override {
      DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclassFast(Map map, T object) {
    DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
    using TBodyDescriptor = typename T::FastBodyDescriptor;
    return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
  }

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object) {
    int size = TBodyDescriptor::SizeOf(map, object);
    int used_size = map->UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, T::kHeaderSize);
    return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
                                                          used_size, size);
  }

  template <typename T>
  int VisitEmbedderTracingSubclass(Map map, T object) {
    DCHECK(object->IsApiWrapper());
    int size = VisitJSObjectSubclass(map, object);
    if (size && embedder_tracing_enabled_) {
      // Success: The object needs to be processed for embedder references on
      // the main thread.
      embedder_objects_.Push(object);
    }
    return size;
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The synchronized_length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object length = object->unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before overwriting the length.
    DCHECK(length->IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object, object->map_slot());
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  template <typename T>
  int VisitFullyWithSnapshot(Map map, T object) {
    using TBodyDescriptor = typename T::BodyDescriptor;
    int size = TBodyDescriptor::SizeOf(map, object);
    return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
                                                          size);
  }

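  // The snapshot is taken before the grey-to-black transition is attempted.
  // If another task wins the transition in the meantime, ShouldVisit() returns
  // false and the snapshot is simply discarded without recording any slots.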
  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
    const SlotSnapshot& snapshot =
        MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T, typename TBodyDescriptor>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
    TBodyDescriptor::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }

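  // Typed slots for code targets and embedded objects are buffered per task
  // in MemoryChunkData and merged into the OLD_TO_OLD remembered set on the
  // main thread by ConcurrentMarking::FlushMemoryChunkData.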
  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
    MarkCompactCollector::RecordRelocSlotInfo info =
        MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
    if (info.should_record) {
      MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
      if (!data.typed_slots) {
        data.typed_slots.reset(new TypedSlots());
      }
      data.typed_slots->Insert(info.slot_type, info.offset);
    }
  }

  ConcurrentMarking::MarkingWorklist::View shared_;
  WeakObjects* weak_objects_;
  ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
  ConcurrentMarkingState marking_state_;
  MemoryChunkDataMap* memory_chunk_data_;
  int task_id_;
  SlotSnapshot slot_snapshot_;
  bool embedder_tracing_enabled_;
  const unsigned mark_compact_epoch_;
  bool is_forced_gc_;
  BytecodeFlushMode bytecode_flush_mode_;
};

// Strings can change maps due to conversion to thin or external strings.
// Use unchecked casts to avoid data races in slow dchecks.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// A fixed array can become a free space during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return FixedArray::unchecked_cast(object);
}

class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       TaskState* task_state, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        task_state_(task_state),
        task_id_(task_id) {}

  ~Task() override = default;

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    concurrent_marking_->Run(task_id_, task_state_);
  }

  ConcurrentMarking* concurrent_marking_;
  TaskState* task_state_;
  int task_id_;
  DISALLOW_COPY_AND_ASSIGN(Task);
};

ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                     MarkingWorklist* on_hold,
                                     WeakObjects* weak_objects,
                                     EmbedderTracingWorklist* embedder_objects)
    : heap_(heap),
      shared_(shared),
      on_hold_(on_hold),
      weak_objects_(weak_objects),
      embedder_objects_(embedder_objects) {
// The runtime flag should be set only if the compile-time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
}

void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  ConcurrentMarkingVisitor visitor(
      shared_, &task_state->memory_chunk_data, weak_objects_,
      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse(),
      task_state->mark_compact_epoch, task_state->is_forced_gc);
  double time_ms;
  size_t marked_bytes = 0;
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Starting concurrent marking task %d\n", task_id);
  }
  bool ephemeron_marked = false;

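  // The task first drains any ephemerons carried over from the main thread,
  // then pops objects from the shared worklist in small batches, re-checking
  // the preemption flag and the marked-bytes budget between batches.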
  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;

      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject object;
        if (!shared_->Pop(task_id, &object)) {
          done = true;
          break;
        }
        objects_processed++;
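        // Objects in the new-space allocation area that was active when
        // marking started (and the large object currently being allocated)
        // may still be under construction by the mutator, so they are parked
        // on the on_hold_ worklist and revisited by the main thread instead.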
        // The order of the two loads is important.
        Address new_space_top = heap_->new_space()->original_top_acquire();
        Address new_space_limit = heap_->new_space()->original_limit_relaxed();
        Address new_large_object = heap_->new_lo_space()->pending_object();
        Address addr = object->address();
        if ((new_space_top <= addr && addr < new_space_limit) ||
            addr == new_large_object) {
          on_hold_->Push(task_id, object);
        } else {
          Map map = object->synchronized_map();
          current_marked_bytes += visitor.Visit(map, object);
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (task_state->preemption_request) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;

      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

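    // Publish everything this task produced: local worklist segments are
    // flushed to the global pools so the main thread (and other tasks) can
    // observe them, and the per-task marked-bytes counter is folded into the
    // running total.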
    shared_->FlushToGlobal(task_id);
    on_hold_->FlushToGlobal(task_id);
    embedder_objects_->FlushToGlobal(task_id);

    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    weak_objects_->js_weak_refs.FlushToGlobal(task_id);
    weak_objects_->weak_cells.FlushToGlobal(task_id);
    weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
    weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
    weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (ephemeron_marked) {
      set_ephemeron_marked(true);
    }

    {
      base::MutexGuard guard(&pending_lock_);
      is_pending_[task_id] = false;
      --pending_task_count_;
      pending_condition_.NotifyAll();
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

void ConcurrentMarking::ScheduleTasks() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  base::MutexGuard guard(&pending_lock_);
  DCHECK_EQ(0, pending_task_count_);
  if (task_count_ == 0) {
    static const int num_cores =
        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
    // Mac OS X 10.11 and prior seem to have trouble when doing concurrent
    // marking on competing hyper-threads (regresses Octane/Splay). As such,
    // only use num_cores/2, leaving one of those for the main thread.
    // TODO(ulan): Use all cores on Mac 10.12+.
    task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else   // defined(V8_OS_MACOSX)
    // On other platforms use all logical cores, leaving one for the main
    // thread.
    task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
#endif  // defined(V8_OS_MACOSX)
  }
  // Task id 0 is for the main thread.
  for (int i = 1; i <= task_count_; i++) {
    if (!is_pending_[i]) {
      if (FLAG_trace_concurrent_marking) {
        heap_->isolate()->PrintWithTimestamp(
            "Scheduling concurrent marking task %d\n", i);
      }
      task_state_[i].preemption_request = false;
      task_state_[i].mark_compact_epoch =
          heap_->mark_compact_collector()->epoch();
      task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
      is_pending_[i] = true;
      ++pending_task_count_;
      auto task =
          base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
      cancelable_id_[i] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    }
  }
  DCHECK_EQ(task_count_, pending_task_count_);
}

void ConcurrentMarking::RescheduleTasksIfNeeded() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;
  {
    base::MutexGuard guard(&pending_lock_);
    if (pending_task_count_ > 0) return;
  }
  if (!shared_->IsGlobalPoolEmpty() ||
      !weak_objects_->current_ephemerons.IsEmpty() ||
      !weak_objects_->discovered_ephemerons.IsEmpty()) {
    ScheduleTasks();
  }
}

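// Stops concurrent marking according to the given request: tasks that have
// not started yet are aborted, running tasks are optionally asked to preempt
// themselves, and the caller then waits until no task is pending. Returns
// false if nothing was running in the first place.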
bool ConcurrentMarking::Stop(StopRequest stop_request) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  base::MutexGuard guard(&pending_lock_);

  if (pending_task_count_ == 0) return false;

  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
    CancelableTaskManager* task_manager =
        heap_->isolate()->cancelable_task_manager();
    for (int i = 1; i <= task_count_; i++) {
      if (is_pending_[i]) {
        if (task_manager->TryAbort(cancelable_id_[i]) ==
            TryAbortResult::kTaskAborted) {
          is_pending_[i] = false;
          --pending_task_count_;
        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
          task_state_[i].preemption_request = true;
        }
      }
    }
  }
  while (pending_task_count_ > 0) {
    pending_condition_.Wait(&pending_lock_);
  }
  for (int i = 1; i <= task_count_; i++) {
    DCHECK(!is_pending_[i]);
  }
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  base::MutexGuard guard(&pending_lock_);
  return pending_task_count_ == 0;
}

void ConcurrentMarking::FlushMemoryChunkData(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK_EQ(pending_task_count_, 0);
  for (int i = 1; i <= task_count_; i++) {
    MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
    for (auto& pair : memory_chunk_data) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might already be unmapped.
      MemoryChunk* memory_chunk = pair.first;
      MemoryChunkData& data = pair.second;
      if (data.live_bytes) {
        marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
      }
      if (data.typed_slots) {
        RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                              std::move(data.typed_slots));
      }
    }
    memory_chunk_data.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
  for (int i = 1; i <= task_count_; i++) {
    auto it = task_state_[i].memory_chunk_data.find(chunk);
    if (it != task_state_[i].memory_chunk_data.end()) {
      it->second.live_bytes = 0;
      it->second.typed_slots.reset();
    }
  }
}

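// Combines the bytes already folded into total_marked_bytes_ by finished
// tasks with the relaxed per-task counters of tasks that are still running,
// so the result is an approximation that never requires taking a lock.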
size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= task_count_; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

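// PauseScope preempts all running marking tasks for the duration of the scope
// and, if any tasks were actually stopped, reschedules them when the scope is
// destroyed.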
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking &&
                      concurrent_marking_->Stop(
                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}

}  // namespace internal
}  // namespace v8