// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <stack>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
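//
// Sample usage (a minimal sketch; page_start and slot_address are assumed to
// be supplied by the caller and the offset to be tagged-size aligned):
//   SlotSet* slot_set = new SlotSet();
//   slot_set->SetPageStart(page_start);
//   slot_set->Insert(static_cast<int>(slot_address - page_start));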
class SlotSet : public Malloced {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
                            // set, but deallocated on demand by a sweeper
                            // thread.
    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
  };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      StoreBucket(&buckets_[i], nullptr);
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
    FreeToBeFreedBuckets();
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  //
  // AccessMode defines whether there can be concurrent access on the buckets
  // or cells. With AccessMode::ATOMIC, a lost race on bucket allocation is
  // resolved by SwapInNewBucket's compare-and-swap, so concurrent inserters
  // are safe; with AccessMode::NON_ATOMIC the caller must ensure exclusive
  // access.
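  //
  // Sample usage (sketch):
  //   slot_set->Insert(slot_offset);                          // atomic access
  //   slot_set->Insert<AccessMode::NON_ATOMIC>(slot_offset);  // exclusive access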
  template <AccessMode access_mode = AccessMode::ATOMIC>
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
    if (bucket == nullptr) {
      bucket = AllocateBucket();
      if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
        DeleteArray<uint32_t>(bucket);
        bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set we do
    // not free it concurrently.
    DCHECK_NOT_NULL(bucket);
    DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
    uint32_t mask = 1u << bit_index;
    if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
      SetCellBits<access_mode>(&bucket[cell_index], mask);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket == nullptr) return false;
    return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket != nullptr) {
      uint32_t cell = LoadCell(&bucket[cell_index]);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        ClearCellBits(&bucket[cell_index], bit_mask);
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
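  //
  // Sample usage (a sketch; offsets are assumed to be relative to page_start_):
  //   slot_set->RemoveRange(start_offset, end_offset,
  //                         SlotSet::KEEP_EMPTY_BUCKETS);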
  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
    CHECK_LE(end_offset, 1 << kPageSizeBits);
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(&buckets_[start_bucket]);
      if (bucket != nullptr) {
        ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
      }
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(&buckets_[current_bucket]);
    if (bucket != nullptr) {
      ClearCellBits(&bucket[current_cell], ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == PREFREE_EMPTY_BUCKETS) {
        PreFreeEmptyBucket(current_bucket);
      } else if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(&buckets_[current_bucket]);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    bucket = LoadBucket(&buckets_[current_bucket]);
    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
    if (current_bucket == kBuckets || bucket == nullptr) {
      return;
    }
    while (current_cell < end_cell) {
      StoreCell(&bucket[current_cell], 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    ClearCellBits(&bucket[end_cell], ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket == nullptr) return false;
    return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  // This method should only be called on the main thread.
  //
  // Sample usage:
  // Iterate([](MaybeObjectSlot slot) {
  //    if (good(slot)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, EmptyBucketMode mode) {
    int new_count = 0;
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        int in_bucket_count = 0;
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = LoadCell(&bucket[i]);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
              if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              ClearCellBits(&bucket[i], mask);
            }
          }
        }
        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
          PreFreeEmptyBucket(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  int NumberOfPreFreedEmptyBuckets() {
    base::MutexGuard guard(&to_be_freed_buckets_mutex_);
    return static_cast<int>(to_be_freed_buckets_.size());
  }

  void PreFreeEmptyBuckets() {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        if (IsEmptyBucket(bucket)) {
          PreFreeEmptyBucket(bucket_index);
        }
      }
    }
  }

  void FreeEmptyBuckets() {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        if (IsEmptyBucket(bucket)) {
          ReleaseBucket(bucket_index);
        }
      }
    }
  }

  void FreeToBeFreedBuckets() {
    base::MutexGuard guard(&to_be_freed_buckets_mutex_);
    while (!to_be_freed_buckets_.empty()) {
      Bucket top = to_be_freed_buckets_.top();
      to_be_freed_buckets_.pop();
      DeleteArray<uint32_t>(top);
    }
    DCHECK_EQ(0u, to_be_freed_buckets_.size());
  }

 private:
  using Bucket = uint32_t*;
  static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
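  // For example (the actual values depend on the build configuration): with
  // kPageSizeBits == 18 (256 KiB pages) and kTaggedSize == 8, kMaxSlots ==
  // 32768, kBitsPerBucket == 1024 and kBuckets == 32, i.e. one bucket covers
  // 8 KiB of the page.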

  Bucket AllocateBucket() {
    Bucket result = NewArray<uint32_t>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i] = 0;
    }
    return result;
  }

  void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < kCellsPerBucket) {
      StoreCell(&bucket[current_cell], 0);
      current_cell++;
    }
  }

  void PreFreeEmptyBucket(int bucket_index) {
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket != nullptr) {
      base::MutexGuard guard(&to_be_freed_buckets_mutex_);
      to_be_freed_buckets_.push(bucket);
      StoreBucket(&buckets_[bucket_index], nullptr);
    }
  }

  void ReleaseBucket(int bucket_index) {
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    StoreBucket(&buckets_[bucket_index], nullptr);
    DeleteArray<uint32_t>(bucket);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket LoadBucket(Bucket* bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket* bucket, Bucket value) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  bool IsEmptyBucket(Bucket bucket) {
    for (int i = 0; i < kCellsPerBucket; i++) {
      if (LoadCell(&bucket[i])) {
        return false;
      }
    }
    return true;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(Bucket* bucket, Bucket value) {
    if (access_mode == AccessMode::ATOMIC) {
      return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
                                                           value) == nullptr;
    } else {
      DCHECK_NULL(*bucket);
      *bucket = value;
      return true;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  uint32_t LoadCell(uint32_t* cell) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomic32::Acquire_Load(cell);
    return *cell;
  }

  void StoreCell(uint32_t* cell, uint32_t value) {
    base::AsAtomic32::Release_Store(cell, value);
  }

  void ClearCellBits(uint32_t* cell, uint32_t mask) {
    base::AsAtomic32::SetBits(cell, 0u, mask);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void SetCellBits(uint32_t* cell, uint32_t mask) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomic32::SetBits(cell, mask, mask);
    } else {
      *cell = (*cell & ~mask) | mask;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
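  // For example (a sketch; if kTaggedSizeLog2 == 3), slot_offset == 4128 maps
  // to slot == 516, i.e. bucket_index == 0, cell_index == 16 and bit_index == 4.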
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK(IsAligned(slot_offset, kTaggedSize));
    int slot = slot_offset >> kTaggedSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

  Bucket buckets_[kBuckets];
  Address page_start_;
  base::Mutex to_be_freed_buckets_mutex_;
  std::stack<uint32_t*> to_be_freed_buckets_;
};

enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  CLEARED_SLOT
};

// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so the maximum
// possible offset is limited by LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
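//
// Sample usage (a sketch; the names are illustrative and offsets are relative
// to the start of the page):
//   TypedSlots local_slots;
//   local_slots.Insert(CODE_TARGET_SLOT, offset);
//   page_typed_slots->Merge(&local_slots);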
class V8_EXPORT_PRIVATE TypedSlots {
 public:
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  void Insert(SlotType type, uint32_t offset);
  void Merge(TypedSlots* other);

 protected:
  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};
  struct TypedSlot {
    uint32_t type_and_offset;
  };
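  // A TypedSlot packs the offset into the low 29 bits and the slot type into
  // the top 3 bits, e.g. (sketch):
  //   uint32_t encoded =
  //       TypeField::encode(CODE_TARGET_SLOT) | OffsetField::encode(offset);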
  struct Chunk {
    Chunk* next;
    TypedSlot* buffer;
    int32_t capacity;
    int32_t count;
  };
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;
  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, int capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};

// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  // PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty during
  // iteration are queued in to_be_freed_chunks_ and are later freed by
  // FreeToBeFreedChunks.
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  ~TypedSlotSet() override;

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer;
      int count = chunk->count;
      bool empty = true;
      for (int i = 0; i < count; i++) {
        TypedSlot slot = LoadTypedSlot(buffer + i);
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != CLEARED_SLOT) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            ClearTypedSlot(buffer + i);
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but leave it pointing to its next
        // chunk so that concurrent iteration can continue.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }
        base::MutexGuard guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots that have the offset in the specified ranges.
  // This can run concurrently to Iterate().
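  //
  // Sample usage (a sketch; it assumes each map entry denotes a half-open
  // offset range [key, value) relative to page_start_):
  //   std::map<uint32_t, uint32_t> invalid_ranges = {{begin_offset, end_offset}};
  //   typed_slot_set->ClearInvalidSlots(invalid_ranges);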
  void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);

  // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
  void FreeToBeFreedChunks();

 private:
  // Atomic operations used by Iterate and ClearInvalidSlots;
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  TypedSlot LoadTypedSlot(TypedSlot* slot) {
    return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
  }
  void ClearTypedSlot(TypedSlot* slot) {
    // Order is important here and should match that of LoadTypedSlot.
    base::AsAtomic32::Relaxed_Store(
        &slot->type_and_offset,
        TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
  }

  Address page_start_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SLOT_SET_H_