// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer.h"

#include "src/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/roots.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"

namespace v8 {
namespace internal {

template <typename TSlot>
TSlot Deserializer::Write(TSlot dest, MaybeObject value) {
  DCHECK(!allocator()->next_reference_is_weak());
  dest.store(value);
  return dest + 1;
}

template <typename TSlot>
TSlot Deserializer::WriteAddress(TSlot dest, Address value) {
  DCHECK(!allocator()->next_reference_is_weak());
  memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
  STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
  return dest + (kSystemPointerSize / TSlot::kSlotDataSize);
}

void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  allocator()->Initialize(isolate->heap());

#ifdef DEBUG
  // The read-only deserializer is run by read-only heap set-up before the heap
  // is fully set up. The external reference table relies on a few parts of
  // this set-up (like old-space), so it may be uninitialized at this point.
  if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
    // Count the number of external references registered through the API.
    num_api_references_ = 0;
    if (isolate_->api_external_references() != nullptr) {
      while (isolate_->api_external_references()[num_api_references_] != 0) {
        num_api_references_++;
      }
    }
  }
#endif  // DEBUG
  CHECK_EQ(magic_number_, SerializedData::kMagicNumber);
}

void Deserializer::Rehash() {
  DCHECK(can_rehash() || deserializing_user_code());
  for (HeapObject item : to_rehash_) {
    item->RehashBasedOnMap(ReadOnlyRoots(isolate_));
  }
}

Deserializer::~Deserializer() {
#ifdef DEBUG
  // Do not perform checks if we aborted deserialization.
  if (source_.position() == 0) return;
  // Check that we only have padding bytes remaining.
  while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
  // Check that we've fully used all reserved space.
  DCHECK(allocator()->ReservationsAreFullyUsed());
#endif  // DEBUG
}

// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
                                     FullObjectSlot start,
                                     FullObjectSlot end) {
  // We are reading to a location outside of JS heap, so pass NEW_SPACE to
  // avoid triggering write barriers.
  ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), NEW_SPACE,
           kNullAddress);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2: {
        int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
        allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
        break;
      }
      default: {
        int space = code & kSpaceMask;
        DCHECK_LE(space, kNumberOfSpaces);
        DCHECK_EQ(code - space, kNewObject);
        HeapObject object = GetBackReferencedObject(space);
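        // The object size was serialized in tagged-word units; convert it
        // back to bytes.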
        int size = source_.GetInt() << kTaggedSizeLog2;
        Address obj_address = object->address();
        // Object's map is already initialized, now read the rest.
        MaybeObjectSlot start(obj_address + kTaggedSize);
        MaybeObjectSlot end(obj_address + size);
        bool filled = ReadData(start, end, space, obj_address);
        CHECK(filled);
        DCHECK(CanBeDeferred(object));
        PostProcessNewObject(object, space);
      }
    }
  }
}

void Deserializer::LogNewObjectEvents() {
  {
    // {new_maps_} and {new_code_objects_} are vectors containing raw
    // pointers, hence there should be no GC happening.
    DisallowHeapAllocation no_gc;
    // Issue code events for newly deserialized code objects.
    LOG_CODE_EVENT(isolate_, LogCodeObjects());
  }
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
  LogNewMapEvents();
}

void Deserializer::LogNewMapEvents() {
  DisallowHeapAllocation no_gc;
  for (Map map : new_maps()) {
    DCHECK(FLAG_trace_maps);
    LOG(isolate_, MapCreate(map));
    LOG(isolate_, MapDetails(map));
  }
}

void Deserializer::LogScriptEvents(Script script) {
  DisallowHeapAllocation no_gc;
  LOG(isolate_,
      ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
  LOG(isolate_, ScriptDetails(script));
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("v8.compile"), "Script",
      TRACE_ID_WITH_SCOPE("v8::internal::Script", script->id()));
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("v8.compile"), "Script",
      TRACE_ID_WITH_SCOPE("v8::internal::Script", script->id()),
      script->ToTracedValue());
}

StringTableInsertionKey::StringTableInsertionKey(String string)
    : StringTableKey(ComputeHashField(string)), string_(string) {
  DCHECK(string->IsInternalizedString());
}

bool StringTableInsertionKey::IsMatch(Object string) {
  // We know that all entries in a hash table had their hash keys created.
  // Use that knowledge to have fast failure.
  if (Hash() != String::cast(string)->Hash()) return false;
  // We want to compare the content of two internalized strings here.
  return string_->SlowEquals(String::cast(string));
}

Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
  return handle(string_, isolate);
}

uint32_t StringTableInsertionKey::ComputeHashField(String string) {
  // Make sure hash_field() is computed.
  string->Hash();
  return string->hash_field();
}

HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
  if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
    if (obj->IsString()) {
      // Uninitialize hash field as we need to recompute the hash.
      String string = String::cast(obj);
      string->set_hash_field(String::kEmptyHashField);
      // Rehash strings before read-only space is sealed. Strings outside
      // read-only space are rehashed lazily (e.g. when rehashing
      // dictionaries).
      if (space == RO_SPACE) {
        to_rehash_.push_back(obj);
      }
    } else if (obj->NeedsRehashing()) {
      to_rehash_.push_back(obj);
    }
  }

  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String string = String::cast(obj);
      if (string->IsInternalizedString()) {
        // Canonicalize the internalized string. If it already exists in the
        // string table, set it to forward to the existing one.
        StringTableInsertionKey key(string);
        String canonical =
            StringTable::ForwardStringIfExists(isolate_, &key, string);

        if (!canonical.is_null()) return canonical;

        new_internalized_strings_.push_back(handle(string, isolate_));
        return string;
      }
    } else if (obj->IsScript()) {
      new_scripts_.push_back(handle(Script::cast(obj), isolate_));
    } else if (obj->IsAllocationSite()) {
      // We should link new allocation sites, but we can't do this immediately
      // because |AllocationSite::HasWeakNext()| internally accesses
      // |Heap::roots_|, which may not have been initialized yet. So defer
      // this to |ObjectDeserializer::CommitPostProcessedObjects()|.
      new_allocation_sites_.push_back(AllocationSite::cast(obj));
    } else {
      DCHECK(CanBeDeferred(obj));
    }
  }
  if (obj->IsScript()) {
    LogScriptEvents(Script::cast(obj));
  } else if (obj->IsCode()) {
    // We flush all code pages after deserializing the startup snapshot.
    // Hence we only remember each individual code object when deserializing
    // user code.
    if (deserializing_user_code() || space == LO_SPACE) {
      new_code_objects_.push_back(Code::cast(obj));
    }
  } else if (FLAG_trace_maps && obj->IsMap()) {
    // Keep track of all seen Maps to log them later since they might be only
    // partially initialized at this point.
    new_maps_.push_back(Map::cast(obj));
  } else if (obj->IsAccessorInfo()) {
#ifdef USE_SIMULATOR
    accessor_infos_.push_back(AccessorInfo::cast(obj));
#endif
  } else if (obj->IsCallHandlerInfo()) {
#ifdef USE_SIMULATOR
    call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
#endif
  } else if (obj->IsExternalString()) {
    if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
      ExternalOneByteString string = ExternalOneByteString::cast(obj);
      DCHECK(string->is_uncached());
      string->SetResource(
          isolate_, NativesExternalStringResource::DecodeForDeserialization(
                        string->resource()));
    } else {
      ExternalString string = ExternalString::cast(obj);
      uint32_t index = string->resource_as_uint32();
      Address address =
          static_cast<Address>(isolate_->api_external_references()[index]);
      string->set_address_as_resource(address);
      isolate_->heap()->UpdateExternalString(string, 0,
                                             string->ExternalPayloadSize());
    }
    isolate_->heap()->RegisterExternalString(String::cast(obj));
  } else if (obj->IsJSTypedArray()) {
    JSTypedArray typed_array = JSTypedArray::cast(obj);
    CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
    int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
    if (byte_offset > 0) {
      FixedTypedArrayBase elements =
          FixedTypedArrayBase::cast(typed_array->elements());
      // Must be off-heap layout.
      DCHECK(!typed_array->is_on_heap());

      void* pointer_with_offset = reinterpret_cast<void*>(
          reinterpret_cast<intptr_t>(elements->external_pointer()) +
          byte_offset);
      elements->set_external_pointer(pointer_with_offset);
    }
  } else if (obj->IsJSArrayBuffer()) {
    JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
    // Only fixup for the off-heap case.
    if (buffer->backing_store() != nullptr) {
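      // The serializer replaced the backing-store pointer with an index into
      // off_heap_backing_stores_, stored as a Smi; resolve it back to the
      // actual allocation.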
      Smi store_index(reinterpret_cast<Address>(buffer->backing_store()));
      void* backing_store = off_heap_backing_stores_[store_index->value()];

      buffer->set_backing_store(backing_store);
      isolate_->heap()->RegisterNewArrayBuffer(buffer);
    }
  } else if (obj->IsFixedTypedArrayBase()) {
    FixedTypedArrayBase fta = FixedTypedArrayBase::cast(obj);
    // Only fixup for the off-heap case.
    if (fta->base_pointer() == Smi::kZero) {
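      // As with JSArrayBuffer above, the external pointer currently holds a
      // Smi-encoded index into off_heap_backing_stores_; translate it to the
      // real pointer.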
      Smi store_index(reinterpret_cast<Address>(fta->external_pointer()));
      void* backing_store = off_heap_backing_stores_[store_index->value()];
      fta->set_external_pointer(backing_store);
    }
  } else if (obj->IsBytecodeArray()) {
    // TODO(mythria): Remove these once we store the default values for these
    // fields in the serializer.
    BytecodeArray bytecode_array = BytecodeArray::cast(obj);
    bytecode_array->set_osr_loop_nesting_level(0);
  }
#ifdef DEBUG
  if (obj->IsDescriptorArray()) {
    DescriptorArray descriptor_array = DescriptorArray::cast(obj);
    DCHECK_EQ(0, descriptor_array->raw_number_of_marked_descriptors());
  }
#endif

  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
                                    HeapObject::RequiredAlignment(obj->map())));
  return obj;
}

HeapObject Deserializer::GetBackReferencedObject(int space) {
  HeapObject obj;
  switch (space) {
    case LO_SPACE:
      obj = allocator()->GetLargeObject(source_.GetInt());
      break;
    case MAP_SPACE:
      obj = allocator()->GetMap(source_.GetInt());
      break;
    case RO_SPACE: {
      uint32_t chunk_index = source_.GetInt();
      uint32_t chunk_offset = source_.GetInt();
      if (isolate()->heap()->deserialization_complete()) {
        PagedSpace* read_only_space = isolate()->heap()->read_only_space();
        Page* page = read_only_space->first_page();
        for (uint32_t i = 0; i < chunk_index; ++i) {
          page = page->next_page();
        }
        Address address = page->OffsetToAddress(chunk_offset);
        obj = HeapObject::FromAddress(address);
      } else {
        obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
                                     chunk_index, chunk_offset);
      }
      break;
    }
    default: {
      uint32_t chunk_index = source_.GetInt();
      uint32_t chunk_offset = source_.GetInt();
      obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
                                   chunk_index, chunk_offset);
      break;
    }
  }

  if (deserializing_user_code() && obj->IsThinString()) {
    obj = ThinString::cast(obj)->actual();
  }

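  // Remember the object so that subsequent references to it can use the short
  // hot-object encoding.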
  hot_objects_.Add(obj);
  DCHECK(!HasWeakHeapObjectTag(obj->ptr()));
  return obj;
}

HeapObject Deserializer::ReadObject() {
  MaybeObject object;
  // We are reading to a location outside of JS heap, so pass NEW_SPACE to
  // avoid triggering write barriers.
  bool filled =
      ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
               NEW_SPACE, kNullAddress);
  CHECK(filled);
  return object.GetHeapObjectAssumeStrong();
}

HeapObject Deserializer::ReadObject(int space_number) {
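  // The size is serialized in kObjectAlignment units; shift to convert it to
  // bytes.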
  const int size = source_.GetInt() << kObjectAlignmentBits;

  Address address =
      allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
  HeapObject obj = HeapObject::FromAddress(address);

  isolate_->heap()->OnAllocationEvent(obj, size);
  MaybeObjectSlot current(address);
  MaybeObjectSlot limit(address + size);

  if (ReadData(current, limit, space_number, address)) {
    // Only post process if object content has not been deferred.
    obj = PostProcessNewObject(obj, space_number);
  }

#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
  }
#endif  // DEBUG
  return obj;
}

void Deserializer::ReadCodeObjectBody(int space_number,
                                      Address code_object_address) {
  // At this point the code object is already allocated, its map field is
  // initialized and its raw data fields and code stream are also read.
  // Now we read the rest of code header's fields.
  MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
  MaybeObjectSlot limit(code_object_address + Code::kDataStart);
  bool filled = ReadData(current, limit, space_number, code_object_address);
  CHECK(filled);

  // Now iterate RelocInfos the same way it was done by the serializer and
  // deserialize respective data into RelocInfos.
  Code code = Code::cast(HeapObject::FromAddress(code_object_address));
  RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
  for (; !it.done(); it.next()) {
    RelocInfo rinfo = *it.rinfo();
    rinfo.Visit(this);
  }
}

void Deserializer::VisitCodeTarget(Code host, RelocInfo* rinfo) {
  HeapObject object = ReadObject();
  rinfo->set_target_address(Code::cast(object)->raw_instruction_start());
}

void Deserializer::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
  HeapObject object = ReadObject();
  // Embedded object reference must be a strong one.
  rinfo->set_target_object(isolate_->heap(), object);
}

void Deserializer::VisitRuntimeEntry(Code host, RelocInfo* rinfo) {
  // We no longer serialize code that contains runtime entries.
  UNREACHABLE();
}

void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
  byte data = source_.Get();
  CHECK_EQ(data, kExternalReference);

  Address address = ReadExternalReferenceCase();

  if (rinfo->IsCodedSpecially()) {
    Address location_of_branch_data = rinfo->pc();
    Assembler::deserialization_set_special_target_at(location_of_branch_data,
                                                     host, address);
  } else {
    WriteUnalignedValue(rinfo->target_address_address(), address);
  }
}

void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
  byte data = source_.Get();
  CHECK_EQ(data, kInternalReference);

  // Internal reference target is encoded as an offset from code entry.
  int target_offset = source_.GetInt();
  DCHECK_LT(static_cast<unsigned>(target_offset),
            static_cast<unsigned>(host->raw_instruction_size()));
  Address target = host->entry() + target_offset;
  Assembler::deserialization_set_target_internal_reference_at(
      rinfo->pc(), target, rinfo->rmode());
}

void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
  DCHECK(FLAG_embedded_builtins);
  byte data = source_.Get();
  CHECK_EQ(data, kOffHeapTarget);

  int builtin_index = source_.GetInt();
  DCHECK(Builtins::IsBuiltinId(builtin_index));

  CHECK_NOT_NULL(isolate_->embedded_blob());
  EmbeddedData d = EmbeddedData::FromBlob();
  Address address = d.InstructionStartOfBuiltin(builtin_index);
  CHECK_NE(kNullAddress, address);

  // TODO(ishell): implement RelocInfo::set_target_off_heap_target()
  if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
    Address location_of_branch_data = rinfo->pc();
    Assembler::deserialization_set_special_target_at(location_of_branch_data,
                                                     host, address);
  } else {
    WriteUnalignedValue(rinfo->target_address_address(), address);
  }
}

template <typename TSlot>
TSlot Deserializer::ReadRepeatedObject(TSlot current, int repeat_count) {
  CHECK_LE(2, repeat_count);

  HeapObject heap_object = ReadObject();
  DCHECK(!Heap::InYoungGeneration(heap_object));
  for (int i = 0; i < repeat_count; i++) {
    // Repeated values are not subject to the write barrier so we don't need
    // to trigger it.
    current = Write(current, MaybeObject::FromObject(heap_object));
  }
  return current;
}

static void NoExternalReferencesCallback() {
  // The following check will trigger if a function or object template with
  // references to native functions has been deserialized from a snapshot, but
  // no actual external references were provided when the isolate was created.
  CHECK_WITH_MSG(false, "No external references provided via API");
}

template <typename TSlot>
bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time. In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed =
      (current_object_address != kNullAddress && source_space != NEW_SPACE &&
       source_space != CODE_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(bytecode, space_number) \
  case bytecode + space_number:                \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(bytecode, space_number_if_any)                             \
  current = ReadDataCase<TSlot, bytecode, space_number_if_any>(              \
      isolate, current, current_object_address, data, write_barrier_needed); \
  break;

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
#define ALL_SPACES(bytecode)           \
  CASE_STATEMENT(bytecode, NEW_SPACE)  \
  CASE_BODY(bytecode, NEW_SPACE)       \
  CASE_STATEMENT(bytecode, OLD_SPACE)  \
  V8_FALLTHROUGH;                      \
  CASE_STATEMENT(bytecode, CODE_SPACE) \
  V8_FALLTHROUGH;                      \
  CASE_STATEMENT(bytecode, MAP_SPACE)  \
  V8_FALLTHROUGH;                      \
  CASE_STATEMENT(bytecode, LO_SPACE)   \
  V8_FALLTHROUGH;                      \
  CASE_STATEMENT(bytecode, RO_SPACE)   \
  CASE_BODY(bytecode, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code:             \
  case byte_code + 1:         \
  case byte_code + 2:         \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code)          \
  FOUR_CASES(byte_code + 4)      \
  FOUR_CASES(byte_code + 8)      \
  FOUR_CASES(byte_code + 12)

#define SINGLE_CASE(bytecode, space) \
  CASE_STATEMENT(bytecode, space)    \
  CASE_BODY(bytecode, space)

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      SINGLE_CASE(kRootArray, RO_SPACE)
      // Find an object in the partial snapshot cache and write a pointer to it
      // to the current object.
      SINGLE_CASE(kPartialSnapshotCache, RO_SPACE)
      // Find an object in the read-only object cache and write a pointer to it
      // to the current object.
      SINGLE_CASE(kReadOnlyObjectCache, RO_SPACE)
      // Find an object in the attached references and write a pointer to it to
      // the current object.
      SINGLE_CASE(kAttachedReference, RO_SPACE)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      // Find an external reference and write a pointer to it to the current
      // object.
      case kExternalReference: {
        Address address = ReadExternalReferenceCase();
        current = WriteAddress(current, address);
        break;
      }

      case kInternalReference:
      case kOffHeapTarget: {
        // These bytecodes are expected only during RelocInfo iteration.
        UNREACHABLE();
        break;
      }

      case kNop:
        break;

      case kNextChunk: {
        int space = source_.Get();
        allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
        break;
      }

      case kDeferred: {
        // Deferred can only occur right after the heap object header.
        DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
        HeapObject obj = HeapObject::FromAddress(current_object_address);
        // If the deferred object is a map, its instance type may be used
        // during deserialization. Initialize it with a temporary value.
        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
        current = limit;
        return false;
      }

      case kSynchronize:
        // Getting here indicates a mismatch between the number of GC roots
        // when serializing and when deserializing.
        UNREACHABLE();

      // Deserialize raw data of variable length.
      case kVariableRawData: {
        int size_in_bytes = source_.GetInt();
        DCHECK(IsAligned(size_in_bytes, kTaggedSize));
        source_.CopyRaw(current.ToVoidPtr(), size_in_bytes);
        current = TSlot(current.address() + size_in_bytes);
        break;
      }

      // Deserialize raw code directly into the body of the code object.
      case kVariableRawCode: {
        // VariableRawCode can only occur right after the heap object header.
        DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
        int size_in_bytes = source_.GetInt();
        DCHECK(IsAligned(size_in_bytes, kTaggedSize));
        source_.CopyRaw(
            reinterpret_cast<void*>(current_object_address + Code::kDataStart),
            size_in_bytes);
        // Deserialize tagged fields in the code object header and reloc infos.
        ReadCodeObjectBody(source_space, current_object_address);
        // Set current to the code object end.
        current = TSlot(current.address() + Code::kDataStart -
                        HeapObject::kHeaderSize + size_in_bytes);
        CHECK_EQ(current, limit);
        break;
      }

      case kVariableRepeat: {
        int repeats = DecodeVariableRepeatCount(source_.GetInt());
        current = ReadRepeatedObject(current, repeats);
        break;
      }

      case kOffHeapBackingStore: {
        int byte_length = source_.GetInt();
        byte* backing_store = static_cast<byte*>(
            isolate->array_buffer_allocator()->AllocateUninitialized(
                byte_length));
        CHECK_NOT_NULL(backing_store);
        source_.CopyRaw(backing_store, byte_length);
        off_heap_backing_stores_.push_back(backing_store);
        break;
      }

      case kApiReference: {
        uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
        Address address;
        if (isolate->api_external_references()) {
          DCHECK_WITH_MSG(
              reference_id < num_api_references_,
              "too few external references provided through the API");
          address = static_cast<Address>(
              isolate->api_external_references()[reference_id]);
        } else {
          address = reinterpret_cast<Address>(NoExternalReferencesCallback);
        }
        current = WriteAddress(current, address);
        break;
      }

      case kClearedWeakReference:
        current = Write(current, HeapObjectReference::ClearedValue(isolate_));
        break;

      case kWeakPrefix:
        DCHECK(!allocator()->next_reference_is_weak());
        allocator()->set_next_reference_is_weak(true);
        break;

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2: {
        int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
        allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
        break;
      }

      // First kNumberOfRootArrayConstants roots are guaranteed to be in
      // the old space.
      STATIC_ASSERT(
          static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) == 0);
      STATIC_ASSERT(kNumberOfRootArrayConstants <=
                    static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
      SIXTEEN_CASES(kRootArrayConstants)
      SIXTEEN_CASES(kRootArrayConstants + 16) {
        int id = data & kRootArrayConstantsMask;
        RootIndex root_index = static_cast<RootIndex>(id);
        MaybeObject object = MaybeObject::FromObject(isolate->root(root_index));
        DCHECK(!Heap::InYoungGeneration(object));
        current = Write(current, object);
        break;
      }

      STATIC_ASSERT(kNumberOfHotObjects == 8);
      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectMask;
        Object hot_object = hot_objects_.Get(index);
        MaybeObject hot_maybe_object = MaybeObject::FromObject(hot_object);
        if (allocator()->GetAndClearNextReferenceIsWeak()) {
          hot_maybe_object = MaybeObject::MakeWeak(hot_maybe_object);
        }
        // Don't update current pointer here as it may be needed for write
        // barrier.
        Write(current, hot_maybe_object);
        if (write_barrier_needed && Heap::InYoungGeneration(hot_object)) {
          HeapObject current_object =
              HeapObject::FromAddress(current_object_address);
          GenerationalBarrier(current_object,
                              MaybeObjectSlot(current.address()),
                              hot_maybe_object);
        }
        ++current;
        break;
      }

      // Deserialize raw data of fixed length from 1 to 32 words.
      STATIC_ASSERT(kNumberOfFixedRawData == 32);
      SIXTEEN_CASES(kFixedRawData)
      SIXTEEN_CASES(kFixedRawData + 16) {
        int size_in_tagged = data - kFixedRawDataStart;
        source_.CopyRaw(current.ToVoidPtr(), size_in_tagged * kTaggedSize);
        current += size_in_tagged;
        break;
      }

      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
      SIXTEEN_CASES(kFixedRepeat) {
        int repeats = DecodeFixedRepeatCount(data);
        current = ReadRepeatedObject(current, repeats);
        break;
      }

#ifdef DEBUG
#define UNUSED_CASE(byte_code) \
  case byte_code:              \
    UNREACHABLE();
      UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
#endif
#undef UNUSED_CASE

#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE
    }
  }
  CHECK_EQ(limit, current);
  return true;
}

Address Deserializer::ReadExternalReferenceCase() {
  uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
  return isolate_->external_reference_table()->address(reference_id);
}

template <typename TSlot, SerializerDeserializer::Bytecode bytecode,
          int space_number_if_any>
TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
                                 Address current_object_address, byte data,
                                 bool write_barrier_needed) {
  bool emit_write_barrier = false;
  int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
                                                         : space_number_if_any;
  HeapObject heap_object;
  HeapObjectReferenceType reference_type =
      allocator()->GetAndClearNextReferenceIsWeak()
          ? HeapObjectReferenceType::WEAK
          : HeapObjectReferenceType::STRONG;

  if (bytecode == kNewObject) {
    heap_object = ReadObject(space_number);
    emit_write_barrier = (space_number == NEW_SPACE);
  } else if (bytecode == kBackref) {
    heap_object = GetBackReferencedObject(space_number);
    emit_write_barrier = (space_number == NEW_SPACE);
  } else if (bytecode == kRootArray) {
    int id = source_.GetInt();
    RootIndex root_index = static_cast<RootIndex>(id);
    heap_object = HeapObject::cast(isolate->root(root_index));
    emit_write_barrier = Heap::InYoungGeneration(heap_object);
    hot_objects_.Add(heap_object);
  } else if (bytecode == kReadOnlyObjectCache) {
    int cache_index = source_.GetInt();
    heap_object = HeapObject::cast(
        isolate->heap()->read_only_heap()->read_only_object_cache()->at(
            cache_index));
    DCHECK(!Heap::InYoungGeneration(heap_object));
    emit_write_barrier = false;
  } else if (bytecode == kPartialSnapshotCache) {
    int cache_index = source_.GetInt();
    heap_object =
        HeapObject::cast(isolate->partial_snapshot_cache()->at(cache_index));
    emit_write_barrier = Heap::InYoungGeneration(heap_object);
  } else {
    DCHECK_EQ(bytecode, kAttachedReference);
    int index = source_.GetInt();
    heap_object = *attached_objects_[index];
    emit_write_barrier = Heap::InYoungGeneration(heap_object);
  }
  HeapObjectReference heap_object_ref =
      reference_type == HeapObjectReferenceType::STRONG
          ? HeapObjectReference::Strong(heap_object)
          : HeapObjectReference::Weak(heap_object);
  // Don't update current pointer here as it may be needed for write barrier.
  Write(current, heap_object_ref);
  if (emit_write_barrier && write_barrier_needed) {
    HeapObject host_object = HeapObject::FromAddress(current_object_address);
    SLOW_DCHECK(isolate->heap()->Contains(host_object));
    GenerationalBarrier(host_object, MaybeObjectSlot(current.address()),
                        heap_object_ref);
  }
  return current + 1;
}

}  // namespace internal
}  // namespace v8