// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/isolate-allocator.h"
#include "src/base/bounded-page-allocator.h"
#include "src/isolate.h"
#include "src/ptr-compr.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

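// In the kInV8Heap mode (64-bit targets only) the Isolate object is placed
// inside a dedicated V8 heap reservation; in the kInCppHeap mode it is
// allocated on the C++ heap with plain operator new.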
IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
#if V8_TARGET_ARCH_64_BIT
  if (mode == IsolateAllocationMode::kInV8Heap) {
    Address heap_reservation_address = InitReservation();
    CommitPagesForIsolate(heap_reservation_address);
    return;
  }
#endif  // V8_TARGET_ARCH_64_BIT

  // Allocate Isolate in C++ heap.
  CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
  page_allocator_ = GetPlatformPageAllocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
  DCHECK(!reservation_.IsReserved());
}

IsolateAllocator::~IsolateAllocator() {
  if (reservation_.IsReserved()) {
    // The actual memory will be freed when |reservation_| is destroyed.
    return;
  }

  // The memory was allocated in C++ heap.
  ::operator delete(isolate_memory_);
}

#if V8_TARGET_ARCH_64_BIT
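// Reserves a |kPtrComprHeapReservationSize|-sized region whose middle is
// |kPtrComprIsolateRootAlignment|-aligned. The strategy is to over-reserve a
// padded region of twice the size, compute the aligned sub-region inside it
// and then (except on Fuchsia) re-reserve exactly that sub-region.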
Address IsolateAllocator::InitReservation() {
  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  // Reserve a 4Gb region so that the middle is 4Gb aligned.
  // The VirtualMemory API does not support such a constraint, so we have to
  // implement it manually here.
  size_t reservation_size = kPtrComprHeapReservationSize;
  size_t base_alignment = kPtrComprIsolateRootAlignment;

  const int kMaxAttempts = 3;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
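    // The hint is biased by kPtrComprIsolateRootBias so that, if the OS
    // honors it, the aligned sub-region computed below starts right at the
    // beginning of the padded reservation (assuming the bias is half of
    // |base_alignment|).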
    Address hint = RoundDown(reinterpret_cast<Address>(
                                 platform_page_allocator->GetRandomMmapAddr()),
                             base_alignment) +
                   kPtrComprIsolateRootBias;

    // Within this reservation there will be a sub-region with proper
    // alignment.
    VirtualMemory padded_reservation(platform_page_allocator,
                                     reservation_size * 2,
                                     reinterpret_cast<void*>(hint));
    if (!padded_reservation.IsReserved()) break;

    // Find a sub-region inside the reservation whose middle is
    // |base_alignment|-aligned.
    Address address =
        RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias,
                base_alignment) -
        kPtrComprIsolateRootBias;
    CHECK(padded_reservation.InVM(address, reservation_size));

#if defined(V8_OS_FUCHSIA)
    // Fuchsia does not respect given hints, so as a workaround we will use
    // the overreserved address space region instead of trying to re-reserve
    // a subregion.
    if (padded_reservation.InVM(address, reservation_size)) {
      reservation_ = std::move(padded_reservation);
      return address;
    }
#else
    // Now free the padded reservation and immediately try to reserve an exact
    // region at the aligned address. We have to do this dance because the
    // reservation address requirement is more complex than just a certain
    // alignment, and not all operating systems support freeing parts of
    // reserved address space regions.
    padded_reservation.Free();

    VirtualMemory reservation(platform_page_allocator, reservation_size,
                              reinterpret_cast<void*>(address));
    if (!reservation.IsReserved()) break;

    // The reservation could still be somewhere else, but we can accept it
    // if it has the required alignment.
    Address aligned_address =
        RoundUp(reservation.address() + kPtrComprIsolateRootBias,
                base_alignment) -
        kPtrComprIsolateRootBias;

    if (reservation.address() == aligned_address) {
      reservation_ = std::move(reservation);
      CHECK_EQ(reservation_.size(), reservation_size);
      return aligned_address;
    }
#endif
  }
  V8::FatalProcessOutOfMemory(nullptr,
                              "Failed to reserve memory for new V8 Isolate");
  return kNullAddress;
}

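// Creates a BoundedPageAllocator over the reservation made by
// InitReservation(), marks the pages that will back the Isolate object as
// already allocated and commits just those pages.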
void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
  CHECK(reservation_.InVM(heap_address, kPtrComprHeapReservationSize));

  Address isolate_root = heap_address + kPtrComprIsolateRootBias;
  CHECK(IsAligned(isolate_root, kPtrComprIsolateRootAlignment));

  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  // Simplify BoundedPageAllocator's life by configuring it to use the same
  // page size as the Heap will use (MemoryChunk::kPageSize).
  size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
                             platform_page_allocator->AllocatePageSize());

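  // The bounded allocator spans the whole reservation, so any pages handed
  // out through |page_allocator_| are guaranteed to lie inside it.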
  page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
      platform_page_allocator, heap_address, kPtrComprHeapReservationSize,
      page_size);
  page_allocator_ = page_allocator_instance_.get();

  Address isolate_address = isolate_root - Isolate::isolate_root_bias();
  Address isolate_end = isolate_address + sizeof(Isolate);

  // Inform the bounded page allocator about reserved pages.
  {
    Address reserved_region_address = RoundDown(isolate_address, page_size);
    size_t reserved_region_size =
        RoundUp(isolate_end, page_size) - reserved_region_address;

    CHECK(page_allocator_instance_->AllocatePagesAt(
        reserved_region_address, reserved_region_size,
        PageAllocator::Permission::kNoAccess));
  }

  // Commit pages where the Isolate will be stored.
  {
    size_t commit_page_size = platform_page_allocator->CommitPageSize();
    Address committed_region_address =
        RoundDown(isolate_address, commit_page_size);
    size_t committed_region_size =
        RoundUp(isolate_end, commit_page_size) - committed_region_address;

    // We are using |reservation_| directly here because |page_allocator_| has
    // a bigger commit page size than we actually need.
    CHECK(reservation_.SetPermissions(committed_region_address,
                                      committed_region_size,
                                      PageAllocator::kReadWrite));

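    // If zapping is enabled, fill the committed region with kZapValue so
    // that use of uninitialized Isolate memory is easier to notice.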
    if (Heap::ShouldZapGarbage()) {
      for (Address address = committed_region_address;
           address < committed_region_address + committed_region_size;
           address += kSystemPointerSize) {
        Memory<Address>(address) = static_cast<Address>(kZapValue);
      }
    }
  }
  isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
#endif  // V8_TARGET_ARCH_64_BIT

}  // namespace internal
}  // namespace v8