// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/allocation.h"

#include <stdlib.h>  // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/memcopy.h"
#include "src/v8.h"
#include "src/vector.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {

namespace {

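// Dispatches to the platform's aligned-allocation primitive: _aligned_malloc
// on Windows, memalign on Bionic, posix_memalign elsewhere. Illustrative call
// (sketch only; real callers go through AlignedAlloc below, which adds retry
// and OOM handling):
//
//   void* p = AlignedAllocInternal(4096, 64);  // 4 KiB, 64-byte aligned.
//   if (p != nullptr) free(p);  // Pairing with free() is only valid on the
//                               // memalign/posix_memalign paths.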
void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageTableInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
  return GetPageTableInitializer()->page_allocator();
}

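// Swaps in a test allocator and returns the previous one so callers can
// restore it. Illustrative test usage (sketch; MockPageAllocator is a
// hypothetical stand-in for a test double):
//
//   MockPageAllocator mock;
//   v8::PageAllocator* old = SetPlatformPageAllocatorForTesting(&mock);
//   // ... exercise code under test ...
//   SetPlatformPageAllocatorForTesting(old);  // Restore.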
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
  return old_page_allocator;
}

void* Malloced::New(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::Delete(void* p) {
  free(p);
}

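// StrDup/StrNDup allocate through NewArray<char>, so the result must be
// released with DeleteArray, not free(). Illustrative usage (sketch):
//
//   char* copy = StrDup("hello");
//   // ...
//   DeleteArray(copy);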
char* StrDup(const char* str) {
  int length = StrLength(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, int n) {
  int length = StrLength(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

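// malloc with a bounded retry loop: on each failure, memory pressure is
// reported to the embedder and the allocation is attempted again, up to
// kAllocationTries attempts in total. May still return nullptr; the caller
// decides whether that is fatal. Illustrative usage (sketch):
//
//   void* buffer = AllocWithRetry(n);
//   if (buffer == nullptr) { /* degrade gracefully or report OOM */ }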
void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

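// Aligned counterpart of Malloced::New: crashes with FatalProcessOutOfMemory
// instead of returning nullptr. Results must be released with AlignedFree.
// Illustrative usage (sketch):
//
//   void* block = AlignedAlloc(1024, 64);  // 64-byte aligned, never nullptr.
//   // ...
//   AlignedFree(block);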
void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(alignof(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  free(ptr);
#endif
}

size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

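// Page allocation with the same pressure-retry loop as AllocWithRetry. The
// retry request size is padded by (alignment - page size), the worst-case
// extra space needed to carve an aligned block out of an unaligned mapping.
// Illustrative usage (sketch; size must be a multiple of AllocatePageSize()):
//
//   size_t page_size = page_allocator->AllocatePageSize();
//   void* mem = AllocatePages(page_allocator, nullptr, 16 * page_size,
//                             page_size, PageAllocator::kReadWrite);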
void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
                    size_t size, size_t alignment,
                    PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(address, AlignedAddress(address, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(address, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

bool FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  return page_allocator->FreePages(address, size);
}

bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  return page_allocator->ReleasePages(address, size, new_size);
}

bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
                   size_t* allocated) {
  DCHECK_NOT_NULL(page_allocator);
  size_t page_size = page_allocator->AllocatePageSize();
  void* result = AllocatePages(page_allocator, address, page_size, page_size,
                               PageAllocator::kReadWrite);
  if (result != nullptr) *allocated = page_size;
  return static_cast<byte*>(result);
}

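// Forwards memory pressure to the embedder, preferring the size-aware
// overload and falling back to the legacy no-argument one if the embedder did
// not handle it. Always returns true, so the retry loops above make up to
// kAllocationTries attempts.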
bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

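// Reserves (but does not commit) an aligned region; pages start as kNoAccess
// and are committed later via SetPermissions. Illustrative lifecycle (sketch;
// size must be CommitPageSize()-aligned):
//
//   VirtualMemory vm(GetPlatformPageAllocator(), size, nullptr);
//   if (vm.IsReserved()) {
//     vm.SetPermissions(vm.address(), size, PageAllocator::kReadWrite);
//     // ...
//   }  // Destructor frees the reservation.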
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  Address address = reinterpret_cast<Address>(
      AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
                    PageAllocator::kNoAccess));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

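// Shrinks the reservation from the tail: everything from free_start (which
// must be commit-page aligned) to the old end is returned to the OS, and the
// region is trimmed accordingly. Worked example (sketch): for a 4-page
// reservation with free_start at the 3-page mark, Release returns one page's
// worth of bytes and region_.size() drops to 3 pages.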
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects size to be aligned to allocation granularity; however,
  // ReleasePages may leave size at only commit granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

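// Move-style transfer of an existing reservation; `from` is left unreserved.
// Illustrative usage (sketch):
//
//   VirtualMemory target;
//   target.TakeControl(&other);  // `other` no longer owns the region.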
void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  page_allocator_ = from->page_allocator_;
  region_ = from->region_;
  from->Reset();
}

}  // namespace internal
}  // namespace v8