/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BulkDecommit.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "CryptoRandom.h"
#include "Environment.h"
#include "Gigacage.h"
#include "DebugHeap.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>
#include <vector>

namespace bmalloc {

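// When the Gigacage is enabled, seed the large free list with a randomly slid, page-aligned
// subrange of the cage so that large allocations start at an unpredictable offset.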
Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    BASSERT(!Environment::get()->isDebugHeapEnabled());

    Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
    if (usingGigacage()) {
        RELEASE_BASSERT(gigacageBasePtr());
        uint64_t random[2];
        cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
        size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
        ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
        void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
        m_largeFree.add(LargeRange(base, size, 0, 0));
    }
#endif

    m_scavenger = Scavenger::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

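// Precompute, for each small size class, how many objects begin in each line of a page and
// how far into the line the first of them starts (the leftover from the previous line).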
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

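// For each size class, choose the smallest page size (a multiple of the physical page size)
// whose tail waste is within 1/pageSizeWasteFactor of the page, and record its page class.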
void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
{
    return m_freeableMemory;
}

size_t Heap::footprint()
{
    return m_footprint;
}

void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
{
    m_largeFree.markAllAsEligibile();
    m_hasPendingDecommits = false;
    m_condition.notify_all();
}

void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
{
    m_footprint -= range.totalPhysicalSize();
    m_freeableMemory -= range.totalPhysicalSize();
    decommitter.addLazy(range.begin(), range.size());
    m_hasPendingDecommits = true;
    range.setStartPhysicalSize(0);
    range.setTotalPhysicalSize(0);
    BASSERT(range.isEligibile());
    range.setEligible(false);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(range.begin(), range.size());
#endif
}

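// Return physical pages to the OS: eagerly for free small pages, lazily for free large ranges.
// Anything used since the last scavenge is skipped and counted in deferredDecommits so the
// scavenger can revisit it. Cached chunks are released back to the large free list.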
void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
{
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;
                if (page->usedSinceLastScavenge()) {
                    page->clearUsedSinceLastScavenge();
                    deferredDecommits++;
                    continue;
                }

                size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
                size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
                m_freeableMemory -= decommitSize;
                m_footprint -= decommitSize;
                decommitter.addEager(page->begin()->begin(), pageSize);
                page->setHasPhysicalPages(false);
#if ENABLE_PHYSICAL_PAGE_MAP
                m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
#endif
            }
        }
    }

    for (auto& list : m_chunkCache) {
        while (!list.isEmpty())
            deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
    }

    for (LargeRange& range : m_largeFree) {
        if (range.usedSinceLastScavenge()) {
            range.clearUsedSinceLastScavenge();
            deferredDecommits++;
            continue;
        }
        decommitLargeRange(lock, range, decommitter);
    }
}

void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

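// Reuse a chunk from the chunk cache if possible; otherwise carve a new chunk out of a large
// allocation and thread all of its pages onto the chunk's free page list.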
void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setUsedSinceLastScavenge();
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_freeableMemory += chunkSize;

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

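// Retire a small chunk back to the large free list, converting the per-page physical size
// accounting used while it held small pages into the per-range accounting used for large ranges.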
void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);
    size_t totalPhysicalSize = size;

    size_t accountedInFreeable = 0;

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));
        if (!page->hasPhysicalPages()) {
            totalPhysicalSize -= physicalSize;
            hasPhysicalPages = false;
        } else
            accountedInFreeable += physicalSize;
    });

    m_freeableMemory -= accountedInFreeable;
    m_freeableMemory += totalPhysicalSize;

    size_t startPhysicalSize = hasPhysicalPages ? size : 0;
    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
}

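// Prefer pages from the caller's line cache, then the heap's line cache; otherwise pop a free
// page from a chunk of the right page class, committing physical memory if the page was
// previously decommitted.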
SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        size_t pageSize = bmalloc::pageSize(pageClass);
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
        if (page->hasPhysicalPages())
            m_freeableMemory -= physicalSize;
        else {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
            m_footprint += physicalSize;
            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
            page->setHasPhysicalPages(true);
#if ENABLE_PHYSICAL_PAGE_MAP
            m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
        }
        page->setUsedSinceLastScavenge();

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

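// Called when a line's refcount drops to zero. A fully free page goes back to its chunk, and a
// fully free chunk is pushed onto the chunk cache, displacing any previously cached chunk.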
void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    m_freeableMemory += physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

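// Refill the bump allocator from a small page by walking the precomputed line metadata and
// coalescing runs of unreferenced lines into bump ranges.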
void Heap::allocateSmallBumpRangesByMetadata(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

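// Like allocateSmallBumpRangesByMetadata, but walks the page one object at a time, for size
// classes large enough that no per-line metadata was generated for them.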
void Heap::allocateSmallBumpRangesByObject(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

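// Trim a free large range to the requested alignment and size, returning any leading or
// trailing remainder to the free list, and commit whatever part of the result is not yet
// backed by physical pages.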
LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    if (range.startPhysicalSize() < range.size()) {
        m_scavenger->scheduleIfUnderMemoryPressure(range.size());
        m_footprint += range.size() - range.totalPhysicalSize();
        vmAllocatePhysicalPagesSloppy(range.begin() + range.startPhysicalSize(), range.size() - range.startPhysicalSize());
        range.setStartPhysicalSize(range.size());
        range.setTotalPhysicalSize(range.size());
#if ENABLE_PHYSICAL_PAGE_MAP
        m_physicalPageMap.commit(range.begin(), range.size());
#endif
    }

    if (prev) {
        m_freeableMemory += prev.totalPhysicalSize();
        m_largeFree.add(prev);
    }

    if (next) {
        m_freeableMemory += next.totalPhysicalSize();
        m_largeFree.add(next);
    }

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

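// Round size and alignment up to largeAlignment, then satisfy the request from the large free
// list, waiting for pending decommits to finish before falling back to the VMHeap (or failing,
// when the heap is confined to its Gigacage).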
void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    BASSERT(isPowerOfTwo(alignment));

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (m_hasPendingDecommits) {
            m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
            // Now we're guaranteed we're looking at all available memory.
            return tryAllocateLarge(lock, alignment, size);
        }

        if (usingGigacage())
            return nullptr;

        range = VMHeap::get()->tryAllocateLargeChunk(alignment, size);
        if (!range)
            return nullptr;

        m_largeFree.add(range);
        range = m_largeFree.remove(alignment, size);
    }

    m_freeableMemory -= range.totalPhysicalSize();

    void* result = splitAndAllocate(lock, range, alignment, size).begin();
    return result;
}

void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size, size);
    splitAndAllocate(lock, range, alignment, newSize);

    m_scavenger->schedule(size);
}

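// The freed range is recorded as fully committed; the scavenger decommits it later if it
// stays free.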
void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
{
    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, size, size));
    m_freeableMemory += size;
    m_scavenger->schedule(size);
}

void Heap::externalCommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalCommit(lock, ptr, size);
}

void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint += size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(ptr, size);
#endif
}

void Heap::externalDecommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalDecommit(lock, ptr, size);
}

void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint -= size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(ptr, size);
#endif
}

} // namespace bmalloc