/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef Heap_h
#define Heap_h

#include "BumpRange.h"
#include "Chunk.h"
#include "HeapKind.h"
#include "LargeMap.h"
#include "LineMetadata.h"
#include "List.h"
#include "Map.h"
#include "Mutex.h"
#include "Object.h"
#include "PerHeapKind.h"
#include "PerProcess.h"
#include "PhysicalPageMap.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "Vector.h"
#include <array>
#include <condition_variable>
#include <mutex>
#include <vector>

namespace bmalloc {

class BeginTag;
class BulkDecommit;
class BumpAllocator;
class DebugHeap;
class EndTag;
class Scavenger;

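// A Heap manages allocation for a single HeapKind. Small objects are
// bump-allocated from lines and pages carved out of Chunks; large objects are
// tracked in m_largeFree and m_largeAllocated. Callers synchronize through the
// shared per-process lock returned by mutex().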
class Heap {
public:
    Heap(HeapKind, std::lock_guard<Mutex>&);

    static Mutex& mutex() { return PerProcess<PerHeapKind<Heap>>::mutex(); }

    HeapKind kind() const { return m_kind; }

    void allocateSmallBumpRanges(std::unique_lock<Mutex>&, size_t sizeClass,
        BumpAllocator&, BumpRangeCache&, LineCache&);
    void derefSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
    void deallocateLineCache(std::unique_lock<Mutex>&, LineCache&);

    void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t);
    void* tryAllocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t);
    void deallocateLarge(std::unique_lock<Mutex>&, void*);

    bool isLarge(std::unique_lock<Mutex>&, void*);
    size_t largeSize(std::unique_lock<Mutex>&, void*);
    void shrinkLarge(std::unique_lock<Mutex>&, const Range&, size_t);

    void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& deferredDecommits);
    void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& freed, size_t goal);

    size_t freeableMemory(std::lock_guard<Mutex>&);
    size_t footprint();

    void externalDecommit(void* ptr, size_t);
    void externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t);
    void externalCommit(void* ptr, size_t);
    void externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t);

    void markAllLargeAsEligibile(std::lock_guard<Mutex>&);

private:
    void decommitLargeRange(std::lock_guard<Mutex>&, LargeRange&, BulkDecommit&);

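    // Hash for the map of live large objects. Large allocations are at least
    // smallMax bytes long, so dividing the base pointer by smallMax yields a
    // cheap, well-distributed hash.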
    struct LargeObjectHash {
        static unsigned hash(void* key)
        {
            return static_cast<unsigned>(
                reinterpret_cast<uintptr_t>(key) / smallMax);
        }
    };

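    // Heaps live in per-process storage for the lifetime of the process, so
    // destruction is disallowed.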
    ~Heap() = delete;

    bool usingGigacage();
    void* gigacageBasePtr(); // May crash if !usingGigacage().
    size_t gigacageSize();

    void initializeLineMetadata();
    void initializePageMetadata();

    void allocateSmallBumpRangesByMetadata(std::unique_lock<Mutex>&,
        size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&);
    void allocateSmallBumpRangesByObject(std::unique_lock<Mutex>&,
        size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&);

    SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&);
    void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);

    void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass);
    void deallocateSmallChunk(Chunk*, size_t pageClass);

    void mergeLarge(BeginTag*&, EndTag*&, Range&);
    void mergeLargeLeft(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
    void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);

    LargeRange splitAndAllocate(std::unique_lock<Mutex>&, LargeRange&, size_t alignment, size_t);

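    // Mutable state below is normally accessed with the shared Heap mutex held
    // (note the lock parameters on the member functions above).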
    HeapKind m_kind;

    bool m_hasPendingDecommits { false };
    std::condition_variable_any m_condition;

    size_t m_vmPageSizePhysical;
    Vector<LineMetadata> m_smallLineMetadata;
    std::array<size_t, sizeClassCount> m_pageClasses;

    LineCache m_lineCache;
    std::array<List<Chunk>, pageClassCount> m_freePages;
    std::array<List<Chunk>, pageClassCount> m_chunkCache;

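    // Large-object bookkeeping: m_largeAllocated maps each live large object's
    // base pointer to its size; m_largeFree tracks ranges available for reuse.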
    Map<void*, size_t, LargeObjectHash> m_largeAllocated;
    LargeMap m_largeFree;

    Map<Chunk*, ObjectType, ChunkHash> m_objectTypes;

    Scavenger* m_scavenger { nullptr };

    size_t m_footprint { 0 };
    size_t m_freeableMemory { 0 };

#if ENABLE_PHYSICAL_PAGE_MAP
    PhysicalPageMap m_physicalPageMap;
#endif
};

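// Small-object bump allocation dispatches on size class: classes smaller than a
// line use the precomputed line-metadata path; classes of a line or larger use
// the per-object path.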
inline void Heap::allocateSmallBumpRanges(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    if (sizeClass < bmalloc::sizeClass(smallLineSize))
        return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache);
    return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache);
}

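// Drops one reference to the line containing the object; once the last
// reference is gone, the line is recycled via deallocateSmallLine().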
inline void Heap::derefSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
{
    if (!object.line()->deref(lock))
        return;
    deallocateSmallLine(lock, object, lineCache);
}
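
// Illustrative sketch of the large-object entry points (hypothetical call site;
// real callers obtain the heap through PerProcess<PerHeapKind<Heap>> and supply
// their own alignment and size):
//
//     std::unique_lock<Mutex> lock(Heap::mutex());
//     Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(HeapKind::Primary);
//     if (void* object = heap.tryAllocateLarge(lock, alignment, size))
//         heap.deallocateLarge(lock, object);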

} // namespace bmalloc

#endif // Heap_h