/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Allocator.h"
#include "BAssert.h"
#include "Chunk.h"
#include "Deallocator.h"
#include "Environment.h"
#include "Heap.h"
#include "PerProcess.h"
#include "Sizes.h"
#include <algorithm>
#include <cstdlib>
#include <cstring> // for memcpy

namespace bmalloc {

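// One Allocator (paired with a Deallocator) exists per thread, owned by Cache.
// Each small size class gets its own bump allocator. The debug heap replaces
// this allocator entirely, so we must never get here while it is enabled.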
Allocator::Allocator(Heap& heap, Deallocator& deallocator)
    : m_heap(heap)
    , m_deallocator(deallocator)
{
    BASSERT(!Environment::get()->isDebugHeapEnabled());
    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass)
        m_bumpAllocators[sizeClass].init(objectSize(sizeClass));
}

Allocator::~Allocator()
{
    scavenge();
}

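// Fail-able entry point: small requests take the ordinary small-object path,
// while large requests use tryAllocateLarge, which returns null instead of
// crashing when memory is exhausted.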
void* Allocator::tryAllocate(size_t size)
{
    if (size <= smallMax)
        return allocate(size);

    std::unique_lock<Mutex> lock(Heap::mutex());
    return m_heap.tryAllocateLarge(lock, alignment, size);
}

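// Aligned allocation comes in two flavors that share one implementation:
// allocate() crashes on failure, tryAllocate() returns null.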
void* Allocator::allocate(size_t alignment, size_t size)
{
    bool crashOnFailure = true;
    return allocateImpl(alignment, size, crashOnFailure);
}

void* Allocator::tryAllocate(size_t alignment, size_t size)
{
    bool crashOnFailure = false;
    return allocateImpl(alignment, size, crashOnFailure);
}

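// A zero-size request is treated as alignment-size so the rounding below
// yields a nonzero object. For requests that fit in the small path, rounding
// the size up to a multiple of the (power-of-two) alignment is sufficient to
// satisfy the alignment; anything else is handed to the heap as a large
// allocation, which takes the alignment explicitly.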
void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure)
{
    BASSERT(isPowerOfTwo(alignment));

    if (!size)
        size = alignment;

    if (size <= smallMax && alignment <= smallMax)
        return allocate(roundUpToMultipleOf(alignment, size));

    std::unique_lock<Mutex> lock(Heap::mutex());
    if (crashOnFailure)
        return m_heap.allocateLarge(lock, alignment, size);
    return m_heap.tryAllocateLarge(lock, alignment, size);
}

void* Allocator::reallocate(void* object, size_t newSize)
{
    bool crashOnFailure = true;
    return reallocateImpl(object, newSize, crashOnFailure);
}

void* Allocator::tryReallocate(void* object, size_t newSize)
{
    bool crashOnFailure = false;
    return reallocateImpl(object, newSize, crashOnFailure);
}

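// realloc-style resizing: recover the old size from the object's page (small)
// or from the heap (large). A large object that shrinks but stays large is
// resized in place. Otherwise we allocate anew, copy the overlapping bytes,
// and free the original. A null object leaves oldSize at 0, so this behaves
// like a fresh allocation.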
void* Allocator::reallocateImpl(void* object, size_t newSize, bool crashOnFailure)
{
    size_t oldSize = 0;
    switch (objectType(m_heap, object)) {
    case ObjectType::Small: {
        BASSERT(objectType(m_heap, nullptr) == ObjectType::Small);
        if (!object)
            break;

        size_t sizeClass = Object(object).page()->sizeClass();
        oldSize = objectSize(sizeClass);
        break;
    }
    case ObjectType::Large: {
        std::unique_lock<Mutex> lock(Heap::mutex());
        oldSize = m_heap.largeSize(lock, object);

        if (newSize < oldSize && newSize > smallMax) {
            m_heap.shrinkLarge(lock, Range(object, oldSize), newSize);
            return object;
        }
        break;
    }
    }

    void* result = nullptr;
    if (crashOnFailure)
        result = allocate(newSize);
    else {
        result = tryAllocate(newSize);
        if (!result)
            return nullptr;
    }
    size_t copySize = std::min(oldSize, newSize);
    memcpy(result, object, copySize);
    m_deallocator.deallocate(object);
    return result;
}

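// Return all per-thread caches to the heap: exhaust each bump allocator and
// any cached bump ranges, funneling every remaining object through the
// deallocator so it lands back in the shared heap.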
void Allocator::scavenge()
{
    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        BumpAllocator& allocator = m_bumpAllocators[sizeClass];
        BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];

        while (allocator.canAllocate())
            m_deallocator.deallocate(allocator.allocate());

        while (bumpRangeCache.size()) {
            allocator.refill(bumpRangeCache.pop());
            while (allocator.canAllocate())
                m_deallocator.deallocate(allocator.allocate());
        }

        allocator.clear();
    }
}

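// No cached bump ranges remain, so take the heap lock: flush the deallocator's
// object log first (it can replenish the line cache), then ask the heap for
// fresh bump ranges for this size class.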
BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass)
{
    BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];

    std::unique_lock<Mutex> lock(Heap::mutex());
    m_deallocator.processObjectLog(lock);
    m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock));
}

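// Refill from the per-thread bump-range cache when possible; only fall back to
// the locking slow case when the cache is empty.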
BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass)
{
    BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
    if (!bumpRangeCache.size())
        return refillAllocatorSlowCase(allocator, sizeClass);
    return allocator.refill(bumpRangeCache.pop());
}

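// Large allocations always go to the shared heap, under its lock.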
BNO_INLINE void* Allocator::allocateLarge(size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    return m_heap.allocateLarge(lock, alignment, size);
}

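// Small sizes above maskSizeClassMax map to logarithmically spaced size
// classes; refill the matching bump allocator on demand.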
BNO_INLINE void* Allocator::allocateLogSizeClass(size_t size)
{
    size_t sizeClass = bmalloc::sizeClass(size);
    BumpAllocator& allocator = m_bumpAllocators[sizeClass];
    if (!allocator.canAllocate())
        refillAllocator(allocator, sizeClass);
    return allocator.allocate();
}

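// Slow path behind the inline fast path in Allocator.h, reached when the bump
// allocator for the requested size class is empty. Sizes up to
// maskSizeClassMax use the mask-computed size classes, the rest of the small
// range uses the log size classes, and everything else is a large allocation.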
void* Allocator::allocateSlowCase(size_t size)
{
    if (size <= maskSizeClassMax) {
        size_t sizeClass = bmalloc::maskSizeClass(size);
        BumpAllocator& allocator = m_bumpAllocators[sizeClass];
        refillAllocator(allocator, sizeClass);
        return allocator.allocate();
    }

    if (size <= smallMax)
        return allocateLogSizeClass(size);

    return allocateLarge(size);
}

} // namespace bmalloc