/*
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <algorithm>
#include <wtf/PageAllocation.h>
#include <wtf/PageBlock.h>

namespace WTF {

#define MINIMUM_BUMP_POOL_SIZE 0x1000

class BumpPointerPool {
public:
    // ensureCapacity checks whether the current pool has capacity to
    // allocate 'size' bytes of memory. If it does not, it will attempt to
    // allocate a new pool (which will be added to this one in a chain).
    //
    // If allocation fails (out of memory) this method will return null.
    // If the return value is non-null, then callers should update any
    // references they have to this current (possibly full) BumpPointerPool
    // to instead point to the newly returned BumpPointerPool.
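    //
    // A minimal usage sketch (illustrative only; 'pool' and 'MyObject' are
    // hypothetical caller-side names, not part of this interface):
    //
    //     BumpPointerPool* pool = ...; // the caller's current head pool
    //     pool = pool->ensureCapacity(sizeof(MyObject));
    //     if (!pool)
    //         return false; // out of memory
    //     void* object = pool->alloc(sizeof(MyObject));
    //
    // Note that the caller keeps the returned pool pointer and uses it for
    // subsequent allocations, per the contract described above.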
    BumpPointerPool* ensureCapacity(size_t size)
    {
        void* allocationEnd = static_cast<char*>(m_current) + size;
        ASSERT_WITH_SECURITY_IMPLICATION(allocationEnd > m_current); // check for overflow
        if (allocationEnd <= static_cast<void*>(this))
            return this;
        return ensureCapacityCrossPool(this, size);
    }

    // alloc should only be called after calling ensureCapacity; as such
    // alloc will never fail.
    void* alloc(size_t size)
    {
        void* current = m_current;
        void* allocationEnd = static_cast<char*>(current) + size;
        ASSERT_WITH_SECURITY_IMPLICATION(allocationEnd > current); // check for overflow
        ASSERT(allocationEnd <= static_cast<void*>(this));
        m_current = allocationEnd;
        return current;
    }

    // The dealloc method releases memory allocated using alloc. Memory
    // must be released in a LIFO fashion, e.g. if the client calls alloc
    // four times, returning pointers A, B, C, D, then the only valid order
    // in which these may be deallocated is D, C, B, A.
    //
    // The client may optionally skip some deallocations. In the example
    // above, it would be valid to only explicitly dealloc C, A (D being
    // dealloced along with C, B along with A).
    //
    // If the pointer was not allocated from this pool (or its chain) then
    // dealloc will CRASH(). Callers should update any references they have
    // to this current BumpPointerPool to instead point to the returned
    // BumpPointerPool.
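    //
    // A minimal sketch of LIFO deallocation (illustrative only; the names
    // below are hypothetical caller-side variables):
    //
    //     void* a = pool->alloc(16);
    //     void* b = pool->alloc(16);
    //     void* c = pool->alloc(16);
    //     pool = pool->dealloc(c); // releases c
    //     pool = pool->dealloc(a); // releases a (and b along with it)
    //
    // For brevity the sketch omits the ensureCapacity calls that must
    // precede each alloc.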
    BumpPointerPool* dealloc(void* position)
    {
        if ((position >= m_start) && (position <= static_cast<void*>(this))) {
            ASSERT(position <= m_current);
            m_current = position;
            return this;
        }
        return deallocCrossPool(this, position);
    }

private:
    // Placement operator new, returns the last 'size' bytes of allocation for use as this.
    void* operator new(size_t size, const PageAllocation& allocation)
    {
        ASSERT(size < allocation.size());
        return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
    }

    BumpPointerPool(const PageAllocation& allocation)
        : m_current(allocation.base())
        , m_start(allocation.base())
        , m_next(0)
        , m_previous(0)
        , m_allocation(allocation)
    {
    }

    static BumpPointerPool* create(size_t minimumCapacity = 0)
    {
        // Add size of BumpPointerPool object, check for overflow.
        minimumCapacity += sizeof(BumpPointerPool);
        if (minimumCapacity < sizeof(BumpPointerPool))
            return 0;

        size_t poolSize = std::max(static_cast<size_t>(MINIMUM_BUMP_POOL_SIZE), WTF::pageSize());
        while (poolSize < minimumCapacity) {
            poolSize <<= 1;
            // The overflow check below relies on poolSize being a power of two:
            // MINIMUM_BUMP_POOL_SIZE and the page size both are, so repeated
            // doubling wraps around to exactly zero on overflow.
            ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
            if (!poolSize)
                return 0;
        }

        PageAllocation allocation = PageAllocation::allocate(poolSize);
        if (!!allocation)
            return new (allocation) BumpPointerPool(allocation);
        return 0;
    }

    // Reset the head pool to empty and free every pool chained after it.
    void shrink()
    {
        ASSERT(!m_previous);
        m_current = m_start;
        while (m_next) {
            BumpPointerPool* nextNext = m_next->m_next;
            m_next->destroy();
            m_next = nextNext;
        }
    }

    // Release the backing page allocation. This object lives inside that
    // allocation, so the pool must not be used after this call.
    void destroy()
    {
        m_allocation.deallocate();
    }

    static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
    {
        // The pool passed should not have capacity, so we'll start with the next one.
        ASSERT(previousPool);
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
        BumpPointerPool* pool = previousPool->m_next;

        while (true) {
            if (!pool) {
                // We've run to the end; allocate a new pool.
                pool = BumpPointerPool::create(size);
                if (!pool)
                    return 0; // Out of memory; ensureCapacity's contract is to return null.
                previousPool->m_next = pool;
                pool->m_previous = previousPool;
                return pool;
            }

            // Check whether this pool in the chain has room for the allocation.
            void* current = pool->m_current;
            void* allocationEnd = static_cast<char*>(current) + size;
            ASSERT_WITH_SECURITY_IMPLICATION(allocationEnd > current); // check for overflow
            if (allocationEnd <= static_cast<void*>(pool))
                return pool;

            // This pool is too full; move on to the next one in the chain.
            previousPool = pool;
            pool = pool->m_next;
        }
    }

    static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
    {
        // Should only be called if position is not in the current pool.
        ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));

        while (true) {
            // Unwind the current pool to the start, move back in the chain to the previous pool.
            pool->m_current = pool->m_start;
            pool = pool->m_previous;

            // position was nowhere in the chain!
            if (!pool)
                CRASH();

            if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
                ASSERT(position <= pool->m_current);
                pool->m_current = position;
                return pool;
            }
        }
    }

    void* m_current;
    void* m_start;
    BumpPointerPool* m_next;
    BumpPointerPool* m_previous;
    PageAllocation m_allocation;

    friend class BumpPointerAllocator;
};

// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
// can be used for LIFO (stack-like) allocation.
//
// To begin allocating using this class call startAllocator(). The result
// of this method will be null if the initial pool allocation fails, or a
// pointer to a BumpPointerPool object that can be used to perform
// allocations. Whilst the allocator is running no memory will be released
// until stopAllocator() is called. At this point all allocations made
// through this allocator will be reaped, and underlying memory may be
// freed.
//
// (In practice we will still hold on to the initial pool to allow allocation
// to be quickly restarted, but additional pools will be freed.)
//
// This allocator is non-reentrant; it is incumbent on clients to ensure
// startAllocator() is not called again until stopAllocator() has been called.
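//
// A minimal end-to-end sketch (illustrative only; the allocator, pool, and
// Node names are hypothetical caller-side code, not part of this interface):
//
//     BumpPointerAllocator allocator;
//     BumpPointerPool* pool = allocator.startAllocator();
//     if (!pool)
//         return false; // initial pool allocation failed
//     pool = pool->ensureCapacity(sizeof(Node));
//     if (!pool)
//         return false; // out of memory
//     Node* node = static_cast<Node*>(pool->alloc(sizeof(Node)));
//     // ... use node ...
//     allocator.stopAllocator(); // reaps every allocation made above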
class BumpPointerAllocator {
public:
    BumpPointerAllocator()
        : m_head(0)
    {
    }

    ~BumpPointerAllocator()
    {
        if (m_head)
            m_head->destroy();
    }

    BumpPointerPool* startAllocator()
    {
        if (!m_head)
            m_head = BumpPointerPool::create();
        return m_head;
    }

    void stopAllocator()
    {
        if (m_head)
            m_head->shrink();
    }

private:
    BumpPointerPool* m_head;
};

} // namespace WTF

using WTF::BumpPointerAllocator;