/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef VMAllocate_h
#define VMAllocate_h

#include "BAssert.h"
#include "BVMTags.h"
#include "Logging.h"
#include "Range.h"
#include "Sizes.h"
#include "Syscall.h"
#include <algorithm>
#include <sys/mman.h>
#include <unistd.h>

#if BOS(DARWIN)
#include <mach/vm_page_size.h>
#endif

namespace bmalloc {

#ifndef BMALLOC_VM_TAG
#define BMALLOC_VM_TAG VM_TAG_FOR_TCMALLOC_MEMORY
#endif

#if BOS(LINUX)
#define BMALLOC_NORESERVE MAP_NORESERVE
#else
#define BMALLOC_NORESERVE 0
#endif

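// Returns the logical VM page size, caching the result of sysconf(_SC_PAGESIZE).
// Crashes rather than continue with an unusable page size.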
inline size_t vmPageSize()
{
    static size_t cached;
    if (!cached) {
        long pageSize = sysconf(_SC_PAGESIZE);
        if (pageSize < 0)
            BCRASH();
        cached = pageSize;
    }
    return cached;
}

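// Returns log2 of the VM page size, suitable for shift-based page arithmetic.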
inline size_t vmPageShift()
{
    static size_t cached;
    if (!cached)
        cached = log2(vmPageSize());
    return cached;
}

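// Rounds size up to a multiple of the VM page size. For example, with 4KB
// pages, vmSize(100) == 4096 and vmSize(4096) == 4096.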
inline size_t vmSize(size_t size)
{
    return roundUpToMultipleOf(vmPageSize(), size);
}

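// Assertion helpers: verify that a size is non-zero and page-aligned, and (in
// the two-argument overload) that a pointer is non-null and page-aligned.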
inline void vmValidate(size_t vmSize)
{
    BUNUSED(vmSize);
    BASSERT(vmSize);
    BASSERT(vmSize == roundUpToMultipleOf(vmPageSize(), vmSize));
}

inline void vmValidate(void* p, size_t vmSize)
{
    vmValidate(vmSize);

    BUNUSED(p);
    BASSERT(p);
    BASSERT(p == mask(p, ~(vmPageSize() - 1)));
}

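// Returns the granularity at which the kernel manages physical pages. On the
// iOS family this is vm_kernel_page_size, which may differ from the page size
// reported to user space by sysconf.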
inline size_t vmPageSizePhysical()
{
#if BPLATFORM(IOS_FAMILY)
    return vm_kernel_page_size;
#else
    static size_t cached;
    if (!cached)
        cached = sysconf(_SC_PAGESIZE);
    return cached;
#endif
}

inline void vmValidatePhysical(size_t vmSize)
{
    BUNUSED(vmSize);
    BASSERT(vmSize);
    BASSERT(vmSize == roundUpToMultipleOf(vmPageSizePhysical(), vmSize));
}

inline void vmValidatePhysical(void* p, size_t vmSize)
{
    vmValidatePhysical(vmSize);

    BUNUSED(p);
    BASSERT(p);
    BASSERT(p == mask(p, ~(vmPageSizePhysical() - 1)));
}

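// Reserves vmSize bytes of zero-filled anonymous memory, or returns nullptr on
// failure. The usage tag is passed through mmap's file-descriptor argument,
// the Darwin convention for labeling anonymous regions in tools like vmmap.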
inline void* tryVMAllocate(size_t vmSize, VMTag usage = VMTag::Malloc)
{
    vmValidate(vmSize);
    void* result = mmap(0, vmSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | BMALLOC_NORESERVE, static_cast<int>(usage), 0);
    if (result == MAP_FAILED)
        return nullptr;
    return result;
}

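// Infallible variant of tryVMAllocate: crashes if the reservation fails.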
inline void* vmAllocate(size_t vmSize)
{
    void* result = tryVMAllocate(vmSize);
    RELEASE_BASSERT(result);
    return result;
}

inline void vmDeallocate(void* p, size_t vmSize)
{
    vmValidate(p, vmSize);
    munmap(p, vmSize);
}

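// Makes the range inaccessible without releasing the reservation; any
// subsequent access faults.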
inline void vmRevokePermissions(void* p, size_t vmSize)
{
    vmValidate(p, vmSize);
    mprotect(p, vmSize, PROT_NONE);
}

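// Discards the current contents of the range by remapping fresh zeroed pages
// over it in place.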
inline void vmZeroAndPurge(void* p, size_t vmSize, VMTag usage = VMTag::Malloc)
{
    vmValidate(p, vmSize);
    // MAP_ANON guarantees the memory is zeroed. This will also cause
    // page faults on accesses to this range following this call.
    void* result = mmap(p, vmSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | BMALLOC_NORESERVE, static_cast<int>(usage), 0);
    RELEASE_BASSERT(result == p);
}

// Allocates vmSize bytes at a specified power-of-two alignment.
// Use this function to create maskable memory regions.
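// For example, with a 2MB alignment it over-maps vmAlignment + vmSize bytes,
// unmaps the misaligned head and tail, and returns a pointer p aligned to a
// 2MB boundary, so (uintptr_t)p & (vmAlignment - 1) == 0.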

inline void* tryVMAllocate(size_t vmAlignment, size_t vmSize, VMTag usage = VMTag::Malloc)
{
    vmValidate(vmSize);
    vmValidate(vmAlignment);

    size_t mappedSize = vmAlignment + vmSize;
    if (mappedSize < vmAlignment || mappedSize < vmSize) // Check for overflow
        return nullptr;

    char* mapped = static_cast<char*>(tryVMAllocate(mappedSize, usage));
    if (!mapped)
        return nullptr;
    char* mappedEnd = mapped + mappedSize;

    char* aligned = roundUpToMultipleOf(vmAlignment, mapped);
    char* alignedEnd = aligned + vmSize;

    RELEASE_BASSERT(alignedEnd <= mappedEnd);

    if (size_t leftExtra = aligned - mapped)
        vmDeallocate(mapped, leftExtra);

    if (size_t rightExtra = mappedEnd - alignedEnd)
        vmDeallocate(alignedEnd, rightExtra);

    return aligned;
}

inline void* vmAllocate(size_t vmAlignment, size_t vmSize)
{
    void* result = tryVMAllocate(vmAlignment, vmSize);
    RELEASE_BASSERT(result);
    return result;
}

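// Returns the physical pages backing the range to the OS while keeping the
// virtual reservation intact. The madvise flavor varies by platform; on
// Darwin, MADV_FREE_REUSABLE also removes the pages from the process's
// memory footprint immediately.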
inline void vmDeallocatePhysicalPages(void* p, size_t vmSize)
{
    vmValidatePhysical(p, vmSize);
#if BOS(DARWIN)
    SYSCALL(madvise(p, vmSize, MADV_FREE_REUSABLE));
#elif BOS(FREEBSD)
    SYSCALL(madvise(p, vmSize, MADV_FREE));
#else
    SYSCALL(madvise(p, vmSize, MADV_DONTNEED));
#if BOS(LINUX)
    SYSCALL(madvise(p, vmSize, MADV_DONTDUMP));
#endif
#endif
}

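// Undoes vmDeallocatePhysicalPages: marks the range as in use again so its
// pages are accounted to the process (and, on Linux, included in core dumps
// once more).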
inline void vmAllocatePhysicalPages(void* p, size_t vmSize)
{
    vmValidatePhysical(p, vmSize);
#if BOS(DARWIN)
    SYSCALL(madvise(p, vmSize, MADV_FREE_REUSE));
#else
    SYSCALL(madvise(p, vmSize, MADV_NORMAL));
#if BOS(LINUX)
    SYSCALL(madvise(p, vmSize, MADV_DODUMP));
#endif
#endif
}

// Returns how much memory you would decommit had you called
// vmDeallocatePhysicalPagesSloppy with p and size: the bytes spanned by the
// whole physical pages contained in [p, p + size).
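// For example, with a 4KB physical page size and page-aligned p,
// physicalPageSizeSloppy(p, 10000) == 8192: only two whole pages fit in the range.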
inline size_t physicalPageSizeSloppy(void* p, size_t size)
{
    char* begin = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
    char* end = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);

    if (begin >= end)
        return 0;
    return end - begin;
}

// Trims un-page-aligned requests: decommits only the whole physical pages
// contained in [p, p + size).
inline void vmDeallocatePhysicalPagesSloppy(void* p, size_t size)
{
    char* begin = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
    char* end = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);

    if (begin >= end)
        return;

    vmDeallocatePhysicalPages(begin, end - begin);
}

// Expands un-page-aligned requests: commits every physical page that overlaps
// [p, p + size).
inline void vmAllocatePhysicalPagesSloppy(void* p, size_t size)
{
    char* begin = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
    char* end = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);

    if (begin >= end)
        return;

    vmAllocatePhysicalPages(begin, end - begin);
}

} // namespace bmalloc

#endif // VMAllocate_h