/*
 * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 | |
26 | #include "bmalloc.h" |
27 | |
28 | #include "DebugHeap.h" |
29 | #include "Environment.h" |
30 | #include "PerProcess.h" |
31 | |
32 | namespace bmalloc { namespace api { |
33 | |
34 | void* mallocOutOfLine(size_t size, HeapKind kind) |
35 | { |
36 | return malloc(size, kind); |
37 | } |
38 | |
39 | void freeOutOfLine(void* object, HeapKind kind) |
40 | { |
41 | free(object, kind); |
42 | } |
43 | |
// Tries to allocate zeroed, page-aligned virtual memory that is NOT counted
// as dirty by bmalloc's scavenger accounting (see externalDecommit below).
// Returns null on failure.
//
// requiredAlignment must be a power of two; both the alignment and the size
// are rounded up to whole VM pages before allocating.
void* tryLargeZeroedMemalignVirtual(size_t requiredAlignment, size_t requestedSize, HeapKind kind)
{
    RELEASE_BASSERT(isPowerOfTwo(requiredAlignment));

    size_t pageSize = vmPageSize();
    size_t alignment = roundUpToMultipleOf(pageSize, requiredAlignment);
    size_t size = roundUpToMultipleOf(pageSize, requestedSize);
    // The round-ups above can wrap on overflow; fail hard rather than
    // allocate less than the caller asked for.
    RELEASE_BASSERT(alignment >= requiredAlignment);
    RELEASE_BASSERT(size >= requestedSize);

    void* result;
    if (auto* debugHeap = DebugHeap::tryGet())
        // Debug heap active: bypass bmalloc's Heap entirely.
        result = debugHeap->memalignLarge(alignment, size);
    else {
        kind = mapToActiveHeapKind(kind);
        Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);

        std::unique_lock<Mutex> lock(Heap::mutex());
        result = heap.tryAllocateLarge(lock, alignment, size);
        if (result) {
            // Don't track this as dirty memory that dictates how we drive the scavenger.
            // FIXME: We should make it so that users of this API inform bmalloc which
            // pages they dirty:
            // https://bugs.webkit.org/show_bug.cgi?id=184207
            heap.externalDecommit(lock, result, size);
        }
    }

    // Zero-and-purge outside the lock; applies to both heap flavors.
    if (result)
        vmZeroAndPurge(result, size);
    return result;
}
76 | |
// Frees memory previously obtained from tryLargeZeroedMemalignVirtual.
// size must be the rounded-up size that allocation ended up with, since it
// is replayed into the footprint accounting below.
void freeLargeVirtual(void* object, size_t size, HeapKind kind)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        // Debug-heap allocations were never registered with a Heap.
        debugHeap->freeLarge(object);
        return;
    }
    kind = mapToActiveHeapKind(kind);
    Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
    std::unique_lock<Mutex> lock(Heap::mutex());
    // Balance out the externalDecommit when we allocated the zeroed virtual memory.
    heap.externalCommit(lock, object, size);
    heap.deallocateLarge(lock, object);
}
90 | |
91 | void scavenge() |
92 | { |
93 | scavengeThisThread(); |
94 | |
95 | if (DebugHeap* debugHeap = DebugHeap::tryGet()) |
96 | debugHeap->scavenge(); |
97 | else |
98 | Scavenger::get()->scavenge(); |
99 | } |
100 | |
101 | bool isEnabled(HeapKind) |
102 | { |
103 | return !Environment::get()->isDebugHeapEnabled(); |
104 | } |
105 | |
106 | #if BOS(DARWIN) |
107 | void setScavengerThreadQOSClass(qos_class_t overrideClass) |
108 | { |
109 | if (DebugHeap::tryGet()) |
110 | return; |
111 | std::unique_lock<Mutex> lock(Heap::mutex()); |
112 | Scavenger::get()->setScavengerThreadQOSClass(overrideClass); |
113 | } |
114 | #endif |
115 | |
116 | void commitAlignedPhysical(void* object, size_t size, HeapKind kind) |
117 | { |
118 | vmValidatePhysical(object, size); |
119 | vmAllocatePhysicalPages(object, size); |
120 | if (!DebugHeap::tryGet()) |
121 | PerProcess<PerHeapKind<Heap>>::get()->at(kind).externalCommit(object, size); |
122 | } |
123 | |
124 | void decommitAlignedPhysical(void* object, size_t size, HeapKind kind) |
125 | { |
126 | vmValidatePhysical(object, size); |
127 | vmDeallocatePhysicalPages(object, size); |
128 | if (!DebugHeap::tryGet()) |
129 | PerProcess<PerHeapKind<Heap>>::get()->at(kind).externalDecommit(object, size); |
130 | } |
131 | |
132 | void enableMiniMode() |
133 | { |
134 | if (!DebugHeap::tryGet()) |
135 | Scavenger::get()->enableMiniMode(); |
136 | } |
137 | |
138 | } } // namespace bmalloc::api |
139 | |
140 | |