/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "IsoHeapImpl.h"
#include "IsoTLSDeallocatorEntry.h"
#include "IsoSharedHeapInlines.h"
#include "IsoSharedPageInlines.h"

namespace bmalloc {

template<typename Config>
IsoHeapImpl<Config>::IsoHeapImpl()
    : lock(PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->lock)
    , m_inlineDirectory(*this)
    , m_allocator(*this)
{
    addToAllIsoHeaps();
}

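// Finds the first cell that can be allocated: the inline directory is tried first, then the
// directory pages are scanned starting from the first eligible-or-decommitted one. If every
// existing directory is full, a new directory page is appended to the list.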
template<typename Config>
EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible()
{
    if (m_isInlineDirectoryEligibleOrDecommitted) {
        EligibilityResult<Config> result = m_inlineDirectory.takeFirstEligible();
        if (result.kind == EligibilityKind::Full)
            m_isInlineDirectoryEligibleOrDecommitted = false;
        else
            return result;
    }

    if (!m_firstEligibleOrDecommitedDirectory) {
        // If nothing is eligible, it can only be because we have no directories. It wouldn't be the end
        // of the world if we broke this invariant. It would only mean that didBecomeEligibleOrDecommited() would need
        // a null check.
        RELEASE_BASSERT(!m_headDirectory);
        RELEASE_BASSERT(!m_tailDirectory);
    }

    for (; m_firstEligibleOrDecommitedDirectory; m_firstEligibleOrDecommitedDirectory = m_firstEligibleOrDecommitedDirectory->next) {
        EligibilityResult<Config> result = m_firstEligibleOrDecommitedDirectory->payload.takeFirstEligible();
        if (result.kind != EligibilityKind::Full) {
            m_directoryHighWatermark = std::max(m_directoryHighWatermark, m_firstEligibleOrDecommitedDirectory->index());
            return result;
        }
    }

    auto* newDirectory = new IsoDirectoryPage<Config>(*this, m_nextDirectoryPageIndex++);
    if (m_headDirectory) {
        m_tailDirectory->next = newDirectory;
        m_tailDirectory = newDirectory;
    } else {
        RELEASE_BASSERT(!m_tailDirectory);
        m_headDirectory = newDirectory;
        m_tailDirectory = newDirectory;
    }
    m_directoryHighWatermark = newDirectory->index();
    m_firstEligibleOrDecommitedDirectory = newDirectory;
    EligibilityResult<Config> result = newDirectory->payload.takeFirstEligible();
    RELEASE_BASSERT(result.kind != EligibilityKind::Full);
    return result;
}

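// The inline directory is not part of a directory page, so its eligibility is tracked with a
// dedicated flag rather than through m_firstEligibleOrDecommitedDirectory.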
template<typename Config>
void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, numPagesInInlineDirectory>* directory)
{
    RELEASE_BASSERT(directory == &m_inlineDirectory);
    m_isInlineDirectoryEligibleOrDecommitted = true;
}

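// Directory pages are scanned in index order, so we only need to move the eligibility cursor
// backward when a page earlier than the current cursor becomes eligible or decommitted.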
template<typename Config>
void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
{
    RELEASE_BASSERT(m_firstEligibleOrDecommitedDirectory);
    auto* directoryPage = IsoDirectoryPage<Config>::pageFor(directory);
    if (directoryPage->index() < m_firstEligibleOrDecommitedDirectory->index())
        m_firstEligibleOrDecommitedDirectory = directoryPage;
}

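// Gathers decommittable page ranges from every directory. The actual decommits are deferred to
// the caller via the DeferredDecommit vector.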
template<typename Config>
void IsoHeapImpl<Config>::scavenge(Vector<DeferredDecommit>& decommits)
{
    std::lock_guard<Mutex> locker(this->lock);
    forEachDirectory(
        [&] (auto& directory) {
            directory.scavenge(decommits);
        });
    m_directoryHighWatermark = 0;
}

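// m_freeableMemory is maintained incrementally by isNowFreeable()/isNoLongerFreeable() below, so
// this is just a cached counter read.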
template<typename Config>
size_t IsoHeapImpl<Config>::freeableMemory()
{
    return m_freeableMemory;
}

template<typename Config>
unsigned IsoHeapImpl<Config>::allocatorOffset()
{
    return m_allocator.offset();
}

template<typename Config>
unsigned IsoHeapImpl<Config>::deallocatorOffset()
{
    return PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->offset();
}

template<typename Config>
unsigned IsoHeapImpl<Config>::numLiveObjects()
{
    unsigned result = 0;
    forEachLiveObject(
        [&] (void*) {
            result++;
        });
    return result;
}

template<typename Config>
unsigned IsoHeapImpl<Config>::numCommittedPages()
{
    unsigned result = 0;
    forEachCommittedPage(
        [&] (IsoPage<Config>&) {
            result++;
        });
    return result;
}

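// Iteration helpers. Every directory is either the inline directory or the payload of a
// directory page in the linked list.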
template<typename Config>
template<typename Func>
void IsoHeapImpl<Config>::forEachDirectory(const Func& func)
{
    func(m_inlineDirectory);
    for (IsoDirectoryPage<Config>* page = m_headDirectory; page; page = page->next)
        func(page->payload);
}

template<typename Config>
template<typename Func>
void IsoHeapImpl<Config>::forEachCommittedPage(const Func& func)
{
    forEachDirectory(
        [&] (auto& directory) {
            directory.forEachCommittedPage(func);
        });
}

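// Visits every live object: first the objects in committed pages, then the shared cells. A
// shared cell is live if it has been materialized (non-null) and its bit is clear in the
// available-cell mask.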
template<typename Config>
template<typename Func>
void IsoHeapImpl<Config>::forEachLiveObject(const Func& func)
{
    forEachCommittedPage(
        [&] (IsoPage<Config>& page) {
            page.forEachLiveObject(func);
        });
    for (unsigned index = 0; index < maxAllocationFromShared; ++index) {
        void* pointer = m_sharedCells[index];
        if (pointer && !(m_availableShared & (1U << index)))
            func(pointer);
    }
}

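// m_footprint is maintained incrementally by didCommit()/didDecommit() below. When the physical
// page map is enabled, we cross-check the incremental bookkeeping against it.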
template<typename Config>
size_t IsoHeapImpl<Config>::footprint()
{
#if ENABLE_PHYSICAL_PAGE_MAP
    RELEASE_BASSERT(m_footprint == m_physicalPageMap.footprint());
#endif
    return m_footprint;
}

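// Commit/decommit callbacks from the directories keep the footprint counter (and the optional
// physical page map) in sync with the pages that are actually backed by physical memory.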
template<typename Config>
void IsoHeapImpl<Config>::didCommit(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_footprint += bytes;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(ptr, bytes);
#endif
}

template<typename Config>
void IsoHeapImpl<Config>::didDecommit(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_footprint -= bytes;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(ptr, bytes);
#endif
}

template<typename Config>
void IsoHeapImpl<Config>::isNowFreeable(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_freeableMemory += bytes;
}

template<typename Config>
void IsoHeapImpl<Config>::isNoLongerFreeable(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_freeableMemory -= bytes;
}

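// Decides, on each allocation slow path, whether this heap should keep allocating from the small
// set of shared cells (Shared) or from its own pages (Fast). Roughly: switch to Fast once the
// shared cells are exhausted or churned through within one cycle, and fall back to Shared after
// ~1 second without a slow-path allocation.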
template<typename Config>
AllocationMode IsoHeapImpl<Config>::updateAllocationMode()
{
    auto getNewAllocationMode = [&] {
        // We have exhausted the shared free cells, so we should switch to the fast allocation mode for this type.
        if (!m_availableShared) {
            m_lastSlowPathTime = std::chrono::steady_clock::now();
            return AllocationMode::Fast;
        }

        switch (m_allocationMode) {
        case AllocationMode::Shared:
            // We are currently in the shared allocation mode. We keep using it until we exhaust the shared free cells.
            // But if we allocate many shared cells within a very short period, we switch to the fast allocation mode instead.
            // This avoids the following pathological case.
            //
            //     for (int i = 0; i < 1e6; ++i) {
            //         auto* ptr = allocate();
            //         ...
            //         free(ptr);
            //     }
            if (m_numberOfAllocationsFromSharedInOneCycle <= IsoPage<Config>::numObjects)
                return AllocationMode::Shared;
            BFALLTHROUGH;

        case AllocationMode::Fast: {
            // The allocation pattern may have changed. We check the allocation rate and decide which mode is more appropriate.
            // If we have not hit the allocation slow path for ~1 second, we consider allocation to have become quiescent.
            auto now = std::chrono::steady_clock::now();
            if ((now - m_lastSlowPathTime) < std::chrono::seconds(1)) {
                m_lastSlowPathTime = now;
                return AllocationMode::Fast;
            }

            m_numberOfAllocationsFromSharedInOneCycle = 0;
            m_lastSlowPathTime = now;
            return AllocationMode::Shared;
        }

        case AllocationMode::Init:
            m_lastSlowPathTime = std::chrono::steady_clock::now();
            return AllocationMode::Shared;
        }

        return AllocationMode::Shared;
    };
    AllocationMode allocationMode = getNewAllocationMode();
    m_allocationMode = allocationMode;
    return allocationMode;
}

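// Allocates one of the up-to-maxAllocationFromShared shared cells. m_availableShared is a bitmask
// of free cell indices; a cell is materialized lazily from the IsoSharedHeap on first use, with an
// extra trailing byte recording the cell's index.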
template<typename Config>
void* IsoHeapImpl<Config>::allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure)
{
    static constexpr bool verbose = false;

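    // __builtin_ffs() returns one plus the index of the least-significant set bit, or zero if no
    // bit is set; the assertion below relies on the caller only taking this path while at least
    // one shared cell remains available.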
    unsigned indexPlusOne = __builtin_ffs(m_availableShared);
    BASSERT(indexPlusOne);
    unsigned index = indexPlusOne - 1;
    void* result = m_sharedCells[index];
    if (result) {
        if (verbose)
            fprintf(stderr, "%p: allocated %p from shared again of size %u\n", this, result, Config::objectSize);
    } else {
        constexpr unsigned objectSizeWithHeapImplPointer = Config::objectSize + sizeof(uint8_t);
        result = IsoSharedHeap::get()->allocateNew<objectSizeWithHeapImplPointer>(abortOnFailure);
        if (!result)
            return nullptr;
        if (verbose)
            fprintf(stderr, "%p: allocated %p from shared of size %u\n", this, result, Config::objectSize);
        BASSERT(index < IsoHeapImplBase::maxAllocationFromShared);
        *indexSlotFor<Config>(result) = index;
        m_sharedCells[index] = result;
    }
    BASSERT(result);
    m_availableShared &= ~(1U << index);
    ++m_numberOfAllocationsFromSharedInOneCycle;
    return result;
}

} // namespace bmalloc