1/*
2 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#pragma once
27
28#include "BMalloced.h"
29#include "IsoAllocator.h"
30#include "IsoDirectoryPage.h"
31#include "IsoTLSAllocatorEntry.h"
32#include "PhysicalPageMap.h"
33
34namespace bmalloc {
35
36class AllIsoHeaps;
37
// Non-template base of every IsoHeapImpl<Config>. Holds the scavenging
// interface plus the bookkeeping used when a heap vends objects out of the
// shared page (see IsoSharedPage, which is a friend) instead of its own pages.
class BEXPORT IsoHeapImplBase {
    MAKE_BMALLOCED;
public:
    // Upper bound on allocations satisfied from the shared page in one cycle
    // (tracked in m_numberOfAllocationsFromSharedInOneCycle / m_sharedCells).
    // Must be a power of two so the mask below works, must not exceed the
    // minimal alignment, and must fit in the bits of m_availableShared — all
    // three constraints are enforced by static_asserts in this class.
    static constexpr unsigned maxAllocationFromShared = 8;
    static constexpr unsigned maxAllocationFromSharedMask = maxAllocationFromShared - 1;
    static_assert(maxAllocationFromShared <= bmalloc::alignment, "");
    static_assert(isPowerOfTwo(maxAllocationFromShared), "");

    virtual ~IsoHeapImplBase();

    // Appends the pages this heap wants decommitted to the vector; the actual
    // decommit presumably happens later via finishScavenging — confirm in the
    // Scavenger/IsoHeapImpl implementation.
    virtual void scavenge(Vector<DeferredDecommit>&) = 0;
    // Bytes that a scavenge of this heap could decommit.
    virtual size_t freeableMemory() = 0;
    // Total bytes of committed memory attributed to this heap.
    virtual size_t footprint() = 0;

    // Synchronous scavenge of just this heap (likely scavenge() followed by
    // finishScavenging(); implementation not visible in this header).
    void scavengeNow();
    // Performs the deferred decommits collected by scavenge().
    static void finishScavenging(Vector<DeferredDecommit>&);

protected:
    IsoHeapImplBase();
    // Presumably links this heap into the process-wide AllIsoHeaps list via
    // m_next (AllIsoHeaps is a friend below) — confirm in the .cpp.
    void addToAllIsoHeaps();

    friend class IsoSharedPage;
    friend class AllIsoHeaps;

    // Intrusive singly-linked-list pointer used by AllIsoHeaps.
    IsoHeapImplBase* m_next { nullptr };
    // Timestamp of the last slow-path hit; used to decide allocation mode
    // (see AllocationMode below) — semantics live in updateAllocationMode().
    std::chrono::steady_clock::time_point m_lastSlowPathTime;
    // Cells this heap has been handed from the shared page.
    std::array<void*, maxAllocationFromShared> m_sharedCells { };
    unsigned m_numberOfAllocationsFromSharedInOneCycle { 0 };
    // Appears to be a per-cell availability bitfield over m_sharedCells (the
    // static_assert below guarantees one bit per cell fits) — confirm against
    // IsoSharedPage, which manipulates these fields as a friend.
    unsigned m_availableShared { maxAllocationFromSharedMask };
    AllocationMode m_allocationMode { AllocationMode::Init };

    static_assert(sizeof(m_availableShared) * 8 >= maxAllocationFromShared, "");
};
71
// The per-Config isolated heap. Pages are organized into directories: one
// inline directory embedded in this object, plus a linked list of
// heap-allocated IsoDirectoryPage<Config>s for overflow. Method bodies are
// defined out-of-line (not in this header).
template<typename Config>
class IsoHeapImpl final : public IsoHeapImplBase {
    // Pick a size that makes us most efficiently use the bitvectors.
    static constexpr unsigned numPagesInInlineDirectory = 32;

public:
    IsoHeapImpl();

    // Finds a page that can accept a new allocation, preferring the inline
    // directory, then the chained directory pages; may grow the chain.
    // See EligibilityResult<Config> for the possible outcomes.
    EligibilityResult<Config> takeFirstEligible();

    // Callbacks from directory.
    // NOTE(review): "Decommited" is a misspelling of "Decommitted", but the
    // name is part of the interface IsoDirectory calls, so it must stay.
    void didBecomeEligibleOrDecommited(IsoDirectory<Config, numPagesInInlineDirectory>*);
    void didBecomeEligibleOrDecommited(IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);

    // IsoHeapImplBase overrides; see the base class for the contracts.
    void scavenge(Vector<DeferredDecommit>&) override;

    size_t freeableMemory() override;

    size_t footprint() override;

    // Offsets of this heap's allocator/deallocator entries, presumably within
    // the IsoTLS layout used by the fast paths — confirm in IsoTLS.
    unsigned allocatorOffset();
    unsigned deallocatorOffset();

    // White-box testing functions.
    unsigned numLiveObjects();
    unsigned numCommittedPages();

    // Visits the inline directory and, presumably, each chained directory
    // page in turn (implementation not visible in this header).
    template<typename Func>
    void forEachDirectory(const Func&);

    template<typename Func>
    void forEachCommittedPage(const Func&);

    // This is only accurate when all threads are scavenged. Otherwise it will overestimate.
    template<typename Func>
    void forEachLiveObject(const Func&);

    // Footprint accounting callbacks, invoked when pages are committed or
    // decommitted on this heap's behalf.
    void didCommit(void* ptr, size_t bytes);
    void didDecommit(void* ptr, size_t bytes);

    // Freeable-memory accounting callbacks (feed m_freeableMemory).
    void isNowFreeable(void* ptr, size_t bytes);
    void isNoLongerFreeable(void* ptr, size_t bytes);

    // Chooses between shared-page and dedicated-page allocation; uses the
    // shared-cell bookkeeping declared on IsoHeapImplBase.
    AllocationMode updateAllocationMode();
    // Caller must hold `lock` (enforced by the lock_guard parameter).
    void* allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure);

    // It's almost always the caller's responsibility to grab the lock. This lock comes from the
    // PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->lock. That's pretty weird, and we don't
    // try to disguise the fact that it's weird. We only do that because heaps in the same size class
    // share the same deallocator log, so it makes sense for them to also share the same lock to
    // amortize lock acquisition costs.
    Mutex& lock;

private:
    // First directory lives inline; overflow directories are IsoDirectoryPages
    // chained through m_headDirectory/m_tailDirectory.
    IsoDirectory<Config, numPagesInInlineDirectory> m_inlineDirectory;
    IsoDirectoryPage<Config>* m_headDirectory { nullptr };
    IsoDirectoryPage<Config>* m_tailDirectory { nullptr };
    size_t m_footprint { 0 }; // Committed bytes, maintained by didCommit/didDecommit.
    size_t m_freeableMemory { 0 }; // Maintained by isNowFreeable/isNoLongerFreeable.
#if ENABLE_PHYSICAL_PAGE_MAP
    PhysicalPageMap m_physicalPageMap;
#endif
    unsigned m_nextDirectoryPageIndex { 1 }; // We start at 1 so that the high water mark being zero means we've only allocated in the inline directory since the last scavenge.
    unsigned m_directoryHighWatermark { 0 };

    // Eligibility caches consulted by takeFirstEligible(); note the same
    // historical "Decommited" spelling as the callbacks above.
    bool m_isInlineDirectoryEligibleOrDecommitted { true };
    IsoDirectoryPage<Config>* m_firstEligibleOrDecommitedDirectory { nullptr };

    IsoTLSAllocatorEntry<Config> m_allocator;
};
142
143} // namespace bmalloc
144
145
146