1 | // Copyright 2018 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/microtask-queue.h" |
6 | |
7 | #include <stddef.h> |
8 | #include <algorithm> |
9 | |
10 | #include "src/api-inl.h" |
11 | #include "src/base/logging.h" |
12 | #include "src/handles-inl.h" |
13 | #include "src/isolate.h" |
14 | #include "src/objects/microtask-inl.h" |
15 | #include "src/roots-inl.h" |
16 | #include "src/tracing/trace-event.h" |
17 | #include "src/visitors.h" |
18 | |
19 | namespace v8 { |
20 | namespace internal { |
21 | |
// Byte offsets of the ring-buffer bookkeeping fields. NOTE(review): these
// are presumably consumed by generated code that manipulates the queue
// directly (the enqueue path below mentions the JIT computing the ring
// modulo) — confirm against the builtins that use them.
const size_t MicrotaskQueue::kRingBufferOffset =
    OFFSET_OF(MicrotaskQueue, ring_buffer_);
const size_t MicrotaskQueue::kCapacityOffset =
    OFFSET_OF(MicrotaskQueue, capacity_);
const size_t MicrotaskQueue::kSizeOffset = OFFSET_OF(MicrotaskQueue, size_);
const size_t MicrotaskQueue::kStartOffset = OFFSET_OF(MicrotaskQueue, start_);
const size_t MicrotaskQueue::kFinishedMicrotaskCountOffset =
    OFFSET_OF(MicrotaskQueue, finished_microtask_count_);

// Smallest ring-buffer capacity; ResizeBuffer never grows or shrinks the
// buffer below this (see EnqueueMicrotask and IterateMicrotasks).
const intptr_t MicrotaskQueue::kMinimumCapacity = 8;
32 | |
33 | // static |
34 | void MicrotaskQueue::SetUpDefaultMicrotaskQueue(Isolate* isolate) { |
35 | DCHECK_NULL(isolate->default_microtask_queue()); |
36 | |
37 | MicrotaskQueue* microtask_queue = new MicrotaskQueue; |
38 | microtask_queue->next_ = microtask_queue; |
39 | microtask_queue->prev_ = microtask_queue; |
40 | isolate->set_default_microtask_queue(microtask_queue); |
41 | } |
42 | |
43 | // static |
44 | std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate) { |
45 | DCHECK_NOT_NULL(isolate->default_microtask_queue()); |
46 | |
47 | std::unique_ptr<MicrotaskQueue> microtask_queue(new MicrotaskQueue); |
48 | |
49 | // Insert the new instance to the next of last MicrotaskQueue instance. |
50 | MicrotaskQueue* last = isolate->default_microtask_queue()->prev_; |
51 | microtask_queue->next_ = last->next_; |
52 | microtask_queue->prev_ = last; |
53 | last->next_->prev_ = microtask_queue.get(); |
54 | last->next_ = microtask_queue.get(); |
55 | |
56 | return microtask_queue; |
57 | } |
58 | |
// Constructs an empty queue. NOTE(review): the ring buffer appears to be
// allocated lazily by the first EnqueueMicrotask (assuming capacity_ starts
// at 0 — confirm against the in-class initializers in the header).
MicrotaskQueue::MicrotaskQueue() = default;
60 | |
MicrotaskQueue::~MicrotaskQueue() {
  // Unlink this queue from the circular doubly-linked list, unless it is
  // the sole element (in which case it links to itself and there is nothing
  // to repair).
  if (next_ != this) {
    DCHECK_NE(prev_, this);
    next_->prev_ = prev_;
    prev_->next_ = next_;
  }
  // ring_buffer_ may be nullptr (never enqueued, or cleared on
  // termination in RunMicrotasks); delete[] on nullptr is a no-op.
  delete[] ring_buffer_;
}
69 | |
70 | // static |
71 | Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate, |
72 | intptr_t microtask_queue_pointer, |
73 | Address raw_microtask) { |
74 | Microtask microtask = Microtask::cast(Object(raw_microtask)); |
75 | reinterpret_cast<MicrotaskQueue*>(microtask_queue_pointer) |
76 | ->EnqueueMicrotask(microtask); |
77 | return ReadOnlyRoots(isolate).undefined_value().ptr(); |
78 | } |
79 | |
80 | void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate, |
81 | v8::Local<Function> function) { |
82 | Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate); |
83 | HandleScope scope(isolate); |
84 | Handle<CallableTask> microtask = isolate->factory()->NewCallableTask( |
85 | Utils::OpenHandle(*function), isolate->native_context()); |
86 | EnqueueMicrotask(*microtask); |
87 | } |
88 | |
89 | void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate, |
90 | v8::MicrotaskCallback callback, |
91 | void* data) { |
92 | Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate); |
93 | HandleScope scope(isolate); |
94 | Handle<CallbackTask> microtask = isolate->factory()->NewCallbackTask( |
95 | isolate->factory()->NewForeign(reinterpret_cast<Address>(callback)), |
96 | isolate->factory()->NewForeign(reinterpret_cast<Address>(data))); |
97 | EnqueueMicrotask(*microtask); |
98 | } |
99 | |
100 | void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) { |
101 | if (size_ == capacity_) { |
102 | // Keep the capacity of |ring_buffer_| power of 2, so that the JIT |
103 | // implementation can calculate the modulo easily. |
104 | intptr_t new_capacity = std::max(kMinimumCapacity, capacity_ << 1); |
105 | ResizeBuffer(new_capacity); |
106 | } |
107 | |
108 | DCHECK_LT(size_, capacity_); |
109 | ring_buffer_[(start_ + size_) % capacity_] = microtask.ptr(); |
110 | ++size_; |
111 | } |
112 | |
113 | void MicrotaskQueue::PerformCheckpoint(v8::Isolate* v8_isolate) { |
114 | if (!IsRunningMicrotasks() && !GetMicrotasksScopeDepth() && |
115 | !HasMicrotasksSuppressions()) { |
116 | Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate); |
117 | RunMicrotasks(isolate); |
118 | } |
119 | } |
120 | |
121 | namespace { |
122 | |
123 | class SetIsRunningMicrotasks { |
124 | public: |
125 | explicit SetIsRunningMicrotasks(bool* flag) : flag_(flag) { |
126 | DCHECK(!*flag_); |
127 | *flag_ = true; |
128 | } |
129 | |
130 | ~SetIsRunningMicrotasks() { |
131 | DCHECK(*flag_); |
132 | *flag_ = false; |
133 | } |
134 | |
135 | private: |
136 | bool* flag_; |
137 | }; |
138 | |
139 | } // namespace |
140 | |
// Runs all pending microtasks. Returns the number processed, or -1 when
// execution was terminated mid-run. Fires the completed callbacks on every
// exit path via OnCompleted.
int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
  // Fast path: nothing pending, but completion callbacks still fire.
  if (!size()) {
    OnCompleted(isolate);
    return 0;
  }

  // Snapshot the counter so the delta after the run gives the number of
  // microtasks processed by this call.
  intptr_t base_count = finished_microtask_count_;

  HandleScope handle_scope(isolate);
  MaybeHandle<Object> maybe_exception;

  MaybeHandle<Object> maybe_result;

  int processed_microtask_count;
  {
    // Mark the queue as running and suppress reentrant microtask execution
    // for the duration of the run.
    SetIsRunningMicrotasks scope(&is_running_microtasks_);
    v8::Isolate::SuppressMicrotaskExecutionScope suppress(
        reinterpret_cast<v8::Isolate*>(isolate));
    HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
        isolate->handle_scope_implementer());
    TRACE_EVENT_BEGIN0("v8.execute" , "RunMicrotasks" );
    TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8" , "V8.RunMicrotasks" );
    maybe_result = Execution::TryRunMicrotasks(isolate, this, &maybe_exception);
    processed_microtask_count =
        static_cast<int>(finished_microtask_count_ - base_count);
    TRACE_EVENT_END1("v8.execute" , "RunMicrotasks" , "microtask_count" ,
                     processed_microtask_count);
  }

  // If execution is terminating, clean up and propagate that to TryCatch scope.
  // (Both result and exception empty signals termination rather than an
  // ordinary thrown exception.)
  if (maybe_result.is_null() && maybe_exception.is_null()) {
    // Drop all still-pending microtasks and reset the ring buffer.
    delete[] ring_buffer_;
    ring_buffer_ = nullptr;
    capacity_ = 0;
    size_ = 0;
    start_ = 0;
    isolate->SetTerminationOnExternalTryCatch();
    OnCompleted(isolate);
    return -1;
  }
  // A successful run must have drained the queue completely.
  DCHECK_EQ(0, size());
  OnCompleted(isolate);

  return processed_microtask_count;
}
186 | |
// Reports all pending microtasks to |visitor| as strong GC roots, then
// opportunistically shrinks an over-sized ring buffer.
void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
  if (size_) {
    // Iterate pending Microtasks as root objects to avoid the write barrier for
    // all single Microtask. If this hurts the GC performance, use a FixedArray.
    //
    // The pending entries may wrap around the end of the ring buffer, so
    // they are visited as up to two contiguous segments:
    // segment 1: [start_, min(start_ + size_, capacity_))
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_ + start_),
        FullObjectSlot(ring_buffer_ + std::min(start_ + size_, capacity_)));
    // segment 2: the wrapped part [0, start_ + size_ - capacity_), which is
    // an empty range when no wrap-around occurred.
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_),
        FullObjectSlot(ring_buffer_ + std::max(start_ + size_ - capacity_,
                                               static_cast<intptr_t>(0))));
  }

  // Nothing to shrink at or below the minimum capacity.
  if (capacity_ <= kMinimumCapacity) {
    return;
  }

  // Halve the capacity while it exceeds twice the current size (keeping it
  // a power of two), but never go below kMinimumCapacity.
  intptr_t new_capacity = capacity_;
  while (new_capacity > 2 * size_) {
    new_capacity >>= 1;
  }
  new_capacity = std::max(new_capacity, kMinimumCapacity);
  if (new_capacity < capacity_) {
    ResizeBuffer(new_capacity);
  }
}
213 | |
214 | void MicrotaskQueue::AddMicrotasksCompletedCallback( |
215 | MicrotasksCompletedCallbackWithData callback, void* data) { |
216 | CallbackWithData callback_with_data(callback, data); |
217 | auto pos = |
218 | std::find(microtasks_completed_callbacks_.begin(), |
219 | microtasks_completed_callbacks_.end(), callback_with_data); |
220 | if (pos != microtasks_completed_callbacks_.end()) return; |
221 | microtasks_completed_callbacks_.push_back(callback_with_data); |
222 | } |
223 | |
224 | void MicrotaskQueue::RemoveMicrotasksCompletedCallback( |
225 | MicrotasksCompletedCallbackWithData callback, void* data) { |
226 | CallbackWithData callback_with_data(callback, data); |
227 | auto pos = |
228 | std::find(microtasks_completed_callbacks_.begin(), |
229 | microtasks_completed_callbacks_.end(), callback_with_data); |
230 | if (pos == microtasks_completed_callbacks_.end()) return; |
231 | microtasks_completed_callbacks_.erase(pos); |
232 | } |
233 | |
234 | void MicrotaskQueue::FireMicrotasksCompletedCallback(Isolate* isolate) const { |
235 | std::vector<CallbackWithData> callbacks(microtasks_completed_callbacks_); |
236 | for (auto& callback : callbacks) { |
237 | callback.first(reinterpret_cast<v8::Isolate*>(isolate), callback.second); |
238 | } |
239 | } |
240 | |
241 | Microtask MicrotaskQueue::get(intptr_t index) const { |
242 | DCHECK_LT(index, size_); |
243 | Object microtask(ring_buffer_[(index + start_) % capacity_]); |
244 | return Microtask::cast(microtask); |
245 | } |
246 | |
// Common epilogue for RunMicrotasks: clears the WeakRefs keep-alive set and
// fires the embedder's microtasks-completed callbacks.
void MicrotaskQueue::OnCompleted(Isolate* isolate) {
  // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
  // set is still open (whether to clear it after every microtask or once
  // during a microtask checkpoint). See also
  // https://github.com/tc39/proposal-weakrefs/issues/39 .
  isolate->heap()->ClearKeepDuringJobSet();

  FireMicrotasksCompletedCallback(isolate);
}
256 | |
257 | void MicrotaskQueue::ResizeBuffer(intptr_t new_capacity) { |
258 | DCHECK_LE(size_, new_capacity); |
259 | Address* new_ring_buffer = new Address[new_capacity]; |
260 | for (intptr_t i = 0; i < size_; ++i) { |
261 | new_ring_buffer[i] = ring_buffer_[(start_ + i) % capacity_]; |
262 | } |
263 | |
264 | delete[] ring_buffer_; |
265 | ring_buffer_ = new_ring_buffer; |
266 | capacity_ = new_capacity; |
267 | start_ = 0; |
268 | } |
269 | |
270 | } // namespace internal |
271 | } // namespace v8 |
272 | |