// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/futex-emulation.h"

#include <limits>

#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/conversions.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"

namespace v8 {
namespace internal {

using AtomicsWaitEvent = v8::Isolate::AtomicsWaitEvent;

base::LazyMutex FutexEmulation::mutex_ = LAZY_MUTEX_INITIALIZER;
base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
    LAZY_INSTANCE_INITIALIZER;

void FutexWaitListNode::NotifyWake() {
  // Lock the FutexEmulation mutex before notifying. We know that the mutex
  // will have been unlocked if we are currently waiting on the condition
  // variable. The mutex will not be locked if FutexEmulation::Wait hasn't
  // locked it yet. In that case, we set the interrupted_ flag to true, which
  // will be tested after the mutex is locked by a future call to Wait.
  base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
  // If we are not waiting, the notification has no effect.
  cond_.NotifyOne();
  interrupted_ = true;
}


FutexWaitList::FutexWaitList() : head_(nullptr), tail_(nullptr) {}


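// Appends |node| to the tail of the doubly-linked wait list. The caller must
// hold FutexEmulation::mutex_.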
void FutexWaitList::AddNode(FutexWaitListNode* node) {
  DCHECK(node->prev_ == nullptr && node->next_ == nullptr);
  if (tail_) {
    tail_->next_ = node;
  } else {
    head_ = node;
  }

  node->prev_ = tail_;
  node->next_ = nullptr;
  tail_ = node;
}


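// Unlinks |node| from the wait list and clears its prev_/next_ pointers. The
// caller must hold FutexEmulation::mutex_.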
void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
  if (node->prev_) {
    node->prev_->next_ = node->next_;
  } else {
    head_ = node->next_;
  }

  if (node->next_) {
    node->next_->prev_ = node->prev_;
  } else {
    tail_ = node->prev_;
  }

  node->prev_ = node->next_ = nullptr;
}

void AtomicsWaitWakeHandle::Wake() {
  // Adding a separate `NotifyWake()` variant that doesn't acquire the lock
  // itself would likely just add unnecessary complexity.
  // The split lock by itself isn't an issue, as long as the caller properly
  // synchronizes this with the closing `AtomicsWaitCallback`.
  {
    base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
    stopped_ = true;
  }
  isolate_->futex_wait_list_node()->NotifyWake();
}

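// Raw return codes for Wait(); WaitJs() maps these onto the corresponding
// "ok" / "not-equal" / "timed-out" strings expected by Atomics.wait.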
enum WaitReturnValue : int { kOk = 0, kNotEqual = 1, kTimedOut = 2 };

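// Entry point used for Atomics.wait on the JS side: performs a 32-bit wait
// and converts the numeric result into the string values the spec requires.
// Illustrative JS-level behaviour (not part of this file), assuming a shared
// Int32Array `i32` backed by a SharedArrayBuffer:
//
//   Atomics.wait(i32, 0, 1);      // "not-equal" if i32[0] != 1
//   Atomics.wait(i32, 0, 0, 10);  // "timed-out" after ~10 ms if not woken
//   Atomics.wait(i32, 0, 0);      // "ok" once another thread calls
//                                 // Atomics.notify(i32, 0)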
Object FutexEmulation::WaitJs(Isolate* isolate,
                              Handle<JSArrayBuffer> array_buffer, size_t addr,
                              int32_t value, double rel_timeout_ms) {
  Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
  if (res->IsSmi()) {
    int val = Smi::ToInt(res);
    switch (val) {
      case WaitReturnValue::kOk:
        return ReadOnlyRoots(isolate).ok();
      case WaitReturnValue::kNotEqual:
        return ReadOnlyRoots(isolate).not_equal();
      case WaitReturnValue::kTimedOut:
        return ReadOnlyRoots(isolate).timed_out();
      default:
        UNREACHABLE();
    }
  }
  return res;
}

Object FutexEmulation::Wait32(Isolate* isolate,
                              Handle<JSArrayBuffer> array_buffer, size_t addr,
                              int32_t value, double rel_timeout_ms) {
  return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
}

Object FutexEmulation::Wait64(Isolate* isolate,
                              Handle<JSArrayBuffer> array_buffer, size_t addr,
                              int64_t value, double rel_timeout_ms) {
  return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
}

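// Shared implementation of the futex-style wait: if the value at |addr| in
// |array_buffer| still equals |value|, the current thread is enqueued on the
// global wait list and blocks on its condition variable until it is woken by
// Wake(), interrupted, stopped via the AtomicsWaitCallback, or times out.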
template <typename T>
Object FutexEmulation::Wait(Isolate* isolate,
                            Handle<JSArrayBuffer> array_buffer, size_t addr,
                            T value, double rel_timeout_ms) {
  DCHECK_LT(addr, array_buffer->byte_length());

  bool use_timeout = rel_timeout_ms != V8_INFINITY;

  base::TimeDelta rel_timeout;
  if (use_timeout) {
    // Convert to nanoseconds.
    double rel_timeout_ns = rel_timeout_ms *
                            base::Time::kNanosecondsPerMicrosecond *
                            base::Time::kMicrosecondsPerMillisecond;
    if (rel_timeout_ns >
        static_cast<double>(std::numeric_limits<int64_t>::max())) {
      // 2**63 nanoseconds is 292 years. Let's just treat anything greater as
      // infinite.
      use_timeout = false;
    } else {
      rel_timeout = base::TimeDelta::FromNanoseconds(
          static_cast<int64_t>(rel_timeout_ns));
    }
  }

  AtomicsWaitWakeHandle stop_handle(isolate);

  isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
                                  addr, value, rel_timeout_ms, &stop_handle);

  if (isolate->has_scheduled_exception()) {
    return isolate->PromoteScheduledException();
  }

  Object result;
  AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;

  do {  // Not really a loop, just makes it easier to break out early.
    base::MutexGuard lock_guard(mutex_.Pointer());
    void* backing_store = array_buffer->backing_store();

    FutexWaitListNode* node = isolate->futex_wait_list_node();
    node->backing_store_ = backing_store;
    node->wait_addr_ = addr;
    node->waiting_ = true;

    // Reset node->waiting_ = false when leaving this scope (but while
    // still holding the lock).
    ResetWaitingOnScopeExit reset_waiting(node);

    T* p = reinterpret_cast<T*>(static_cast<int8_t*>(backing_store) + addr);
    if (*p != value) {
      result = Smi::FromInt(WaitReturnValue::kNotEqual);
      callback_result = AtomicsWaitEvent::kNotEqual;
      break;
    }

    base::TimeTicks timeout_time;
    base::TimeTicks current_time;

    if (use_timeout) {
      current_time = base::TimeTicks::Now();
      timeout_time = current_time + rel_timeout;
    }

    wait_list_.Pointer()->AddNode(node);

    while (true) {
      bool interrupted = node->interrupted_;
      node->interrupted_ = false;

      // Unlock the mutex here to prevent deadlock from lock ordering between
      // mutex_ and mutexes locked by HandleInterrupts.
      mutex_.Pointer()->Unlock();

      // Because the mutex is unlocked, we have to be careful about not
      // dropping an interrupt. The notification can happen in three different
      // places:
      // 1) Before Wait is called: the notification will be dropped, but
      //    interrupted_ will be set to true. This will be checked below.
      // 2) After interrupted has been checked here, but before mutex_ is
      //    acquired: interrupted is checked again below, with mutex_ locked.
      //    Because the wakeup signal also acquires mutex_, we know it will
      //    not be able to notify until mutex_ is released below, when waiting
      //    on the condition variable.
      // 3) After the mutex is released in the call to WaitFor(): this
      //    notification will wake up the condition variable. node->waiting()
      //    will be false, so we'll loop and then check interrupts.
      if (interrupted) {
        Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
        if (interrupt_object->IsException(isolate)) {
          result = interrupt_object;
          callback_result = AtomicsWaitEvent::kTerminatedExecution;
          mutex_.Pointer()->Lock();
          break;
        }
      }

      mutex_.Pointer()->Lock();

      if (node->interrupted_) {
        // An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
        continue;
      }

      if (stop_handle.has_stopped()) {
        node->waiting_ = false;
        callback_result = AtomicsWaitEvent::kAPIStopped;
      }

      if (!node->waiting_) {
        result = Smi::FromInt(WaitReturnValue::kOk);
        break;
      }

      // No interrupts, now wait.
      if (use_timeout) {
        current_time = base::TimeTicks::Now();
        if (current_time >= timeout_time) {
          result = Smi::FromInt(WaitReturnValue::kTimedOut);
          callback_result = AtomicsWaitEvent::kTimedOut;
          break;
        }

        base::TimeDelta time_until_timeout = timeout_time - current_time;
        DCHECK_GE(time_until_timeout.InMicroseconds(), 0);
        bool wait_for_result =
            node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
        USE(wait_for_result);
      } else {
        node->cond_.Wait(mutex_.Pointer());
      }

      // Spurious wakeup, interrupt or timeout.
    }

    wait_list_.Pointer()->RemoveNode(node);
  } while (false);

  isolate->RunAtomicsWaitCallback(callback_result, array_buffer, addr, value,
                                  rel_timeout_ms, nullptr);

  if (isolate->has_scheduled_exception()) {
    CHECK_NE(callback_result, AtomicsWaitEvent::kTerminatedExecution);
    result = isolate->PromoteScheduledException();
  }

  return result;
}

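// Wakes up to |num_waiters_to_wake| threads that are waiting on |addr| in
// |array_buffer| (all of them if kWakeAll is passed) and returns the number
// of waiters actually woken as a Smi.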
Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
                            uint32_t num_waiters_to_wake) {
  DCHECK_LT(addr, array_buffer->byte_length());

  int waiters_woken = 0;
  void* backing_store = array_buffer->backing_store();

  base::MutexGuard lock_guard(mutex_.Pointer());
  FutexWaitListNode* node = wait_list_.Pointer()->head_;
  while (node && num_waiters_to_wake > 0) {
    if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
        node->waiting_) {
      node->waiting_ = false;
      node->cond_.NotifyOne();
      if (num_waiters_to_wake != kWakeAll) {
        --num_waiters_to_wake;
      }
      waiters_woken++;
    }

    node = node->next_;
  }

  return Smi::FromInt(waiters_woken);
}

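// Test-only helper: counts the nodes in the wait list that are currently
// waiting on |addr| in |array_buffer|.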
Object FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
                                            size_t addr) {
  DCHECK_LT(addr, array_buffer->byte_length());
  void* backing_store = array_buffer->backing_store();

  base::MutexGuard lock_guard(mutex_.Pointer());

  int waiters = 0;
  FutexWaitListNode* node = wait_list_.Pointer()->head_;
  while (node) {
    if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
        node->waiting_) {
      waiters++;
    }

    node = node->next_;
  }

  return Smi::FromInt(waiters);
}

}  // namespace internal
}  // namespace v8