1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/arguments-inl.h"
6#include "src/base/macros.h"
7#include "src/base/platform/mutex.h"
8#include "src/conversions-inl.h"
9#include "src/counters.h"
10#include "src/heap/factory.h"
11#include "src/objects/js-array-buffer-inl.h"
12#include "src/runtime/runtime-utils.h"
13
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here:
// https://github.com/tc39/ecmascript_sharedmem
// (The proposal has since been merged into ECMA-262 proper; see the
// Atomics object specification at https://tc39.es/ecma262/#sec-atomics-object.)
17
18namespace v8 {
19namespace internal {
20
21// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
22#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
23 V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
24
25namespace {
26
27#if V8_CC_GNU
28
29// GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms
30// can be slow. Good to know, but we don't have a choice.
31#ifdef V8_TARGET_ARCH_32_BIT
32#pragma GCC diagnostic push
33#pragma GCC diagnostic ignored "-Wpragmas"
34#pragma GCC diagnostic ignored "-Watomic-alignment"
35#endif // V8_TARGET_ARCH_32_BIT
36
// Reads *addr with sequentially-consistent ordering.
template <typename ValueType>
inline ValueType LoadSeqCst(ValueType* addr) {
  return __atomic_load_n(addr, __ATOMIC_SEQ_CST);
}
41
// Writes |desired| into *addr with sequentially-consistent ordering.
template <typename ValueType>
inline void StoreSeqCst(ValueType* addr, ValueType desired) {
  __atomic_store_n(addr, desired, __ATOMIC_SEQ_CST);
}
46
// Atomically replaces *addr with |desired|; returns the previous value.
template <typename ValueType>
inline ValueType ExchangeSeqCst(ValueType* addr, ValueType desired) {
  return __atomic_exchange_n(addr, desired, __ATOMIC_SEQ_CST);
}
51
// Atomic compare-and-swap: if *addr == expected, stores |desired|.
// Returns the value read from *addr (equal to |expected| iff the swap
// happened).
template <typename ValueType>
inline ValueType CompareExchangeSeqCst(ValueType* addr, ValueType expected,
                                       ValueType desired) {
  // On failure the builtin writes the observed value back into |expected|;
  // on success |expected| is untouched and already holds the old value.
  (void)__atomic_compare_exchange_n(addr, &expected, desired, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;
}
58
// Atomically adds |operand| to *addr; returns the value before the add.
template <typename ValueType>
inline ValueType AddSeqCst(ValueType* addr, ValueType operand) {
  return __atomic_fetch_add(addr, operand, __ATOMIC_SEQ_CST);
}
63
// Atomically subtracts |operand| from *addr; returns the value before the
// subtraction.
template <typename ValueType>
inline ValueType SubSeqCst(ValueType* addr, ValueType operand) {
  return __atomic_fetch_sub(addr, operand, __ATOMIC_SEQ_CST);
}
68
// Atomically ANDs |operand| into *addr; returns the value before the AND.
template <typename ValueType>
inline ValueType AndSeqCst(ValueType* addr, ValueType operand) {
  return __atomic_fetch_and(addr, operand, __ATOMIC_SEQ_CST);
}
73
// Atomically ORs |operand| into *addr; returns the value before the OR.
template <typename ValueType>
inline ValueType OrSeqCst(ValueType* addr, ValueType operand) {
  return __atomic_fetch_or(addr, operand, __ATOMIC_SEQ_CST);
}
78
// Atomically XORs |operand| into *addr; returns the value before the XOR.
template <typename ValueType>
inline ValueType XorSeqCst(ValueType* addr, ValueType operand) {
  return __atomic_fetch_xor(addr, operand, __ATOMIC_SEQ_CST);
}
83
84#ifdef V8_TARGET_ARCH_32_BIT
85#pragma GCC diagnostic pop
86#endif // V8_TARGET_ARCH_32_BIT
87
88#elif V8_CC_MSVC
89
// Map width-suffixed names onto MSVC's underscore-prefixed intrinsics so
// that ATOMIC_OPS below can form the intrinsic name by token-pasting the
// bit-width suffix. Widths not aliased here (e.g. InterlockedExchange64)
// are expected to come from the Windows headers directly --
// NOTE(review): confirm this holds for every suffix ATOMIC_OPS generates.
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr64 _InterlockedOr64
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

#if defined(V8_HOST_ARCH_ARM64)
#define InterlockedExchange8 _InterlockedExchange8
#endif

// Generates the sequentially-consistent helper family (exchange,
// compare-exchange, add, sub, and, or, xor) for one integer |type| on top
// of the Interlocked* intrinsic operating on |vctype|.
// - Every helper returns the value held *before* the operation, matching
//   the GCC-builtin implementations above.
// - SubSeqCst is expressed as InterlockedExchangeAdd of the negated value.
// - Per Microsoft's documentation, Interlocked* operations imply a full
//   memory barrier, which satisfies the SeqCst requirement here.
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }                                                                         \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(int64_t, 64, __int64)
ATOMIC_OPS(uint64_t, 64, __int64)

// 64-bit atomic load/store helpers are not implemented on the MSVC path.
// NOTE(review): Runtime_AtomicsLoad64/Store64 would reach these helpers;
// presumably no MSVC-built target selects the architecture list in the
// surrounding #if -- confirm.
template <typename T>
inline T LoadSeqCst(T* p) {
  UNREACHABLE();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNREACHABLE();
}

// Undo the aliases so they do not leak beyond this file.
#undef ATOMIC_OPS

#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
#undef InterlockedOr64
#undef InterlockedOr32
#undef InterlockedXor32

#if defined(V8_HOST_ARCH_ARM64)
#undef InterlockedExchange8
#endif
171
172#else
173
174#error Unsupported platform!
175
176#endif
177
178template <typename T>
179T FromObject(Handle<Object> number);
180
181template <>
182inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
183 return NumberToUint32(*number);
184}
185
186template <>
187inline int8_t FromObject<int8_t>(Handle<Object> number) {
188 return NumberToInt32(*number);
189}
190
191template <>
192inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
193 return NumberToUint32(*number);
194}
195
196template <>
197inline int16_t FromObject<int16_t>(Handle<Object> number) {
198 return NumberToInt32(*number);
199}
200
201template <>
202inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
203 return NumberToUint32(*number);
204}
205
206template <>
207inline int32_t FromObject<int32_t>(Handle<Object> number) {
208 return NumberToInt32(*number);
209}
210
211template <>
212inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
213 return Handle<BigInt>::cast(bigint)->AsUint64();
214}
215
216template <>
217inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
218 return Handle<BigInt>::cast(bigint)->AsInt64();
219}
220
221inline Object ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
222
223inline Object ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
224
225inline Object ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
226
227inline Object ToObject(Isolate* isolate, uint16_t t) { return Smi::FromInt(t); }
228
229inline Object ToObject(Isolate* isolate, int32_t t) {
230 return *isolate->factory()->NewNumber(t);
231}
232
233inline Object ToObject(Isolate* isolate, uint32_t t) {
234 return *isolate->factory()->NewNumber(t);
235}
236
237inline Object ToObject(Isolate* isolate, int64_t t) {
238 return *BigInt::FromInt64(isolate, t);
239}
240
241inline Object ToObject(Isolate* isolate, uint64_t t) {
242 return *BigInt::FromUint64(isolate, t);
243}
244
245template <typename T>
246struct Load {
247 static inline Object Do(Isolate* isolate, void* buffer, size_t index) {
248 T result = LoadSeqCst(static_cast<T*>(buffer) + index);
249 return ToObject(isolate, result);
250 }
251};
252
253template <typename T>
254struct Store {
255 static inline void Do(Isolate* isolate, void* buffer, size_t index,
256 Handle<Object> obj) {
257 T value = FromObject<T>(obj);
258 StoreSeqCst(static_cast<T*>(buffer) + index, value);
259 }
260};
261
262template <typename T>
263struct Exchange {
264 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
265 Handle<Object> obj) {
266 T value = FromObject<T>(obj);
267 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
268 return ToObject(isolate, result);
269 }
270};
271
272template <typename T>
273inline Object DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
274 Handle<Object> oldobj, Handle<Object> newobj) {
275 T oldval = FromObject<T>(oldobj);
276 T newval = FromObject<T>(newobj);
277 T result =
278 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
279 return ToObject(isolate, result);
280}
281
282template <typename T>
283struct Add {
284 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
285 Handle<Object> obj) {
286 T value = FromObject<T>(obj);
287 T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
288 return ToObject(isolate, result);
289 }
290};
291
292template <typename T>
293struct Sub {
294 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
295 Handle<Object> obj) {
296 T value = FromObject<T>(obj);
297 T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
298 return ToObject(isolate, result);
299 }
300};
301
302template <typename T>
303struct And {
304 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
305 Handle<Object> obj) {
306 T value = FromObject<T>(obj);
307 T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
308 return ToObject(isolate, result);
309 }
310};
311
312template <typename T>
313struct Or {
314 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
315 Handle<Object> obj) {
316 T value = FromObject<T>(obj);
317 T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
318 return ToObject(isolate, result);
319 }
320};
321
322template <typename T>
323struct Xor {
324 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
325 Handle<Object> obj) {
326 T value = FromObject<T>(obj);
327 T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
328 return ToObject(isolate, result);
329 }
330};
331
332} // anonymous namespace
333
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type)
// Covers only the non-BigInt integer element kinds; the 64-bit
// (BigInt-backed) kinds are dispatched separately in the functions below.
#define INTEGER_TYPED_ARRAYS(V)       \
  V(Uint8, uint8, UINT8, uint8_t)     \
  V(Int8, int8, INT8, int8_t)         \
  V(Uint16, uint16, UINT16, uint16_t) \
  V(Int16, int16, INT16, int16_t)     \
  V(Uint32, uint32, UINT32, uint32_t) \
  V(Int32, int32, INT32, int32_t)
343
// This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer
// but also includes the ToInteger/ToBigInt conversion that's part of
// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
//
// Common driver for Atomics.exchange/add/sub/and/or/xor: dispatches on the
// typed array's element type and lets Op<ctype>::Do perform the atomic
// read-modify-write, returning the boxed previous element value.
template <template <typename> class Op>
Object GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
  CHECK(sta->GetBuffer()->is_shared());

  // Raw address of element 0 within the shared backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  // NOTE(review): relies on the BigInt element kinds being ordered last in
  // ExternalArrayType -- the same assumption is made in
  // Runtime_AtomicsCompareExchange.
  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> bigint;
    // ToBigInt may run user code, so it happens before the bounds check.
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                       BigInt::FromObject(isolate, value_obj));
    // SharedArrayBuffers are not detachable.
    CHECK_LT(index, sta->length());
    if (sta->type() == kExternalBigInt64Array) {
      return Op<int64_t>::Do(isolate, source, index, bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return Op<uint64_t>::Do(isolate, source, index, bigint);
  }

  Handle<Object> value;
  // ToInteger may run user code as well; bounds check follows it.
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value_obj));
  // SharedArrayBuffers are not detachable.
  CHECK_LT(index, sta->length());

  // Dispatch over the non-BigInt integer element kinds.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return Op<ctype>::Do(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Other element kinds are not expected here; see UNREACHABLE below.
      break;
  }

  UNREACHABLE();
}
392
393RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
394 HandleScope scope(isolate);
395 DCHECK_EQ(2, args.length());
396 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
397 CONVERT_SIZE_ARG_CHECKED(index, 1);
398 CHECK(sta->GetBuffer()->is_shared());
399
400 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
401 sta->byte_offset();
402
403 DCHECK(sta->type() == kExternalBigInt64Array ||
404 sta->type() == kExternalBigUint64Array);
405 // SharedArrayBuffers are not detachable.
406 CHECK_LT(index, sta->length());
407 if (sta->type() == kExternalBigInt64Array) {
408 return Load<int64_t>::Do(isolate, source, index);
409 }
410 DCHECK(sta->type() == kExternalBigUint64Array);
411 return Load<uint64_t>::Do(isolate, source, index);
412}
413
414RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
415 HandleScope scope(isolate);
416 DCHECK_EQ(3, args.length());
417 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
418 CONVERT_SIZE_ARG_CHECKED(index, 1);
419 CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
420 CHECK(sta->GetBuffer()->is_shared());
421
422 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
423 sta->byte_offset();
424
425 Handle<BigInt> bigint;
426 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
427 BigInt::FromObject(isolate, value_obj));
428
429 DCHECK(sta->type() == kExternalBigInt64Array ||
430 sta->type() == kExternalBigUint64Array);
431 // SharedArrayBuffers are not detachable.
432 CHECK_LT(index, sta->length());
433 if (sta->type() == kExternalBigInt64Array) {
434 Store<int64_t>::Do(isolate, source, index, bigint);
435 return *bigint;
436 }
437 DCHECK(sta->type() == kExternalBigUint64Array);
438 Store<uint64_t>::Do(isolate, source, index, bigint);
439 return *bigint;
440}
441
// ES #sec-atomics.exchange
// Atomics.exchange( typedArray, index, value )
// Returns the value at typedArray[index] before the replacement.
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  return GetModifySetValueInBuffer<Exchange>(args, isolate);
}
445
// ES #sec-atomics.compareexchange
// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue )
// Returns the value read from typedArray[index] before any replacement.
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
  CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
  CHECK(sta->GetBuffer()->is_shared());
  // Early bounds check; the conversions below may run user code, so the
  // length is checked again afterwards.
  CHECK_LT(index, sta->length());

  // Raw address of element 0 within the shared backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  // BigInt64/BigUint64 path: both operands go through ToBigInt.
  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> old_bigint;
    Handle<BigInt> new_bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));
    // SharedArrayBuffers are not detachable.
    CHECK_LT(index, sta->length());
    if (sta->type() == kExternalBigInt64Array) {
      return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
                                        new_bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
                                       new_bigint);
  }

  // Integer path: both operands go through ToInteger.
  Handle<Object> old_value;
  Handle<Object> new_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
                                     Object::ToInteger(isolate, old_value_obj));
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
                                     Object::ToInteger(isolate, new_value_obj));
  // SharedArrayBuffers are not detachable.
  CHECK_LT(index, sta->length());

  // Dispatch over the non-BigInt integer element kinds.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype)                  \
  case kExternal##Type##Array:                                         \
    return DoCompareExchange<ctype>(isolate, source, index, old_value, \
                                    new_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Other element kinds are not expected here; see UNREACHABLE below.
      break;
  }

  UNREACHABLE();
}
501
// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
// Returns the value at typedArray[index] before the addition.
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  return GetModifySetValueInBuffer<Add>(args, isolate);
}
507
// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
// Returns the value at typedArray[index] before the subtraction.
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  return GetModifySetValueInBuffer<Sub>(args, isolate);
}
513
// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
// Returns the value at typedArray[index] before the bitwise AND.
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  return GetModifySetValueInBuffer<And>(args, isolate);
}
519
// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
// Returns the value at typedArray[index] before the bitwise OR.
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  return GetModifySetValueInBuffer<Or>(args, isolate);
}
525
// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
// Returns the value at typedArray[index] before the bitwise XOR.
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  return GetModifySetValueInBuffer<Xor>(args, isolate);
}
531
532#undef INTEGER_TYPED_ARRAYS
533
534#else
535
// On all other architectures these operations are implemented entirely in
// CSA builtins (see the comment at the top of this file), so the runtime
// entries below exist only to satisfy the runtime function table and must
// never be called.
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
553
554#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
555 // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
556
557} // namespace internal
558} // namespace v8
559