1 | // Copyright 2015 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/arguments-inl.h" |
6 | #include "src/base/macros.h" |
7 | #include "src/base/platform/mutex.h" |
8 | #include "src/conversions-inl.h" |
9 | #include "src/counters.h" |
10 | #include "src/heap/factory.h" |
11 | #include "src/objects/js-array-buffer-inl.h" |
12 | #include "src/runtime/runtime-utils.h" |
13 | |
// Implement atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec (since merged into the main ECMAScript
// specification), found here:
// https://github.com/tc39/ecmascript_sharedmem
17 | |
18 | namespace v8 { |
19 | namespace internal { |
20 | |
// Other platforms implement Atomics as CSA builtins; see
// builtins-sharedarraybuffer-gen.h. The architectures listed below lack that
// support and fall back to these runtime functions instead.
22 | #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ |
23 | V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X |
24 | |
25 | namespace { |
26 | |
27 | #if V8_CC_GNU |
28 | |
29 | // GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms |
30 | // can be slow. Good to know, but we don't have a choice. |
31 | #ifdef V8_TARGET_ARCH_32_BIT |
#pragma GCC diagnostic push
// -Wpragmas is ignored first so that GCC, which does not know Clang's
// -Watomic-alignment group, does not warn about the next line.
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Watomic-alignment"
35 | #endif // V8_TARGET_ARCH_32_BIT |
36 | |
37 | template <typename T> |
38 | inline T LoadSeqCst(T* p) { |
39 | return __atomic_load_n(p, __ATOMIC_SEQ_CST); |
40 | } |
41 | |
42 | template <typename T> |
43 | inline void StoreSeqCst(T* p, T value) { |
44 | __atomic_store_n(p, value, __ATOMIC_SEQ_CST); |
45 | } |
46 | |
47 | template <typename T> |
48 | inline T ExchangeSeqCst(T* p, T value) { |
49 | return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); |
50 | } |
51 | |
52 | template <typename T> |
53 | inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
54 | (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
55 | __ATOMIC_SEQ_CST); |
56 | return oldval; |
57 | } |
58 | |
59 | template <typename T> |
60 | inline T AddSeqCst(T* p, T value) { |
61 | return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); |
62 | } |
63 | |
64 | template <typename T> |
65 | inline T SubSeqCst(T* p, T value) { |
66 | return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); |
67 | } |
68 | |
69 | template <typename T> |
70 | inline T AndSeqCst(T* p, T value) { |
71 | return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); |
72 | } |
73 | |
74 | template <typename T> |
75 | inline T OrSeqCst(T* p, T value) { |
76 | return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); |
77 | } |
78 | |
79 | template <typename T> |
80 | inline T XorSeqCst(T* p, T value) { |
81 | return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
82 | } |
83 | |
84 | #ifdef V8_TARGET_ARCH_32_BIT |
85 | #pragma GCC diagnostic pop |
86 | #endif // V8_TARGET_ARCH_32_BIT |
87 | |
88 | #elif V8_CC_MSVC |
89 | |
90 | #define InterlockedExchange32 _InterlockedExchange |
91 | #define InterlockedCompareExchange32 _InterlockedCompareExchange |
92 | #define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
93 | #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
94 | #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
95 | #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
96 | #define InterlockedAnd32 _InterlockedAnd |
97 | #define InterlockedOr64 _InterlockedOr64 |
98 | #define InterlockedOr32 _InterlockedOr |
99 | #define InterlockedXor32 _InterlockedXor |
100 | |
101 | #if defined(V8_HOST_ARCH_ARM64) |
102 | #define InterlockedExchange8 _InterlockedExchange8 |
103 | #endif |
104 | |
105 | #define ATOMIC_OPS(type, suffix, vctype) \ |
106 | inline type ExchangeSeqCst(type* p, type value) { \ |
107 | return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
108 | bit_cast<vctype>(value)); \ |
109 | } \ |
110 | inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
111 | return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
112 | bit_cast<vctype>(newval), \ |
113 | bit_cast<vctype>(oldval)); \ |
114 | } \ |
115 | inline type AddSeqCst(type* p, type value) { \ |
116 | return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
117 | bit_cast<vctype>(value)); \ |
118 | } \ |
119 | inline type SubSeqCst(type* p, type value) { \ |
120 | return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
121 | -bit_cast<vctype>(value)); \ |
122 | } \ |
123 | inline type AndSeqCst(type* p, type value) { \ |
124 | return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
125 | bit_cast<vctype>(value)); \ |
126 | } \ |
127 | inline type OrSeqCst(type* p, type value) { \ |
128 | return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
129 | bit_cast<vctype>(value)); \ |
130 | } \ |
131 | inline type XorSeqCst(type* p, type value) { \ |
132 | return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
133 | bit_cast<vctype>(value)); \ |
134 | } |
135 | |
136 | ATOMIC_OPS(int8_t, 8, char) |
137 | ATOMIC_OPS(uint8_t, 8, char) |
138 | ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
139 | ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
140 | ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
141 | ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
142 | ATOMIC_OPS(int64_t, 64, __int64) |
143 | ATOMIC_OPS(uint64_t, 64, __int64) |
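
// For reference (a sketch, not compiled code): the int32_t instantiation
// ATOMIC_OPS(int32_t, 32, long) expands in part to
//
//   inline int32_t ExchangeSeqCst(int32_t* p, int32_t value) {
//     return InterlockedExchange32(reinterpret_cast<long*>(p),
//                                  bit_cast<long>(value));
//   }
//
// where InterlockedExchange32 is the _InterlockedExchange intrinsic, via the
// #define block above.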
144 | |
// No seq-cst load/store intrinsics are mapped above for MSVC. These
// definitions exist only so that the Load and Store wrappers below compile;
// they should never actually run, as Runtime_AtomicsLoad64/Store64 are only
// live on the non-MSVC architectures selected by this file's outer #if.
template <typename T>
inline T LoadSeqCst(T* p) {
  UNREACHABLE();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNREACHABLE();
}
154 | |
155 | #undef ATOMIC_OPS |
156 | |
157 | #undef InterlockedExchange32 |
158 | #undef InterlockedCompareExchange32 |
159 | #undef InterlockedCompareExchange8 |
160 | #undef InterlockedExchangeAdd32 |
161 | #undef InterlockedExchangeAdd16 |
162 | #undef InterlockedExchangeAdd8 |
163 | #undef InterlockedAnd32 |
164 | #undef InterlockedOr64 |
165 | #undef InterlockedOr32 |
166 | #undef InterlockedXor32 |
167 | |
168 | #if defined(V8_HOST_ARCH_ARM64) |
169 | #undef InterlockedExchange8 |
170 | #endif |
171 | |
172 | #else |
173 | |
174 | #error Unsupported platform! |
175 | |
176 | #endif |
177 | |
178 | template <typename T> |
179 | T FromObject(Handle<Object> number); |
180 | |
181 | template <> |
182 | inline uint8_t FromObject<uint8_t>(Handle<Object> number) { |
183 | return NumberToUint32(*number); |
184 | } |
185 | |
186 | template <> |
187 | inline int8_t FromObject<int8_t>(Handle<Object> number) { |
188 | return NumberToInt32(*number); |
189 | } |
190 | |
191 | template <> |
192 | inline uint16_t FromObject<uint16_t>(Handle<Object> number) { |
193 | return NumberToUint32(*number); |
194 | } |
195 | |
196 | template <> |
197 | inline int16_t FromObject<int16_t>(Handle<Object> number) { |
198 | return NumberToInt32(*number); |
199 | } |
200 | |
201 | template <> |
202 | inline uint32_t FromObject<uint32_t>(Handle<Object> number) { |
203 | return NumberToUint32(*number); |
204 | } |
205 | |
206 | template <> |
207 | inline int32_t FromObject<int32_t>(Handle<Object> number) { |
208 | return NumberToInt32(*number); |
209 | } |
210 | |
211 | template <> |
212 | inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) { |
213 | return Handle<BigInt>::cast(bigint)->AsUint64(); |
214 | } |
215 | |
216 | template <> |
217 | inline int64_t FromObject<int64_t>(Handle<Object> bigint) { |
218 | return Handle<BigInt>::cast(bigint)->AsInt64(); |
219 | } |
220 | |
221 | inline Object ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); } |
222 | |
223 | inline Object ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); } |
224 | |
225 | inline Object ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); } |
226 | |
227 | inline Object ToObject(Isolate* isolate, uint16_t t) { return Smi::FromInt(t); } |
228 | |
// 32-bit values do not always fit in a Smi (Smis carry only 31 value bits on
// 32-bit targets), so box them with NewNumber, which falls back to allocating
// a HeapNumber when the value is out of Smi range.
inline Object ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}
236 | |
237 | inline Object ToObject(Isolate* isolate, int64_t t) { |
238 | return *BigInt::FromInt64(isolate, t); |
239 | } |
240 | |
241 | inline Object ToObject(Isolate* isolate, uint64_t t) { |
242 | return *BigInt::FromUint64(isolate, t); |
243 | } |
244 | |
245 | template <typename T> |
246 | struct Load { |
247 | static inline Object Do(Isolate* isolate, void* buffer, size_t index) { |
248 | T result = LoadSeqCst(static_cast<T*>(buffer) + index); |
249 | return ToObject(isolate, result); |
250 | } |
251 | }; |
252 | |
253 | template <typename T> |
254 | struct Store { |
255 | static inline void Do(Isolate* isolate, void* buffer, size_t index, |
256 | Handle<Object> obj) { |
257 | T value = FromObject<T>(obj); |
258 | StoreSeqCst(static_cast<T*>(buffer) + index, value); |
259 | } |
260 | }; |
261 | |
262 | template <typename T> |
263 | struct Exchange { |
264 | static inline Object Do(Isolate* isolate, void* buffer, size_t index, |
265 | Handle<Object> obj) { |
266 | T value = FromObject<T>(obj); |
267 | T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); |
268 | return ToObject(isolate, result); |
269 | } |
270 | }; |
271 | |
272 | template <typename T> |
273 | inline Object DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
274 | Handle<Object> oldobj, Handle<Object> newobj) { |
275 | T oldval = FromObject<T>(oldobj); |
276 | T newval = FromObject<T>(newobj); |
277 | T result = |
278 | CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); |
279 | return ToObject(isolate, result); |
280 | } |
281 | |
282 | template <typename T> |
283 | struct Add { |
284 | static inline Object Do(Isolate* isolate, void* buffer, size_t index, |
285 | Handle<Object> obj) { |
286 | T value = FromObject<T>(obj); |
287 | T result = AddSeqCst(static_cast<T*>(buffer) + index, value); |
288 | return ToObject(isolate, result); |
289 | } |
290 | }; |
291 | |
292 | template <typename T> |
293 | struct Sub { |
294 | static inline Object Do(Isolate* isolate, void* buffer, size_t index, |
295 | Handle<Object> obj) { |
296 | T value = FromObject<T>(obj); |
297 | T result = SubSeqCst(static_cast<T*>(buffer) + index, value); |
298 | return ToObject(isolate, result); |
299 | } |
300 | }; |
301 | |
302 | template <typename T> |
303 | struct And { |
304 | static inline Object Do(Isolate* isolate, void* buffer, size_t index, |
305 | Handle<Object> obj) { |
306 | T value = FromObject<T>(obj); |
307 | T result = AndSeqCst(static_cast<T*>(buffer) + index, value); |
308 | return ToObject(isolate, result); |
309 | } |
310 | }; |
311 | |
312 | template <typename T> |
313 | struct Or { |
314 | static inline Object Do(Isolate* isolate, void* buffer, size_t index, |
315 | Handle<Object> obj) { |
316 | T value = FromObject<T>(obj); |
317 | T result = OrSeqCst(static_cast<T*>(buffer) + index, value); |
318 | return ToObject(isolate, result); |
319 | } |
320 | }; |
321 | |
322 | template <typename T> |
323 | struct Xor { |
324 | static inline Object Do(Isolate* isolate, void* buffer, size_t index, |
325 | Handle<Object> obj) { |
326 | T value = FromObject<T>(obj); |
327 | T result = XorSeqCst(static_cast<T*>(buffer) + index, value); |
328 | return ToObject(isolate, result); |
329 | } |
330 | }; |
331 | |
332 | } // anonymous namespace |
333 | |
334 | // Duplicated from objects.h |
335 | // V has parameters (Type, type, TYPE, C type) |
336 | #define INTEGER_TYPED_ARRAYS(V) \ |
337 | V(Uint8, uint8, UINT8, uint8_t) \ |
338 | V(Int8, int8, INT8, int8_t) \ |
339 | V(Uint16, uint16, UINT16, uint16_t) \ |
340 | V(Int16, int16, INT16, int16_t) \ |
341 | V(Uint32, uint32, UINT32, uint32_t) \ |
342 | V(Int32, int32, INT32, int32_t) |
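
// Together with the TYPED_ARRAY_CASE helper defined at its use sites below,
// each V(...) row becomes one switch case; the first row, for example,
// expands to
//
//   case kExternalUint8Array:
//     return Op<uint8_t>::Do(isolate, source, index, value);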
343 | |
344 | // This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer |
345 | // but also includes the ToInteger/ToBigInt conversion that's part of |
346 | // https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite |
347 | template <template <typename> class Op> |
348 | Object GetModifySetValueInBuffer(Arguments args, Isolate* isolate) { |
349 | HandleScope scope(isolate); |
350 | DCHECK_EQ(3, args.length()); |
351 | CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
352 | CONVERT_SIZE_ARG_CHECKED(index, 1); |
353 | CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2); |
354 | CHECK(sta->GetBuffer()->is_shared()); |
355 | |
356 | uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
357 | sta->byte_offset(); |
358 | |
  // The BigInt element kinds sort after all the integer kinds in
  // ExternalArrayType, so a single comparison selects both of them.
  if (sta->type() >= kExternalBigInt64Array) {
360 | Handle<BigInt> bigint; |
361 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint, |
362 | BigInt::FromObject(isolate, value_obj)); |
363 | // SharedArrayBuffers are not detachable. |
364 | CHECK_LT(index, sta->length()); |
365 | if (sta->type() == kExternalBigInt64Array) { |
366 | return Op<int64_t>::Do(isolate, source, index, bigint); |
367 | } |
368 | DCHECK(sta->type() == kExternalBigUint64Array); |
369 | return Op<uint64_t>::Do(isolate, source, index, bigint); |
370 | } |
371 | |
372 | Handle<Object> value; |
373 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
374 | Object::ToInteger(isolate, value_obj)); |
375 | // SharedArrayBuffers are not detachable. |
376 | CHECK_LT(index, sta->length()); |
377 | |
378 | switch (sta->type()) { |
379 | #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \ |
380 | case kExternal##Type##Array: \ |
381 | return Op<ctype>::Do(isolate, source, index, value); |
382 | |
383 | INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
384 | #undef TYPED_ARRAY_CASE |
385 | |
386 | default: |
387 | break; |
388 | } |
389 | |
390 | UNREACHABLE(); |
391 | } |
392 | |
393 | RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { |
394 | HandleScope scope(isolate); |
395 | DCHECK_EQ(2, args.length()); |
396 | CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
397 | CONVERT_SIZE_ARG_CHECKED(index, 1); |
398 | CHECK(sta->GetBuffer()->is_shared()); |
399 | |
400 | uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
401 | sta->byte_offset(); |
402 | |
403 | DCHECK(sta->type() == kExternalBigInt64Array || |
404 | sta->type() == kExternalBigUint64Array); |
405 | // SharedArrayBuffers are not detachable. |
406 | CHECK_LT(index, sta->length()); |
407 | if (sta->type() == kExternalBigInt64Array) { |
408 | return Load<int64_t>::Do(isolate, source, index); |
409 | } |
410 | DCHECK(sta->type() == kExternalBigUint64Array); |
411 | return Load<uint64_t>::Do(isolate, source, index); |
412 | } |
413 | |
414 | RUNTIME_FUNCTION(Runtime_AtomicsStore64) { |
415 | HandleScope scope(isolate); |
416 | DCHECK_EQ(3, args.length()); |
417 | CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
418 | CONVERT_SIZE_ARG_CHECKED(index, 1); |
419 | CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2); |
420 | CHECK(sta->GetBuffer()->is_shared()); |
421 | |
422 | uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
423 | sta->byte_offset(); |
424 | |
425 | Handle<BigInt> bigint; |
426 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint, |
427 | BigInt::FromObject(isolate, value_obj)); |
428 | |
429 | DCHECK(sta->type() == kExternalBigInt64Array || |
430 | sta->type() == kExternalBigUint64Array); |
431 | // SharedArrayBuffers are not detachable. |
432 | CHECK_LT(index, sta->length()); |
433 | if (sta->type() == kExternalBigInt64Array) { |
434 | Store<int64_t>::Do(isolate, source, index, bigint); |
435 | return *bigint; |
436 | } |
437 | DCHECK(sta->type() == kExternalBigUint64Array); |
438 | Store<uint64_t>::Do(isolate, source, index, bigint); |
439 | return *bigint; |
440 | } |
441 | |
442 | RUNTIME_FUNCTION(Runtime_AtomicsExchange) { |
443 | return GetModifySetValueInBuffer<Exchange>(args, isolate); |
444 | } |
445 | |
446 | RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { |
447 | HandleScope scope(isolate); |
448 | DCHECK_EQ(4, args.length()); |
449 | CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
450 | CONVERT_SIZE_ARG_CHECKED(index, 1); |
451 | CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2); |
452 | CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3); |
453 | CHECK(sta->GetBuffer()->is_shared()); |
454 | CHECK_LT(index, sta->length()); |
455 | |
456 | uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
457 | sta->byte_offset(); |
458 | |
459 | if (sta->type() >= kExternalBigInt64Array) { |
460 | Handle<BigInt> old_bigint; |
461 | Handle<BigInt> new_bigint; |
462 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
463 | isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj)); |
464 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
465 | isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj)); |
466 | // SharedArrayBuffers are not detachable. |
467 | CHECK_LT(index, sta->length()); |
468 | if (sta->type() == kExternalBigInt64Array) { |
469 | return DoCompareExchange<int64_t>(isolate, source, index, old_bigint, |
470 | new_bigint); |
471 | } |
472 | DCHECK(sta->type() == kExternalBigUint64Array); |
473 | return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint, |
474 | new_bigint); |
475 | } |
476 | |
477 | Handle<Object> old_value; |
478 | Handle<Object> new_value; |
479 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value, |
480 | Object::ToInteger(isolate, old_value_obj)); |
481 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value, |
482 | Object::ToInteger(isolate, new_value_obj)); |
483 | // SharedArrayBuffers are not detachable. |
484 | CHECK_LT(index, sta->length()); |
485 | |
486 | switch (sta->type()) { |
487 | #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \ |
488 | case kExternal##Type##Array: \ |
489 | return DoCompareExchange<ctype>(isolate, source, index, old_value, \ |
490 | new_value); |
491 | |
492 | INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
493 | #undef TYPED_ARRAY_CASE |
494 | |
495 | default: |
496 | break; |
497 | } |
498 | |
499 | UNREACHABLE(); |
500 | } |
501 | |
502 | // ES #sec-atomics.add |
503 | // Atomics.add( typedArray, index, value ) |
504 | RUNTIME_FUNCTION(Runtime_AtomicsAdd) { |
505 | return GetModifySetValueInBuffer<Add>(args, isolate); |
506 | } |
507 | |
508 | // ES #sec-atomics.sub |
509 | // Atomics.sub( typedArray, index, value ) |
510 | RUNTIME_FUNCTION(Runtime_AtomicsSub) { |
511 | return GetModifySetValueInBuffer<Sub>(args, isolate); |
512 | } |
513 | |
514 | // ES #sec-atomics.and |
515 | // Atomics.and( typedArray, index, value ) |
516 | RUNTIME_FUNCTION(Runtime_AtomicsAnd) { |
517 | return GetModifySetValueInBuffer<And>(args, isolate); |
518 | } |
519 | |
520 | // ES #sec-atomics.or |
521 | // Atomics.or( typedArray, index, value ) |
522 | RUNTIME_FUNCTION(Runtime_AtomicsOr) { |
523 | return GetModifySetValueInBuffer<Or>(args, isolate); |
524 | } |
525 | |
526 | // ES #sec-atomics.xor |
527 | // Atomics.xor( typedArray, index, value ) |
528 | RUNTIME_FUNCTION(Runtime_AtomicsXor) { |
529 | return GetModifySetValueInBuffer<Xor>(args, isolate); |
530 | } |
531 | |
532 | #undef INTEGER_TYPED_ARRAYS |
533 | |
534 | #else |
535 | |
536 | RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); } |
537 | |
538 | RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); } |
539 | |
540 | RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); } |
541 | |
542 | RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); } |
543 | |
544 | RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); } |
545 | |
546 | RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); } |
547 | |
548 | RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); } |
549 | |
550 | RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); } |
551 | |
552 | RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); } |
553 | |
554 | #endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 |
555 | // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X |
556 | |
557 | } // namespace internal |
558 | } // namespace v8 |
559 | |