1// Copyright 2016 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_CODE_STUB_ASSEMBLER_H_
6#define V8_CODE_STUB_ASSEMBLER_H_
7
8#include <functional>
9
10#include "src/bailout-reason.h"
11#include "src/base/macros.h"
12#include "src/compiler/code-assembler.h"
13#include "src/frames.h"
14#include "src/globals.h"
15#include "src/message-template.h"
16#include "src/objects.h"
17#include "src/objects/arguments.h"
18#include "src/objects/bigint.h"
19#include "src/objects/shared-function-info.h"
20#include "src/objects/smi.h"
21#include "src/roots.h"
22
23#include "torque-generated/builtins-base-from-dsl-gen.h"
24
25namespace v8 {
26namespace internal {
27
28class CallInterfaceDescriptor;
29class CodeStubArguments;
30class CodeStubAssembler;
31class StatsCounter;
32class StubCache;
33
34enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
35
36#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
37 V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
38 V(PromiseSpeciesProtector, promise_species_protector, \
39 PromiseSpeciesProtector) \
40 V(TypedArraySpeciesProtector, typed_array_species_protector, \
41 TypedArraySpeciesProtector) \
42 V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector)
43
44#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
45 V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
46 V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
47 V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
48 V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
49 AllocationSiteWithoutWeakNextMap) \
50 V(BooleanMap, boolean_map, BooleanMap) \
51 V(CodeMap, code_map, CodeMap) \
52 V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
53 V(EmptyPropertyDictionary, empty_property_dictionary, \
54 EmptyPropertyDictionary) \
55 V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
56 EmptySlowElementDictionary) \
57 V(empty_string, empty_string, EmptyString) \
58 V(FalseValue, false_value, False) \
59 V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
60 V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
61 V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
62 V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
63 V(FunctionTemplateInfoMap, function_template_info_map, \
64 FunctionTemplateInfoMap) \
65 V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
66 V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
67 V(HeapNumberMap, heap_number_map, HeapNumberMap) \
68 V(iterator_symbol, iterator_symbol, IteratorSymbol) \
69 V(length_string, length_string, LengthString) \
70 V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
71 V(MetaMap, meta_map, MetaMap) \
72 V(MinusZeroValue, minus_zero_value, MinusZero) \
73 V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
74 V(NanValue, nan_value, Nan) \
75 V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
76 V(NullValue, null_value, Null) \
77 V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
78 V(PreparseDataMap, preparse_data_map, PreparseDataMap) \
79 V(prototype_string, prototype_string, PrototypeString) \
80 V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
81 V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
82 V(SymbolMap, symbol_map, SymbolMap) \
83 V(TheHoleValue, the_hole_value, TheHole) \
84 V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
85 V(TrueValue, true_value, True) \
86 V(Tuple2Map, tuple2_map, Tuple2Map) \
87 V(Tuple3Map, tuple3_map, Tuple3Map) \
88 V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
89 ArrayBoilerplateDescriptionMap) \
90 V(UncompiledDataWithoutPreparseDataMap, \
91 uncompiled_data_without_preparse_data_map, \
92 UncompiledDataWithoutPreparseDataMap) \
93 V(UncompiledDataWithPreparseDataMap, uncompiled_data_with_preparse_data_map, \
94 UncompiledDataWithPreparseDataMap) \
95 V(UndefinedValue, undefined_value, Undefined) \
96 V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)
97
98#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
99 HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
100 HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
101
102#ifdef DEBUG
103#define CSA_CHECK(csa, x) \
104 (csa)->Check( \
105 [&]() -> compiler::Node* { \
106 return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
107 }, \
108 #x, __FILE__, __LINE__)
109#else
110#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
111#endif
112
113#ifdef DEBUG
114// Add stringified versions to the given values, except the first. That is,
115// transform
116// x, a, b, c, d, e, f
117// to
118// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
119//
120// __VA_ARGS__ is ignored to allow the caller to pass through too many
121// parameters, and the first element is ignored to support having no extra
122// values without empty __VA_ARGS__ (which cause all sorts of problems with
123// extra commas).
124#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
125 v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5
126
127// Stringify the given variable number of arguments. The arguments are trimmed
128// to 5 if there are too many, and padded with nullptr if there are not enough.
129#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \
130 CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
131 nullptr, nullptr)
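
// For example (an illustrative expansion, assuming two extra values |a| and
// |b|), CSA_ASSERT_STRINGIFY_EXTRA_VALUES(cond, a, b) first pads to
//   CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(cond, a, b, nullptr, nullptr, nullptr,
//                                       nullptr, nullptr)
// and then expands to
//   a, "a", b, "b", nullptr, "nullptr", nullptr, "nullptr", nullptr, "nullptr"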
132
133#define CSA_ASSERT_GET_FIRST(x, ...) (x)
134#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
135
136// CSA_ASSERT(csa, <condition>, <extra values to print...>)
137
138// We have to jump through some hoops to allow <extra values to print...> to be
139// empty.
140#define CSA_ASSERT(csa, ...) \
141 (csa)->Assert( \
142 [&]() -> compiler::Node* { \
143 return implicit_cast<compiler::SloppyTNode<Word32T>>( \
144 EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
145 }, \
146 EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
147 CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
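
// An illustrative use (|index| and |length| stand for arbitrary nodes in the
// surrounding assembler code):
//   CSA_ASSERT(this, UintPtrLessThan(index, length), index, length);
// In DEBUG builds this emits a runtime check that reports the stringified
// condition, the source location, and the extra values on failure; in release
// builds the macro expands to a no-op.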
148
149// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
150// <extra values to print...>)
151
152#define CSA_ASSERT_BRANCH(csa, ...) \
153 (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
154 EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
155 __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
156
157#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
158 (csa)->Assert( \
159 [&]() -> compiler::Node* { \
160 compiler::Node* const argc = \
161 (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
162 return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
163 }, \
164 "argc " #op " " #expected, __FILE__, __LINE__, \
165 SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
166 "argc")
167
168#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
169 CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
170
171#define CSA_DEBUG_INFO(name) \
172 { #name, __FILE__, __LINE__ }
173#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
174#define VARIABLE(name, ...) \
175 Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
176#define VARIABLE_CONSTRUCTOR(name, ...) \
177 name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
178#define TYPED_VARIABLE_DEF(type, name, ...) \
179 TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
180#else // DEBUG
181#define CSA_ASSERT(csa, ...) ((void)0)
182#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
183#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
184#define BIND(label) Bind(label)
185#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
186#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
187#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
188#endif // DEBUG
189
190#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
191
192#ifdef ENABLE_SLOW_DCHECKS
193#define CSA_SLOW_ASSERT(csa, ...) \
194 if (FLAG_enable_slow_asserts) { \
195 CSA_ASSERT(csa, __VA_ARGS__); \
196 }
197#else
198#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
199#endif
200
201// Provides JavaScript-specific "macro-assembler" functionality on top of the
202// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add useful JavaScript-specific CodeAssembler "macros"
// without modifying files in the compiler directory (and without requiring a
// review from a compiler directory OWNER).
206class V8_EXPORT_PRIVATE CodeStubAssembler
207 : public compiler::CodeAssembler,
208 public BaseBuiltinsFromDSLAssembler {
209 public:
210 using Node = compiler::Node;
211 template <class T>
212 using TNode = compiler::TNode<T>;
213 template <class T>
214 using SloppyTNode = compiler::SloppyTNode<T>;
215
216 template <typename T>
217 using LazyNode = std::function<TNode<T>()>;
218
219 explicit CodeStubAssembler(compiler::CodeAssemblerState* state);
220
221 enum AllocationFlag : uint8_t {
222 kNone = 0,
223 kDoubleAlignment = 1,
224 kPretenured = 1 << 1,
225 kAllowLargeObjectAllocation = 1 << 2,
226 };
227
228 enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking };
229
230 typedef base::Flags<AllocationFlag> AllocationFlags;
231
232 enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
233
234 // On 32-bit platforms, there is a slight performance advantage to doing all
235 // of the array offset/index arithmetic with SMIs, since it's possible
236 // to save a few tag/untag operations without paying an extra expense when
  // calculating the array offset (the Smi math can be folded away), and there
  // are fewer live ranges. Thus indices are only converted to untagged values
  // on 64-bit platforms.
240 ParameterMode OptimalParameterMode() const {
241 return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
242 }
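
  // A minimal sketch of how this is used together with the IntPtrOrSmi*
  // helpers declared below (the name |index| is illustrative only):
  //   ParameterMode mode = OptimalParameterMode();
  //   Node* index = IntPtrOrSmiConstant(0, mode);
  //   index = IntPtrOrSmiAdd(index, IntPtrOrSmiConstant(1, mode), mode);
  // On 64-bit platforms this operates on raw IntPtr values; on 32-bit
  // platforms the same code operates on Smis.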
243
244 MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
245 return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
246 : MachineRepresentation::kTaggedSigned;
247 }
248
249 MachineRepresentation OptimalParameterRepresentation() const {
250 return ParameterRepresentation(OptimalParameterMode());
251 }
252
253 TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) {
254 if (mode == SMI_PARAMETERS) value = SmiUntag(value);
255 return UncheckedCast<IntPtrT>(value);
256 }
257
258 Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
259 if (mode == SMI_PARAMETERS) return SmiTag(value);
260 return value;
261 }
262
263 Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
264 return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
265 }
266
267 TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
268 if (mode != SMI_PARAMETERS) return SmiTag(value);
269 return UncheckedCast<Smi>(value);
270 }
271
272 Node* TaggedToParameter(SloppyTNode<Smi> value, ParameterMode mode) {
273 if (mode != SMI_PARAMETERS) return SmiUntag(value);
274 return value;
275 }
276
277 bool ToParameterConstant(Node* node, intptr_t* out, ParameterMode mode) {
278 if (mode == ParameterMode::SMI_PARAMETERS) {
279 Smi constant;
280 if (ToSmiConstant(node, &constant)) {
281 *out = static_cast<intptr_t>(constant->value());
282 return true;
283 }
284 } else {
285 DCHECK_EQ(mode, ParameterMode::INTPTR_PARAMETERS);
286 intptr_t constant;
287 if (ToIntPtrConstant(node, constant)) {
288 *out = constant;
289 return true;
290 }
291 }
292
293 return false;
294 }
295
296#if defined(V8_HOST_ARCH_32_BIT)
297 TNode<Smi> BIntToSmi(TNode<BInt> source) { return source; }
298 TNode<IntPtrT> BIntToIntPtr(TNode<BInt> source) {
299 return SmiToIntPtr(source);
300 }
301 TNode<BInt> SmiToBInt(TNode<Smi> source) { return source; }
302 TNode<BInt> IntPtrToBInt(TNode<IntPtrT> source) {
303 return SmiFromIntPtr(source);
304 }
305#elif defined(V8_HOST_ARCH_64_BIT)
306 TNode<Smi> BIntToSmi(TNode<BInt> source) { return SmiFromIntPtr(source); }
307 TNode<IntPtrT> BIntToIntPtr(TNode<BInt> source) { return source; }
308 TNode<BInt> SmiToBInt(TNode<Smi> source) { return SmiToIntPtr(source); }
309 TNode<BInt> IntPtrToBInt(TNode<IntPtrT> source) { return source; }
310#else
311#error Unknown architecture.
312#endif
313
314 TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
315 GotoIf(TaggedIsNotSmi(value), fail);
316 return UncheckedCast<Smi>(value);
317 }
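
  // These TaggedTo*/HeapObjectTo* helpers all follow the same pattern: branch
  // to |fail| if the value is not of the expected type, otherwise return the
  // value with a narrowed type. A minimal illustrative use, where
  // |maybe_index| is a hypothetical TNode<Object>:
  //   Label if_not_smi(this, Label::kDeferred);
  //   TNode<Smi> index = TaggedToSmi(maybe_index, &if_not_smi);
  //   // ... fast path using |index| ...
  //   BIND(&if_not_smi);
  //   // ... slow path ...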
318
319 TNode<Smi> TaggedToPositiveSmi(TNode<Object> value, Label* fail) {
320 GotoIfNot(TaggedIsPositiveSmi(value), fail);
321 return UncheckedCast<Smi>(value);
322 }
323
324 TNode<String> TaggedToDirectString(TNode<Object> value, Label* fail);
325
326 TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
327 GotoIfNot(IsNumber(value), fail);
328 return UncheckedCast<Number>(value);
329 }
330
331 TNode<HeapObject> TaggedToHeapObject(TNode<Object> value, Label* fail) {
332 GotoIf(TaggedIsSmi(value), fail);
333 return UncheckedCast<HeapObject>(value);
334 }
335
336 TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
337 Label* fail) {
338 GotoIfNot(IsJSArray(heap_object), fail);
339 return UncheckedCast<JSArray>(heap_object);
340 }
341
342 TNode<JSArrayBuffer> HeapObjectToJSArrayBuffer(TNode<HeapObject> heap_object,
343 Label* fail) {
344 GotoIfNot(IsJSArrayBuffer(heap_object), fail);
345 return UncheckedCast<JSArrayBuffer>(heap_object);
346 }
347
348 TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
349 TNode<Object> value, Label* fail) {
350 GotoIf(TaggedIsSmi(value), fail);
351 TNode<HeapObject> heap_object = CAST(value);
352 GotoIfNot(IsFastJSArray(heap_object, context), fail);
353 return UncheckedCast<JSArray>(heap_object);
354 }
355
356 TNode<JSDataView> HeapObjectToJSDataView(TNode<HeapObject> heap_object,
357 Label* fail) {
358 GotoIfNot(IsJSDataView(heap_object), fail);
359 return CAST(heap_object);
360 }
361
362 TNode<JSProxy> HeapObjectToJSProxy(TNode<HeapObject> heap_object,
363 Label* fail) {
364 GotoIfNot(IsJSProxy(heap_object), fail);
365 return CAST(heap_object);
366 }
367
368 TNode<JSReceiver> HeapObjectToCallable(TNode<HeapObject> heap_object,
369 Label* fail) {
370 GotoIfNot(IsCallable(heap_object), fail);
371 return CAST(heap_object);
372 }
373
374 TNode<String> HeapObjectToString(TNode<HeapObject> heap_object, Label* fail) {
375 GotoIfNot(IsString(heap_object), fail);
376 return CAST(heap_object);
377 }
378
379 TNode<JSReceiver> HeapObjectToConstructor(TNode<HeapObject> heap_object,
380 Label* fail) {
381 GotoIfNot(IsConstructor(heap_object), fail);
382 return CAST(heap_object);
383 }
384
385 Node* MatchesParameterMode(Node* value, ParameterMode mode);
386
387#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
388 Node* OpName(Node* a, Node* b, ParameterMode mode) { \
389 if (mode == SMI_PARAMETERS) { \
390 return SmiOpName(CAST(a), CAST(b)); \
391 } else { \
392 DCHECK_EQ(INTPTR_PARAMETERS, mode); \
393 return IntPtrOpName(a, b); \
394 } \
395 }
396 PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
397 PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
398 PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
399 PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
400 PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
401 SmiLessThanOrEqual)
402 PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
403 PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
404 SmiGreaterThanOrEqual)
405 PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow)
406 PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual,
407 SmiAboveOrEqual)
408#undef PARAMETER_BINOP
409
410 uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { return a << b; }
411 uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { return a >> b; }
412 intptr_t ConstexprIntPtrAdd(intptr_t a, intptr_t b) { return a + b; }
413 uintptr_t ConstexprUintPtrAdd(uintptr_t a, uintptr_t b) { return a + b; }
414 intptr_t ConstexprWordNot(intptr_t a) { return ~a; }
415 uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; }
416
417 TNode<Object> NoContextConstant();
418
419#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
420 compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
421 std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
422 name##Constant();
423 HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
424#undef HEAP_CONSTANT_ACCESSOR
425
426#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
427 compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
428 std::declval<Heap>().rootAccessorName())>::type>::type> \
429 name##Constant();
430 HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
431#undef HEAP_CONSTANT_ACCESSOR
432
433#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
434 TNode<BoolT> Is##name(SloppyTNode<Object> value); \
435 TNode<BoolT> IsNot##name(SloppyTNode<Object> value);
436 HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
437#undef HEAP_CONSTANT_TEST
438
439 Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
440
441 bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
442 bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
443 ParameterMode mode);
444
  // Round the 32-bit payload of the provided word up to the next power of two.
446 TNode<IntPtrT> IntPtrRoundUpToPowerOfTwo32(TNode<IntPtrT> value);
447 // Select the maximum of the two provided IntPtr values.
448 TNode<IntPtrT> IntPtrMax(SloppyTNode<IntPtrT> left,
449 SloppyTNode<IntPtrT> right);
450 // Select the minimum of the two provided IntPtr values.
451 TNode<IntPtrT> IntPtrMin(SloppyTNode<IntPtrT> left,
452 SloppyTNode<IntPtrT> right);
453
454 // Float64 operations.
455 TNode<Float64T> Float64Ceil(SloppyTNode<Float64T> x);
456 TNode<Float64T> Float64Floor(SloppyTNode<Float64T> x);
457 TNode<Float64T> Float64Round(SloppyTNode<Float64T> x);
458 TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
459 TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
  // Select the maximum of the two provided Number values.
461 TNode<Number> NumberMax(SloppyTNode<Number> left, SloppyTNode<Number> right);
462 // Select the minimum of the two provided Number values.
463 TNode<Number> NumberMin(SloppyTNode<Number> left, SloppyTNode<Number> right);
464
465 // After converting an index to an integer, calculate a relative index: if
466 // index < 0, max(length + index, 0); else min(index, length)
467 TNode<IntPtrT> ConvertToRelativeIndex(TNode<Context> context,
468 TNode<Object> index,
469 TNode<IntPtrT> length);
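
  // For example (illustrative): with length == 5, an index of -2 converts to
  // 3 and an index of 7 converts to 5.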
470
471 // Returns true iff the given value fits into smi range and is >= 0.
472 TNode<BoolT> IsValidPositiveSmi(TNode<IntPtrT> value);
473
474 // Tag an IntPtr as a Smi value.
475 TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
476 // Untag a Smi value as an IntPtr.
477 TNode<IntPtrT> SmiUntag(SloppyTNode<Smi> value);
478
479 // Smi conversions.
480 TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
481 TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
482 TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
483 TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
484 TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
485
486 // Smi operations.
487#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
488 TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
489 if (SmiValuesAre32Bits()) { \
490 return BitcastWordToTaggedSigned( \
491 IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
492 } else { \
493 DCHECK(SmiValuesAre31Bits()); \
494 if (kSystemPointerSize == kInt64Size) { \
495 CSA_ASSERT(this, IsValidSmi(a)); \
496 CSA_ASSERT(this, IsValidSmi(b)); \
497 } \
498 return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
499 Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
500 TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \
501 } \
502 }
503 SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
504 SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
505 SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And)
506 SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or)
507#undef SMI_ARITHMETIC_BINOP
508 TNode<Smi> SmiInc(TNode<Smi> value) { return SmiAdd(value, SmiConstant(1)); }
509
510 TNode<IntPtrT> TryIntPtrAdd(TNode<IntPtrT> a, TNode<IntPtrT> b,
511 Label* if_overflow);
512 TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
513 TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
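
  // A minimal illustrative overflow-checked addition, where |a| and |b| are
  // hypothetical TNode<Smi> values:
  //   Label if_overflow(this, Label::kDeferred);
  //   TNode<Smi> sum = TrySmiAdd(a, b, &if_overflow);
  // Control transfers to |if_overflow| if the untagged addition overflows.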
514
515 TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
516 return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
517 }
518
519 TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
520 return BitcastWordToTaggedSigned(
521 WordAnd(WordShr(BitcastTaggedToWord(a), shift),
522 BitcastTaggedToWord(SmiConstant(-1))));
523 }
524
525 TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
526 return BitcastWordToTaggedSigned(
527 WordAnd(WordSar(BitcastTaggedToWord(a), shift),
528 BitcastTaggedToWord(SmiConstant(-1))));
529 }
530
531 Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
532 if (mode == SMI_PARAMETERS) {
533 return SmiShl(CAST(a), shift);
534 } else {
535 DCHECK_EQ(INTPTR_PARAMETERS, mode);
536 return WordShl(a, shift);
537 }
538 }
539
540 Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
541 if (mode == SMI_PARAMETERS) {
542 return SmiShr(CAST(a), shift);
543 } else {
544 DCHECK_EQ(INTPTR_PARAMETERS, mode);
545 return WordShr(a, shift);
546 }
547 }
548
549#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
550 TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
551 if (SmiValuesAre32Bits()) { \
552 return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
553 } else { \
554 DCHECK(SmiValuesAre31Bits()); \
555 if (kSystemPointerSize == kInt64Size) { \
556 CSA_ASSERT(this, IsValidSmi(a)); \
557 CSA_ASSERT(this, IsValidSmi(b)); \
558 } \
559 return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
560 TruncateIntPtrToInt32(BitcastTaggedToWord(b))); \
561 } \
562 }
563 SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
564 SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
565 SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan)
566 SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual,
567 Uint32GreaterThanOrEqual)
568 SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan)
569 SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan)
570 SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual,
571 Int32LessThanOrEqual)
572 SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan)
573 SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
574 Int32GreaterThanOrEqual)
575#undef SMI_COMPARISON_OP
576 TNode<Smi> SmiMax(TNode<Smi> a, TNode<Smi> b);
577 TNode<Smi> SmiMin(TNode<Smi> a, TNode<Smi> b);
578 // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
579 TNode<Number> SmiMod(TNode<Smi> a, TNode<Smi> b);
580 // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
581 TNode<Number> SmiMul(TNode<Smi> a, TNode<Smi> b);
582 // Tries to compute dividend / divisor for Smi inputs; branching to bailout
583 // if the division needs to be performed as a floating point operation.
584 TNode<Smi> TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor, Label* bailout);
585
  // Compares two Smis x and y as if they were converted to strings and then
587 // compared lexicographically. Returns:
588 // -1 iff x < y.
589 // 0 iff x == y.
590 // 1 iff x > y.
591 TNode<Smi> SmiLexicographicCompare(TNode<Smi> x, TNode<Smi> y);
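
  // For example (illustrative): comparing the Smis 9 and 10 yields 1, because
  // "9" compares greater than "10" as a string even though 9 < 10 numerically.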
592
593 // Smi | HeapNumber operations.
594 TNode<Number> NumberInc(SloppyTNode<Number> value);
595 TNode<Number> NumberDec(SloppyTNode<Number> value);
596 TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
597 TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
598 void GotoIfNotNumber(Node* value, Label* is_not_number);
599 void GotoIfNumber(Node* value, Label* is_number);
600 TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }
601
602 TNode<Number> BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);
603
604 // Allocate an object of the given size.
605 TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
606 AllocationFlags flags = kNone);
607 TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
608 TNode<HeapObject> Allocate(TNode<IntPtrT> size,
609 AllocationFlags flags = kNone);
610 TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
611 TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
612 TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
613 TNode<IntPtrT> offset);
614
615 TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
616
617 typedef std::function<void(Label*, Label*)> BranchGenerator;
618 typedef std::function<Node*()> NodeGenerator;
619
620 void Assert(const BranchGenerator& branch, const char* message = nullptr,
621 const char* file = nullptr, int line = 0,
622 Node* extra_node1 = nullptr, const char* extra_node1_name = "",
623 Node* extra_node2 = nullptr, const char* extra_node2_name = "",
624 Node* extra_node3 = nullptr, const char* extra_node3_name = "",
625 Node* extra_node4 = nullptr, const char* extra_node4_name = "",
626 Node* extra_node5 = nullptr, const char* extra_node5_name = "");
627 void Assert(const NodeGenerator& condition_body,
628 const char* message = nullptr, const char* file = nullptr,
629 int line = 0, Node* extra_node1 = nullptr,
630 const char* extra_node1_name = "", Node* extra_node2 = nullptr,
631 const char* extra_node2_name = "", Node* extra_node3 = nullptr,
632 const char* extra_node3_name = "", Node* extra_node4 = nullptr,
633 const char* extra_node4_name = "", Node* extra_node5 = nullptr,
634 const char* extra_node5_name = "");
635 void Check(const BranchGenerator& branch, const char* message = nullptr,
636 const char* file = nullptr, int line = 0,
637 Node* extra_node1 = nullptr, const char* extra_node1_name = "",
638 Node* extra_node2 = nullptr, const char* extra_node2_name = "",
639 Node* extra_node3 = nullptr, const char* extra_node3_name = "",
640 Node* extra_node4 = nullptr, const char* extra_node4_name = "",
641 Node* extra_node5 = nullptr, const char* extra_node5_name = "");
642 void Check(const NodeGenerator& condition_body, const char* message = nullptr,
643 const char* file = nullptr, int line = 0,
644 Node* extra_node1 = nullptr, const char* extra_node1_name = "",
645 Node* extra_node2 = nullptr, const char* extra_node2_name = "",
646 Node* extra_node3 = nullptr, const char* extra_node3_name = "",
647 Node* extra_node4 = nullptr, const char* extra_node4_name = "",
648 Node* extra_node5 = nullptr, const char* extra_node5_name = "");
649 void FailAssert(
650 const char* message = nullptr, const char* file = nullptr, int line = 0,
651 Node* extra_node1 = nullptr, const char* extra_node1_name = "",
652 Node* extra_node2 = nullptr, const char* extra_node2_name = "",
653 Node* extra_node3 = nullptr, const char* extra_node3_name = "",
654 Node* extra_node4 = nullptr, const char* extra_node4_name = "",
655 Node* extra_node5 = nullptr, const char* extra_node5_name = "");
656
657 void FastCheck(TNode<BoolT> condition);
658
659 // The following Call wrappers call an object according to the semantics that
  // one finds in the ECMAScript spec, operating on a Callable (e.g. a
661 // JSFunction or proxy) rather than a Code object.
662 template <class... TArgs>
663 TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
664 TNode<JSReceiver> receiver, TArgs... args) {
665 return UncheckedCast<Object>(CallJS(
666 CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
667 context, callable, receiver, args...));
668 }
669 template <class... TArgs>
670 TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
671 TNode<Object> receiver, TArgs... args) {
672 if (IsUndefinedConstant(receiver) || IsNullConstant(receiver)) {
673 return UncheckedCast<Object>(CallJS(
674 CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
675 context, callable, receiver, args...));
676 }
677 return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
678 callable, receiver, args...));
679 }
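
  // A minimal illustrative call, where |map_fn|, |receiver| and |element| are
  // hypothetical nodes:
  //   TNode<Object> result = Call(context, map_fn, receiver, element);
  // The two receiver overloads differ only in which ConvertReceiverMode is
  // used to pick the Call stub.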
680
681 template <class... TArgs>
682 TNode<JSReceiver> ConstructWithTarget(TNode<Context> context,
683 TNode<JSReceiver> target,
684 TNode<JSReceiver> new_target,
685 TArgs... args) {
686 return CAST(ConstructJSWithTarget(CodeFactory::Construct(isolate()),
687 context, target, new_target,
688 implicit_cast<TNode<Object>>(args)...));
689 }
690 template <class... TArgs>
691 TNode<JSReceiver> Construct(TNode<Context> context,
692 TNode<JSReceiver> new_target, TArgs... args) {
693 return ConstructWithTarget(context, new_target, new_target, args...);
694 }
695
696 template <class A, class F, class G>
697 TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
698 const G& false_body) {
699 return UncheckedCast<A>(SelectImpl(
700 condition,
701 [&]() -> Node* { return implicit_cast<TNode<A>>(true_body()); },
702 [&]() -> Node* { return implicit_cast<TNode<A>>(false_body()); },
703 MachineRepresentationOf<A>::value));
704 }
705
706 template <class A>
707 TNode<A> SelectConstant(TNode<BoolT> condition, TNode<A> true_value,
708 TNode<A> false_value) {
709 return Select<A>(condition, [=] { return true_value; },
710 [=] { return false_value; });
711 }
712
713 TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
714 int true_value, int false_value);
715 TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
716 int true_value, int false_value);
717 TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
718 TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
719 Smi false_value);
720 TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
721 Smi false_value) {
722 return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
723 }
724 TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
725 int false_value) {
726 return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
727 }
728 TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
729 int false_value) {
730 return SelectSmiConstant(condition, Smi::FromInt(true_value),
731 Smi::FromInt(false_value));
732 }
733
734 TNode<String> SingleCharacterStringConstant(char const* single_char) {
735 DCHECK_EQ(strlen(single_char), 1);
736 return HeapConstant(
737 isolate()->factory()->LookupSingleCharacterStringFromCode(
738 single_char[0]));
739 }
740
741 TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
742
743 // Check a value for smi-ness
744 TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
745 TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
746 TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a);
747 // Check that the value is a non-negative smi.
748 TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
  // Check that a word (typically an address) is aligned to the given
  // |alignment|.
750 TNode<BoolT> WordIsAligned(SloppyTNode<WordT> word, size_t alignment);
751 TNode<BoolT> WordIsPowerOfTwo(SloppyTNode<IntPtrT> value);
752
753#if DEBUG
754 void Bind(Label* label, AssemblerDebugInfo debug_info);
755#endif // DEBUG
756 void Bind(Label* label);
757
758 template <class... T>
759 void Bind(compiler::CodeAssemblerParameterizedLabel<T...>* label,
760 TNode<T>*... phis) {
761 CodeAssembler::Bind(label, phis...);
762 }
763
764 void BranchIfSmiEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
765 Label* if_false) {
766 Branch(SmiEqual(a, b), if_true, if_false);
767 }
768
769 void BranchIfSmiLessThan(TNode<Smi> a, TNode<Smi> b, Label* if_true,
770 Label* if_false) {
771 Branch(SmiLessThan(a, b), if_true, if_false);
772 }
773
774 void BranchIfSmiLessThanOrEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
775 Label* if_false) {
776 Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
777 }
778
779 void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
780 Branch(Float64Equal(value, value), if_false, if_true);
781 }
782
783 // Branches to {if_true} if ToBoolean applied to {value} yields true,
784 // otherwise goes to {if_false}.
785 void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
786
787 void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
788
789 // Branches to {if_true} when --force-slow-path flag has been passed.
  // It's used for testing to ensure that slow path implementations behave
  // equivalently to the corresponding fast paths (where applicable).
  //
  // Works only with the V8_ENABLE_FORCE_SLOW_PATH compile-time flag. No-op
  // otherwise.
794 void GotoIfForceSlowPath(Label* if_true);
795
796 // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect.
797 void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true);
798
799 // Load value from current parent frame by given offset in bytes.
800 Node* LoadFromParentFrame(int offset,
801 MachineType rep = MachineType::AnyTagged());
802
803 // Load an object pointer from a buffer that isn't in the heap.
804 Node* LoadBufferObject(Node* buffer, int offset,
805 MachineType rep = MachineType::AnyTagged());
806 TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
807 return UncheckedCast<RawPtrT>(
808 LoadBufferObject(buffer, offset, MachineType::Pointer()));
809 }
810 TNode<Smi> LoadBufferSmi(TNode<RawPtrT> buffer, int offset) {
811 return CAST(LoadBufferObject(buffer, offset, MachineType::TaggedSigned()));
812 }
813 // Load a field from an object on the heap.
814 Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
815 MachineType rep);
816 template <class T, typename std::enable_if<
817 std::is_convertible<TNode<T>, TNode<Object>>::value,
818 int>::type = 0>
819 TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
820 return CAST(LoadObjectField(object, offset, MachineTypeOf<T>::value));
821 }
822 template <class T, typename std::enable_if<
823 std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
824 int>::type = 0>
825 TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
826 return UncheckedCast<T>(
827 LoadObjectField(object, offset, MachineTypeOf<T>::value));
828 }
829 TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object, int offset) {
830 return UncheckedCast<Object>(
831 LoadObjectField(object, offset, MachineType::AnyTagged()));
832 }
833 Node* LoadObjectField(SloppyTNode<HeapObject> object,
834 SloppyTNode<IntPtrT> offset, MachineType rep);
835 TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object,
836 SloppyTNode<IntPtrT> offset) {
837 return UncheckedCast<Object>(
838 LoadObjectField(object, offset, MachineType::AnyTagged()));
839 }
840 template <class T, typename std::enable_if<
841 std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
842 int>::type = 0>
843 TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
844 return UncheckedCast<T>(
845 LoadObjectField(object, offset, MachineTypeOf<T>::value));
846 }
847 // Load a SMI field and untag it.
848 TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
849 int offset);
850 // Load a SMI field, untag it, and convert to Word32.
851 TNode<Int32T> LoadAndUntagToWord32ObjectField(Node* object, int offset);
852 // Load a SMI and untag it.
853 TNode<IntPtrT> LoadAndUntagSmi(Node* base, int index);
854
855 TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
856 int offset) {
857 return UncheckedCast<MaybeObject>(
858 LoadObjectField(object, offset, MachineType::AnyTagged()));
859 }
860
861 // Reference is the CSA-equivalent of a Torque reference value,
862 // representing an inner pointer into a HeapObject.
863 struct Reference {
864 TNode<HeapObject> object;
865 TNode<IntPtrT> offset;
866
867 std::tuple<TNode<HeapObject>, TNode<IntPtrT>> Flatten() const {
868 return std::make_tuple(object, offset);
869 }
870 };
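
  // A minimal illustrative use of Reference, where |object| is a hypothetical
  // TNode<HeapObject> and the map offset is just an example of a constant
  // field offset:
  //   Reference ref{object, IntPtrConstant(HeapObject::kMapOffset)};
  //   TNode<Map> map = LoadReference<Map>(ref);
  //   StoreReference<Map>(ref, map);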
871
872 template <class T, typename std::enable_if<
873 std::is_convertible<TNode<T>, TNode<Object>>::value,
874 int>::type = 0>
875 TNode<T> LoadReference(Reference reference) {
876 return CAST(LoadObjectField(reference.object, reference.offset,
877 MachineTypeOf<T>::value));
878 }
879 template <class T, typename std::enable_if<
880 std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
881 int>::type = 0>
882 TNode<T> LoadReference(Reference reference) {
883 return UncheckedCast<T>(LoadObjectField(reference.object, reference.offset,
884 MachineTypeOf<T>::value));
885 }
886 template <class T, typename std::enable_if<
887 std::is_convertible<TNode<T>, TNode<Object>>::value,
888 int>::type = 0>
889 void StoreReference(Reference reference, TNode<T> value) {
890 int const_offset;
891 if (std::is_same<T, Smi>::value) {
892 StoreObjectFieldNoWriteBarrier(reference.object, reference.offset, value);
893 } else if (std::is_same<T, Map>::value &&
894 ToInt32Constant(reference.offset, const_offset) &&
895 const_offset == HeapObject::kMapOffset) {
896 StoreMap(reference.object, value);
897 } else {
898 StoreObjectField(reference.object, reference.offset, value);
899 }
900 }
901 template <class T, typename std::enable_if<
902 std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
903 int>::type = 0>
904 void StoreReference(Reference reference, TNode<T> value) {
905 StoreObjectFieldNoWriteBarrier<T>(reference.object, reference.offset,
906 value);
907 }
908
909 // Tag a smi and store it.
910 void StoreAndTagSmi(Node* base, int offset, Node* value);
911
912 // Load the floating point value of a HeapNumber.
913 TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapNumber> object);
  // Load the Map of a HeapObject.
915 TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
  // Load the instance type of a HeapObject.
917 TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
  // Compare the instance type of the object against the provided one.
919 TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
920 InstanceType type);
921 TNode<BoolT> DoesntHaveInstanceType(SloppyTNode<HeapObject> object,
922 InstanceType type);
923 TNode<BoolT> TaggedDoesntHaveInstanceType(SloppyTNode<HeapObject> any_tagged,
924 InstanceType type);
925 // Load the properties backing store of a JSObject.
926 TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
927 TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
928 // Load the elements backing store of a JSObject.
929 TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) {
930 return LoadJSObjectElements(object);
931 }
  // Load the length of a JSArgumentsObjectWithLength instance.
933 TNode<Object> LoadJSArgumentsObjectWithLength(
934 SloppyTNode<JSArgumentsObjectWithLength> array);
935 // Load the length of a JSArray instance.
936 TNode<Number> LoadJSArrayLength(SloppyTNode<JSArray> array);
937 // Load the length of a fast JSArray instance. Returns a positive Smi.
938 TNode<Smi> LoadFastJSArrayLength(SloppyTNode<JSArray> array);
939 // Load the length of a fixed array base instance.
940 TNode<Smi> LoadFixedArrayBaseLength(SloppyTNode<FixedArrayBase> array);
  // Load and untag the length of a fixed array base instance.
942 TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(
943 SloppyTNode<FixedArrayBase> array);
944 // Load the length of a WeakFixedArray.
945 TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
946 TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
947 SloppyTNode<WeakFixedArray> array);
948 // Load the number of descriptors in DescriptorArray.
949 TNode<Int32T> LoadNumberOfDescriptors(TNode<DescriptorArray> array);
950 // Load the bit field of a Map.
951 TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
952 // Load bit field 2 of a map.
953 TNode<Int32T> LoadMapBitField2(SloppyTNode<Map> map);
954 // Load bit field 3 of a map.
955 TNode<Uint32T> LoadMapBitField3(SloppyTNode<Map> map);
956 // Load the instance type of a map.
957 TNode<Int32T> LoadMapInstanceType(SloppyTNode<Map> map);
958 // Load the ElementsKind of a map.
959 TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
960 TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
961 // Load the instance descriptors of a map.
962 TNode<DescriptorArray> LoadMapDescriptors(SloppyTNode<Map> map);
963 // Load the prototype of a map.
964 TNode<HeapObject> LoadMapPrototype(SloppyTNode<Map> map);
  // Load the prototype info of a map. The caller must check whether the
  // result is actually a PrototypeInfo object.
967 TNode<PrototypeInfo> LoadMapPrototypeInfo(SloppyTNode<Map> map,
968 Label* if_has_no_proto_info);
969 // Load the instance size of a Map.
970 TNode<IntPtrT> LoadMapInstanceSizeInWords(SloppyTNode<Map> map);
971 // Load the inobject properties start of a Map (valid only for JSObjects).
972 TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(SloppyTNode<Map> map);
973 // Load the constructor function index of a Map (only for primitive maps).
974 TNode<IntPtrT> LoadMapConstructorFunctionIndex(SloppyTNode<Map> map);
975 // Load the constructor of a Map (equivalent to Map::GetConstructor()).
976 TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
977 // Load the EnumLength of a Map.
978 Node* LoadMapEnumLength(SloppyTNode<Map> map);
979 // Load the back-pointer of a Map.
980 TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
981 // Checks that |map| has only simple properties, returns bitfield3.
982 TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
983 TNode<Int32T> instance_type,
984 Label* bailout);
  // Load the identity hash of a JSReceiver.
986 TNode<IntPtrT> LoadJSReceiverIdentityHash(SloppyTNode<Object> receiver,
987 Label* if_no_hash = nullptr);
988
989 // This is only used on a newly allocated PropertyArray which
990 // doesn't have an existing hash.
991 void InitializePropertyArrayLength(Node* property_array, Node* length,
992 ParameterMode mode);
993
994 // Check if the map is set for slow properties.
995 TNode<BoolT> IsDictionaryMap(SloppyTNode<Map> map);
996
  // Load the hash field of a name as a uint32 value.
998 TNode<Uint32T> LoadNameHashField(SloppyTNode<Name> name);
  // Load the hash value of a name as a uint32 value.
  // If the {if_hash_not_computed} label is specified, it also checks whether
  // the hash has actually been computed.
1002 TNode<Uint32T> LoadNameHash(SloppyTNode<Name> name,
1003 Label* if_hash_not_computed = nullptr);
1004
1005 // Load length field of a String object as Smi value.
1006 TNode<Smi> LoadStringLengthAsSmi(SloppyTNode<String> string);
1007 // Load length field of a String object as intptr_t value.
1008 TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
1009 // Load length field of a String object as uint32_t value.
1010 TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
1011 // Loads a pointer to the sequential String char array.
1012 Node* PointerToSeqStringData(Node* seq_string);
1013 // Load value field of a JSValue object.
1014 Node* LoadJSValueValue(Node* object);
1015
1016 // Figures out whether the value of maybe_object is:
1017 // - a SMI (jump to "if_smi", "extracted" will be the SMI value)
1018 // - a cleared weak reference (jump to "if_cleared", "extracted" will be
1019 // untouched)
1020 // - a weak reference (jump to "if_weak", "extracted" will be the object
1021 // pointed to)
1022 // - a strong reference (jump to "if_strong", "extracted" will be the object
1023 // pointed to)
1024 void DispatchMaybeObject(TNode<MaybeObject> maybe_object, Label* if_smi,
1025 Label* if_cleared, Label* if_weak, Label* if_strong,
1026 TVariable<Object>* extracted);
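
  // A minimal illustrative use, with all label and variable names
  // hypothetical:
  //   Label if_smi(this), if_cleared(this), if_weak(this), if_strong(this);
  //   TVARIABLE(Object, extracted);
  //   DispatchMaybeObject(maybe_object, &if_smi, &if_cleared, &if_weak,
  //                       &if_strong, &extracted);
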
1027 // See MaybeObject for semantics of these functions.
1028 TNode<BoolT> IsStrong(TNode<MaybeObject> value);
1029 // This variant is for overzealous checking.
1030 TNode<BoolT> IsStrong(TNode<Object> value) {
1031 return IsStrong(ReinterpretCast<MaybeObject>(value));
1032 }
1033 TNode<HeapObject> GetHeapObjectIfStrong(TNode<MaybeObject> value,
1034 Label* if_not_strong);
1035
1036 TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value);
1037 TNode<BoolT> IsCleared(TNode<MaybeObject> value);
1038 TNode<BoolT> IsNotCleared(TNode<MaybeObject> value);
1039
1040 // Removes the weak bit + asserts it was set.
1041 TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value);
1042
1043 TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value,
1044 Label* if_cleared);
1045
1046 TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object,
1047 TNode<Object> value);
1048 TNode<BoolT> IsNotWeakReferenceTo(TNode<MaybeObject> object,
1049 TNode<Object> value);
1050 TNode<BoolT> IsStrongReferenceTo(TNode<MaybeObject> object,
1051 TNode<Object> value);
1052
1053 TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
1054
1055 void FixedArrayBoundsCheck(TNode<FixedArrayBase> array, Node* index,
1056 int additional_offset = 0,
1057 ParameterMode parameter_mode = INTPTR_PARAMETERS);
1058
1059 // Array is any array-like type that has a fixed header followed by
1060 // tagged elements.
1061 template <typename Array>
1062 TNode<IntPtrT> LoadArrayLength(TNode<Array> array);
1063
1064 // Array is any array-like type that has a fixed header followed by
1065 // tagged elements.
1066 template <typename Array>
1067 TNode<MaybeObject> LoadArrayElement(
1068 TNode<Array> array, int array_header_size, Node* index,
1069 int additional_offset = 0,
1070 ParameterMode parameter_mode = INTPTR_PARAMETERS,
1071 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
1072
1073 TNode<Object> LoadFixedArrayElement(
1074 TNode<FixedArray> object, Node* index, int additional_offset = 0,
1075 ParameterMode parameter_mode = INTPTR_PARAMETERS,
1076 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
1077 CheckBounds check_bounds = CheckBounds::kAlways);
1078
1079 // This doesn't emit a bounds-check. As part of the security-performance
1080 // tradeoff, only use it if it is performance critical.
1081 TNode<Object> UnsafeLoadFixedArrayElement(
1082 TNode<FixedArray> object, Node* index, int additional_offset = 0,
1083 ParameterMode parameter_mode = INTPTR_PARAMETERS,
1084 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
1085 return LoadFixedArrayElement(object, index, additional_offset,
1086 parameter_mode, needs_poisoning,
1087 CheckBounds::kDebugOnly);
1088 }
1089
1090 TNode<Object> LoadFixedArrayElement(
1091 TNode<FixedArray> object, TNode<IntPtrT> index,
1092 LoadSensitivity needs_poisoning,
1093 CheckBounds check_bounds = CheckBounds::kAlways) {
1094 return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
1095 needs_poisoning, check_bounds);
1096 }
1097 // This doesn't emit a bounds-check. As part of the security-performance
1098 // tradeoff, only use it if it is performance critical.
1099 TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
1100 TNode<IntPtrT> index,
1101 LoadSensitivity needs_poisoning) {
1102 return LoadFixedArrayElement(object, index, needs_poisoning,
1103 CheckBounds::kDebugOnly);
1104 }
1105
1106 TNode<Object> LoadFixedArrayElement(
1107 TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
1108 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
1109 return LoadFixedArrayElement(object, index, additional_offset,
1110 INTPTR_PARAMETERS, needs_poisoning);
1111 }
1112
1113 TNode<Object> LoadFixedArrayElement(
1114 TNode<FixedArray> object, int index, int additional_offset = 0,
1115 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
1116 return LoadFixedArrayElement(object, IntPtrConstant(index),
1117 additional_offset, INTPTR_PARAMETERS,
1118 needs_poisoning);
1119 }
1120 // This doesn't emit a bounds-check. As part of the security-performance
1121 // tradeoff, only use it if it is performance critical.
1122 TNode<Object> UnsafeLoadFixedArrayElement(
1123 TNode<FixedArray> object, int index, int additional_offset = 0,
1124 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
1125 return LoadFixedArrayElement(object, IntPtrConstant(index),
1126 additional_offset, INTPTR_PARAMETERS,
1127 needs_poisoning, CheckBounds::kDebugOnly);
1128 }
1129 TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
1130 TNode<Smi> index) {
1131 return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
1132 }
1133
1134 TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
1135 SloppyTNode<IntPtrT> index);
1136 TNode<IntPtrT> LoadPropertyArrayLength(TNode<PropertyArray> object);
1137
1138 // Load an element from an array and untag it and return it as Word32.
1139 // Array is any array-like type that has a fixed header followed by
1140 // tagged elements.
1141 template <typename Array>
1142 TNode<Int32T> LoadAndUntagToWord32ArrayElement(
1143 TNode<Array> array, int array_header_size, Node* index,
1144 int additional_offset = 0,
1145 ParameterMode parameter_mode = INTPTR_PARAMETERS);
1146
1147 // Load an array element from a FixedArray, untag it and return it as Word32.
1148 TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
1149 TNode<FixedArray> object, Node* index, int additional_offset = 0,
1150 ParameterMode parameter_mode = INTPTR_PARAMETERS);
1151
1152 TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
1153 TNode<FixedArray> object, int index, int additional_offset = 0) {
1154 return LoadAndUntagToWord32FixedArrayElement(
1155 object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS);
1156 }
1157
1158 // Load an array element from a WeakFixedArray.
1159 TNode<MaybeObject> LoadWeakFixedArrayElement(
1160 TNode<WeakFixedArray> object, Node* index, int additional_offset = 0,
1161 ParameterMode parameter_mode = INTPTR_PARAMETERS,
1162 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
1163
1164 TNode<MaybeObject> LoadWeakFixedArrayElement(
1165 TNode<WeakFixedArray> object, int index, int additional_offset = 0,
1166 LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
1167 return LoadWeakFixedArrayElement(object, IntPtrConstant(index),
1168 additional_offset, INTPTR_PARAMETERS,
1169 needs_poisoning);
1170 }
1171
1172 // Load an array element from a FixedDoubleArray.
1173 TNode<Float64T> LoadFixedDoubleArrayElement(
1174 SloppyTNode<FixedDoubleArray> object, Node* index,
1175 MachineType machine_type, int additional_offset = 0,
1176 ParameterMode parameter_mode = INTPTR_PARAMETERS,
1177 Label* if_hole = nullptr);
1178
1179 Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
1180 TNode<Smi> index,
1181 Label* if_hole = nullptr) {
1182 return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
1183 SMI_PARAMETERS, if_hole);
1184 }
1185
1186 Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
1187 TNode<IntPtrT> index,
1188 Label* if_hole = nullptr) {
1189 return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
1190 INTPTR_PARAMETERS, if_hole);
1191 }
1192
1193 // Load an array element from a FixedArray, FixedDoubleArray or a
1194 // NumberDictionary (depending on the |elements_kind|) and return
  // it as a tagged value. Assumes that |index| has already passed a length
  // check. Bails out to |if_accessor| if the element that
1197 // was found is an accessor, or to |if_hole| if the element at
1198 // the given |index| is not found in |elements|.
1199 TNode<Object> LoadFixedArrayBaseElementAsTagged(
1200 TNode<FixedArrayBase> elements, TNode<IntPtrT> index,
1201 TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole);
1202
1203 // Load a feedback slot from a FeedbackVector.
1204 TNode<MaybeObject> LoadFeedbackVectorSlot(
1205 Node* object, Node* index, int additional_offset = 0,
1206 ParameterMode parameter_mode = INTPTR_PARAMETERS);
1207
1208 TNode<IntPtrT> LoadFeedbackVectorLength(TNode<FeedbackVector>);
1209 TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
1210 TNode<Smi> index,
1211 Label* if_hole = nullptr);
1212 TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
1213 TNode<IntPtrT> index,
1214 Label* if_hole = nullptr);
1215
  // Load a Float64 value from the address |base| + |offset|. If the value is
  // a double hole, jump to |if_hole|. If |machine_type| is None, only the hole
  // check is generated.
1219 TNode<Float64T> LoadDoubleWithHoleCheck(
1220 SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
1221 MachineType machine_type = MachineType::Float64());
1222 TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
1223 TNode<FixedTypedArrayBase> typed_array);
1224 TNode<RawPtrT> LoadFixedTypedArrayOnHeapBackingStore(
1225 TNode<FixedTypedArrayBase> typed_array);
1226 Node* LoadFixedTypedArrayElementAsTagged(
1227 Node* data_pointer, Node* index_node, ElementsKind elements_kind,
1228 ParameterMode parameter_mode = INTPTR_PARAMETERS);
1229 TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
1230 TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind);
1231 // Parts of the above, factored out for readability:
1232 Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
1233 Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
1234 Node* offset);
1235 // 64-bit platforms only:
1236 TNode<BigInt> BigIntFromInt64(TNode<IntPtrT> value);
1237 TNode<BigInt> BigIntFromUint64(TNode<UintPtrT> value);
1238 // 32-bit platforms only:
1239 TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high);
1240 TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high);
1241
1242 void StoreFixedTypedArrayElementFromTagged(
1243 TNode<Context> context, TNode<FixedTypedArrayBase> elements,
1244 TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
1245 ParameterMode parameter_mode);
1246
1247 // Context manipulation
1248 TNode<Object> LoadContextElement(SloppyTNode<Context> context,
1249 int slot_index);
1250 TNode<Object> LoadContextElement(SloppyTNode<Context> context,
1251 SloppyTNode<IntPtrT> slot_index);
1252 TNode<Object> LoadContextElement(TNode<Context> context,
1253 TNode<Smi> slot_index);
1254 void StoreContextElement(SloppyTNode<Context> context, int slot_index,
1255 SloppyTNode<Object> value);
1256 void StoreContextElement(SloppyTNode<Context> context,
1257 SloppyTNode<IntPtrT> slot_index,
1258 SloppyTNode<Object> value);
1259 void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
1260 int slot_index,
1261 SloppyTNode<Object> value);
1262 TNode<Context> LoadNativeContext(SloppyTNode<Context> context);
1263 // Calling this is only valid if there's a module context in the chain.
1264 TNode<Context> LoadModuleContext(SloppyTNode<Context> context);
1265
1266 void GotoIfContextElementEqual(Node* value, Node* native_context,
1267 int slot_index, Label* if_equal) {
1268 GotoIf(WordEqual(value, LoadContextElement(native_context, slot_index)),
1269 if_equal);
1270 }
1271
1272 TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
1273 SloppyTNode<Context> native_context);
1274 TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
1275 SloppyTNode<Context> native_context);
1276
1277 TNode<BoolT> IsGeneratorFunction(TNode<JSFunction> function);
1278 TNode<BoolT> HasPrototypeProperty(TNode<JSFunction> function, TNode<Map> map);
1279 void GotoIfPrototypeRequiresRuntimeLookup(TNode<JSFunction> function,
1280 TNode<Map> map, Label* runtime);
1281 // Load the "prototype" property of a JSFunction.
1282 Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);

  TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
      SloppyTNode<SharedFunctionInfo> shared);

  void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
                                     TNode<Word32T> value);

  // Store the floating point value of a HeapNumber.
  void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
                            SloppyTNode<Float64T> value);
  void StoreMutableHeapNumberValue(SloppyTNode<MutableHeapNumber> object,
                                   SloppyTNode<Float64T> value);
  // Store a field to an object on the heap.
  void StoreObjectField(Node* object, int offset, Node* value);
  void StoreObjectField(Node* object, Node* offset, Node* value);
  void StoreObjectFieldNoWriteBarrier(
      Node* object, int offset, Node* value,
      MachineRepresentation rep = MachineRepresentation::kTagged);
  void StoreObjectFieldNoWriteBarrier(
      Node* object, Node* offset, Node* value,
      MachineRepresentation rep = MachineRepresentation::kTagged);

  template <class T = Object>
  void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
                                      TNode<IntPtrT> offset, TNode<T> value) {
    StoreObjectFieldNoWriteBarrier(object, offset, value,
                                   MachineRepresentationOf<T>::value);
  }
  template <class T = Object>
  void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object, int offset,
                                      TNode<T> value) {
    StoreObjectFieldNoWriteBarrier(object, offset, value,
                                   MachineRepresentationOf<T>::value);
  }

  // Store the Map of a HeapObject.
  void StoreMap(Node* object, Node* map);
  void StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
  void StoreMapNoWriteBarrier(Node* object, Node* map);
  void StoreObjectFieldRoot(Node* object, int offset, RootIndex root);
  // Store an array element to a FixedArray.
  void StoreFixedArrayElement(
      TNode<FixedArray> object, int index, SloppyTNode<Object> value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      CheckBounds check_bounds = CheckBounds::kAlways) {
    return StoreFixedArrayElement(object, IntPtrConstant(index), value,
                                  barrier_mode, 0, INTPTR_PARAMETERS,
                                  check_bounds);
  }
  // This doesn't emit a bounds-check. As part of the security-performance
  // tradeoff, only use it if it is performance critical.
  void UnsafeStoreFixedArrayElement(
      TNode<FixedArray> object, int index, SloppyTNode<Object> value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
    return StoreFixedArrayElement(object, index, value, barrier_mode,
                                  CheckBounds::kDebugOnly);
  }
  void StoreFixedArrayElement(TNode<FixedArray> object, int index,
                              TNode<Smi> value,
                              CheckBounds check_bounds = CheckBounds::kAlways) {
    return StoreFixedArrayElement(object, IntPtrConstant(index), value,
                                  SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS,
                                  check_bounds);
  }
  // This doesn't emit a bounds-check. As part of the security-performance
  // tradeoff, only use it if it is performance critical.
  void UnsafeStoreFixedArrayElement(TNode<FixedArray> object, int index,
                                    TNode<Smi> value) {
    return StoreFixedArrayElement(object, index, value,
                                  CheckBounds::kDebugOnly);
  }
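
  // Example (illustrative sketch, not part of the interface; |array| is an
  // assumed TNode<FixedArray> known to hold at least one element):
  //
  //   StoreFixedArrayElement(array, 0, SmiConstant(42));        // bounds-checked
  //   UnsafeStoreFixedArrayElement(array, 0, SmiConstant(42));  // unchecked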

  void StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
  void StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);

  void StoreFixedArrayOrPropertyArrayElement(
      Node* array, Node* index, Node* value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      int additional_offset = 0,
      ParameterMode parameter_mode = INTPTR_PARAMETERS);

  void StoreFixedArrayElement(
      TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      int additional_offset = 0,
      ParameterMode parameter_mode = INTPTR_PARAMETERS,
      CheckBounds check_bounds = CheckBounds::kAlways) {
    if (NeedsBoundsCheck(check_bounds)) {
      FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode);
    }
    StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
                                          additional_offset, parameter_mode);
  }

  // This doesn't emit a bounds-check. As part of the security-performance
  // tradeoff, only use it if it is performance critical.
  void UnsafeStoreFixedArrayElement(
      TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      int additional_offset = 0,
      ParameterMode parameter_mode = INTPTR_PARAMETERS) {
    return StoreFixedArrayElement(array, index, value, barrier_mode,
                                  additional_offset, parameter_mode,
                                  CheckBounds::kDebugOnly);
  }

  void StorePropertyArrayElement(
      TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      int additional_offset = 0,
      ParameterMode parameter_mode = INTPTR_PARAMETERS) {
    StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
                                          additional_offset, parameter_mode);
  }

  void StoreFixedArrayElementSmi(
      TNode<FixedArray> array, TNode<Smi> index, TNode<Object> value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
    StoreFixedArrayElement(array, index, value, barrier_mode, 0,
                           SMI_PARAMETERS);
  }
  void StoreFixedArrayElement(TNode<FixedArray> array, TNode<IntPtrT> index,
                              TNode<Smi> value) {
    StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0);
  }
  void StoreFixedArrayElement(TNode<FixedArray> array, TNode<Smi> index,
                              TNode<Smi> value) {
    StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0,
                           SMI_PARAMETERS);
  }

  void StoreFixedDoubleArrayElement(
      TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
      ParameterMode parameter_mode = INTPTR_PARAMETERS,
      CheckBounds check_bounds = CheckBounds::kAlways);
  // This doesn't emit a bounds-check. As part of the security-performance
  // tradeoff, only use it if it is performance critical.
  void UnsafeStoreFixedDoubleArrayElement(
      TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
      ParameterMode parameter_mode = INTPTR_PARAMETERS) {
    return StoreFixedDoubleArrayElement(object, index, value, parameter_mode,
                                        CheckBounds::kDebugOnly);
  }

  void StoreFixedDoubleArrayElementSmi(TNode<FixedDoubleArray> object,
                                       TNode<Smi> index,
                                       TNode<Float64T> value) {
    StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS);
  }

  void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array, Node* index,
                                 ParameterMode mode = INTPTR_PARAMETERS);
  void StoreFixedDoubleArrayHoleSmi(TNode<FixedDoubleArray> array,
                                    TNode<Smi> index) {
    StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS);
  }

  void StoreFeedbackVectorSlot(
      Node* object, Node* index, Node* value,
      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
      int additional_offset = 0,
      ParameterMode parameter_mode = INTPTR_PARAMETERS);

  void EnsureArrayLengthWritable(TNode<Map> map, Label* bailout);

  // EnsureArrayPushable verifies that the receiver with this map:
  //   1. Is not a prototype.
  //   2. Is not a dictionary.
  //   3. Has a writable length property.
  // It returns the ElementsKind as a node for further division into cases.
  TNode<Int32T> EnsureArrayPushable(TNode<Map> map, Label* bailout);
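
  // Example (illustrative sketch, not part of the interface; |receiver_map|
  // is an assumed TNode<Map> and |bailout| an assumed label):
  //
  //   TNode<Int32T> kind = EnsureArrayPushable(receiver_map, &bailout);
  //   GotoIf(Int32GreaterThan(kind, Int32Constant(HOLEY_ELEMENTS)), &bailout);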

  void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
                            Label* bailout, Node* elements, Node* index,
                            Node* value);
  // Consumes |args| into the array and returns the new length as a tagged Smi.
  TNode<Smi> BuildAppendJSArray(ElementsKind kind, SloppyTNode<JSArray> array,
                                CodeStubArguments* args,
                                TVariable<IntPtrT>* arg_index, Label* bailout);
  // Pushes |value| onto the end of |array|.
  void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
                          Label* bailout);
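
  // Example (illustrative sketch, not part of the interface; |array|, |value|,
  // |args| and |slow| are assumed locals and labels, and the elements kind
  // must match |array|):
  //
  //   // Push a single value:
  //   BuildAppendJSArray(PACKED_SMI_ELEMENTS, array, value, &slow);
  //
  //   // Append the remaining builtin arguments, starting at |arg_index|:
  //   TVARIABLE(IntPtrT, arg_index, IntPtrConstant(0));
  //   TNode<Smi> new_length =
  //       BuildAppendJSArray(PACKED_ELEMENTS, array, &args, &arg_index, &slow);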

  void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
                                 Node* value);

  Node* AllocateCellWithValue(Node* value,
                              WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
  Node* AllocateSmiCell(int value = 0) {
    return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER);
  }

  Node* LoadCellValue(Node* cell);

  void StoreCellValue(Node* cell, Node* value,
                      WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

  // Allocate a HeapNumber without initializing its value.
  TNode<HeapNumber> AllocateHeapNumber();
  // Allocate a HeapNumber with a specific value.
  TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value);
  TNode<HeapNumber> AllocateHeapNumberWithValue(double value) {
    return AllocateHeapNumberWithValue(Float64Constant(value));
  }

  // Allocate a MutableHeapNumber with a specific value.
  TNode<MutableHeapNumber> AllocateMutableHeapNumberWithValue(
      SloppyTNode<Float64T> value);

  // Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
  // Does not initialize the digits.
  TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
  // Like above, but allowing custom bitfield initialization.
  TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
  void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<Word32T> bitfield);
  void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
                        TNode<UintPtrT> digit);
  TNode<Word32T> LoadBigIntBitfield(TNode<BigInt> bigint);
  TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
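
  // Example (illustrative sketch, not part of the interface; |digit| is an
  // assumed TNode<UintPtrT>):
  //
  //   TNode<BigInt> bigint = AllocateBigInt(IntPtrConstant(1));
  //   StoreBigIntDigit(bigint, 0, digit);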

  // Allocate a SeqOneByteString with the given length.
  TNode<String> AllocateSeqOneByteString(uint32_t length,
                                         AllocationFlags flags = kNone);
  TNode<String> AllocateSeqOneByteString(Node* context, TNode<Uint32T> length,
                                         AllocationFlags flags = kNone);
  // Allocate a SeqTwoByteString with the given length.
  TNode<String> AllocateSeqTwoByteString(uint32_t length,
                                         AllocationFlags flags = kNone);
  TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Uint32T> length,
                                         AllocationFlags flags = kNone);
  // Allocate a SlicedOneByteString with the given length, parent and offset.
  // |length| and |offset| are expected to be tagged.
  TNode<String> AllocateSlicedOneByteString(TNode<Uint32T> length,
                                            TNode<String> parent,
                                            TNode<Smi> offset);
  // Allocate a SlicedTwoByteString with the given length, parent and offset.
  // |length| and |offset| are expected to be tagged.
  TNode<String> AllocateSlicedTwoByteString(TNode<Uint32T> length,
                                            TNode<String> parent,
                                            TNode<Smi> offset);

  // Allocate an appropriate one- or two-byte ConsString with the first and
  // second parts specified by |left| and |right|.
  TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
                                   TNode<String> right, Variable* var_feedback);

  TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
  TNode<NameDictionary> AllocateNameDictionary(
      TNode<IntPtrT> at_least_space_for);
  TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
      TNode<IntPtrT> capacity);
  TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
                                           Label* large_object_fallback);

  template <typename CollectionType>
  Node* AllocateOrderedHashTable();

  // Builds code that finds the OrderedHashTable entry for a key with the given
  // hash code {hash}, using the comparison code generated by {key_compare}.
  // The code jumps to {entry_found} if the key is found, or to {not_found} if
  // the key was not found. In the {entry_found} branch, the variable
  // {entry_start_position} will be bound to the index of the entry (relative
  // to OrderedHashTable::kHashTableStartIndex).
  //
  // The {CollectionType} template parameter stands for the particular instance
  // of OrderedHashTable; it should be OrderedHashMap or OrderedHashSet.
  template <typename CollectionType>
  void FindOrderedHashTableEntry(
      Node* table, Node* hash,
      const std::function<void(Node*, Label*, Label*)>& key_compare,
      Variable* entry_start_position, Label* entry_found, Label* not_found);
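
  // Example (illustrative sketch, not part of the interface; |table|, |key|
  // and |hash| are assumed locals, and the identity comparison stands in for
  // a real key comparison):
  //
  //   TVARIABLE(IntPtrT, entry_start);
  //   Label found(this), not_found(this);
  //   FindOrderedHashTableEntry<OrderedHashMap>(
  //       table, hash,
  //       [&](Node* candidate_key, Label* if_same, Label* if_not_same) {
  //         Branch(WordEqual(candidate_key, key), if_same, if_not_same);
  //       },
  //       &entry_start, &found, &not_found);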

  template <typename CollectionType>
  TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);

  Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
  void InitializeStructBody(Node* object, Node* map, Node* size,
                            int start_offset = Struct::kHeaderSize);

  Node* AllocateJSObjectFromMap(
      Node* map, Node* properties = nullptr, Node* elements = nullptr,
      AllocationFlags flags = kNone,
      SlackTrackingMode slack_tracking_mode = kNoSlackTracking);

  void InitializeJSObjectFromMap(
      Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
      Node* elements = nullptr,
      SlackTrackingMode slack_tracking_mode = kNoSlackTracking);

  void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
                                               Node* instance_size);
  void InitializeJSObjectBodyNoSlackTracking(
      Node* object, Node* map, Node* instance_size,
      int start_offset = JSObject::kHeaderSize);

  TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
                                          ParameterMode capacity_mode);

  // Allocate and return a JSArray with initialized header fields and its
  // uninitialized elements.
  // The ParameterMode argument is only used for the capacity parameter.
  std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
  AllocateUninitializedJSArrayWithElements(
      ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
      Node* allocation_site, Node* capacity,
      ParameterMode capacity_mode = INTPTR_PARAMETERS,
      AllocationFlags allocation_flags = kNone);

  // Allocate a JSArray and fill elements with the hole.
  // The ParameterMode argument is only used for the capacity parameter.
  TNode<JSArray> AllocateJSArray(
      ElementsKind kind, TNode<Map> array_map, Node* capacity,
      TNode<Smi> length, Node* allocation_site = nullptr,
      ParameterMode capacity_mode = INTPTR_PARAMETERS,
      AllocationFlags allocation_flags = kNone);

  TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
                                 TNode<Smi> capacity, TNode<Smi> length) {
    return AllocateJSArray(kind, array_map, capacity, length, nullptr,
                           SMI_PARAMETERS);
  }

  TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
                                 TNode<IntPtrT> capacity, TNode<Smi> length) {
    return AllocateJSArray(kind, array_map, capacity, length, nullptr,
                           INTPTR_PARAMETERS);
  }
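
  // Example (illustrative sketch, not part of the interface; assumes a
  // TNode<Context> |context| is in scope):
  //
  //   TNode<Context> native_context = LoadNativeContext(context);
  //   TNode<Map> array_map =
  //       LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
  //   TNode<JSArray> array = AllocateJSArray(
  //       PACKED_ELEMENTS, array_map, IntPtrConstant(3), SmiConstant(3));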

  // Allocate a JSArray and initialize the header fields.
  TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
                                 TNode<FixedArrayBase> elements,
                                 TNode<Smi> length,
                                 Node* allocation_site = nullptr);

  enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
  // Clone a fast JSArray |array| into a new fast JSArray.
  // |convert_holes| tells the function whether to convert holes to undefined.
  // If |convert_holes| is set to kConvertToUndefined but the function did not
  // find any hole in |array|, the resulting array will have the same elements
  // kind as |array|. If the function did find a hole, it will convert holes in
  // |array| to undefined in the resulting array, which will then have
  // PACKED_ELEMENTS kind.
  // If |convert_holes| is set to kDontConvert, holes are also copied to the
  // resulting array, which will have the same elements kind as |array|. The
  // function generates significantly less code in this case.
  Node* CloneFastJSArray(
      Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS,
      Node* allocation_site = nullptr,
      HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);

  Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count,
                           ParameterMode mode = INTPTR_PARAMETERS,
                           Node* capacity = nullptr,
                           Node* allocation_site = nullptr);

  TNode<FixedArrayBase> AllocateFixedArray(
      ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
      AllocationFlags flags = kNone,
      SloppyTNode<Map> fixed_array_map = nullptr);

  TNode<FixedArrayBase> AllocateFixedArray(
      ElementsKind kind, TNode<IntPtrT> capacity, AllocationFlags flags,
      SloppyTNode<Map> fixed_array_map = nullptr) {
    return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags,
                              fixed_array_map);
  }

  TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
    return UncheckedCast<FixedArray>(AllocateFixedArray(
        PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone));
  }

  TNode<FixedArray> AllocateZeroedFixedArray(TNode<IntPtrT> capacity) {
    TNode<FixedArray> result = UncheckedCast<FixedArray>(
        AllocateFixedArray(PACKED_ELEMENTS, capacity,
                           AllocationFlag::kAllowLargeObjectAllocation));
    FillFixedArrayWithSmiZero(result, capacity);
    return result;
  }

  TNode<FixedDoubleArray> AllocateZeroedFixedDoubleArray(
      TNode<IntPtrT> capacity) {
    TNode<FixedDoubleArray> result = UncheckedCast<FixedDoubleArray>(
        AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity,
                           AllocationFlag::kAllowLargeObjectAllocation));
    FillFixedDoubleArrayWithZero(result, capacity);
    return result;
  }

  TNode<FixedArray> AllocateFixedArrayWithHoles(TNode<IntPtrT> capacity,
                                                AllocationFlags flags) {
    TNode<FixedArray> result = UncheckedCast<FixedArray>(
        AllocateFixedArray(PACKED_ELEMENTS, capacity, flags));
    FillFixedArrayWithValue(PACKED_ELEMENTS, result, IntPtrConstant(0),
                            capacity, RootIndex::kTheHoleValue);
    return result;
  }

  TNode<FixedDoubleArray> AllocateFixedDoubleArrayWithHoles(
      TNode<IntPtrT> capacity, AllocationFlags flags) {
    TNode<FixedDoubleArray> result = UncheckedCast<FixedDoubleArray>(
        AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, flags));
    FillFixedArrayWithValue(PACKED_DOUBLE_ELEMENTS, result, IntPtrConstant(0),
                            capacity, RootIndex::kTheHoleValue);
    return result;
  }

  Node* AllocatePropertyArray(Node* capacity,
                              ParameterMode mode = INTPTR_PARAMETERS,
                              AllocationFlags flags = kNone);

  // Perform CreateArrayIterator (ES #sec-createarrayiterator).
  TNode<JSArrayIterator> CreateArrayIterator(TNode<Context> context,
                                             TNode<Object> object,
                                             IterationKind mode);

  Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
  Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);

  TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
                                       TNode<Object>