// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Note 1: Any file that includes this one should include object-macros-undef.h
// at the bottom.

// Note 2: This file is deliberately missing the include guards (the undeffing
// approach wouldn't work otherwise).
//
// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD

// The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering semantics.
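// For example (sketch, with a hypothetical field "foo"): RELAXED_SMI_ACCESSORS
// below generates
//   int relaxed_read_foo() const;
//   void relaxed_write_foo(int value);
// whereas SYNCHRONIZED_SMI_ACCESSORS generates synchronized_foo() and
// synchronized_set_foo(int value), and the plain SMI_ACCESSORS variant
// generates non-atomic foo()/set_foo(int value).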

#include "src/v8memory.h"

// Since this changes visibility, it should always be the last macro in a
// class definition.
#define OBJECT_CONSTRUCTORS(Type, ...)              \
 public:                                            \
  constexpr Type() : __VA_ARGS__() {}               \
  Type* operator->() { return this; }               \
  const Type* operator->() const { return this; }   \
                                                    \
 protected:                                         \
  explicit inline Type(Address ptr)

#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
  inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
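
// Usage sketch (Foo and its superclass are hypothetical):
//   class Foo : public HeapObject {
//    public:
//     ...
//     OBJECT_CONSTRUCTORS(Foo, HeapObject);
//   };
// and in the corresponding -inl.h file:
//   OBJECT_CONSTRUCTORS_IMPL(Foo, HeapObject)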

#define NEVER_READ_ONLY_SPACE   \
  inline Heap* GetHeap() const; \
  inline Isolate* GetIsolate() const;

// TODO(leszeks): Add checks in the factory that we never allocate these
// objects in RO space.
#define NEVER_READ_ONLY_SPACE_IMPL(Type)                \
  Heap* Type::GetHeap() const {                         \
    return NeverReadOnlySpaceObject::GetHeap(*this);    \
  }                                                     \
  Isolate* Type::GetIsolate() const {                   \
    return NeverReadOnlySpaceObject::GetIsolate(*this); \
  }

#define DECL_PRIMITIVE_ACCESSORS(name, type) \
  inline type name() const;                  \
  inline void set_##name(type value);

#define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)

#define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)

#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)

#define DECL_UINT16_ACCESSORS(name) \
  inline uint16_t name() const;     \
  inline void set_##name(int value);

#define DECL_INT16_ACCESSORS(name) \
  inline int16_t name() const;     \
  inline void set_##name(int16_t value);

#define DECL_UINT8_ACCESSORS(name) \
  inline uint8_t name() const;     \
  inline void set_##name(int value);

#define DECL_ACCESSORS(name, type)   \
  inline type name() const;          \
  inline void set_##name(type value, \
                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
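
// For a hypothetical tagged field "bar", DECL_ACCESSORS(bar, Object) declares
// (sketch):
//   inline Object bar() const;
//   inline void set_bar(Object value,
//                       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);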

#define DECL_CAST(Type)                                 \
  V8_INLINE static Type cast(Object object);            \
  V8_INLINE static Type unchecked_cast(Object object) { \
    return bit_cast<Type>(object);                      \
  }
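
// Usage sketch (Foo is a hypothetical type): Foo::cast(obj) constructs a Foo,
// which for types defined via OBJECT_CONSTRUCTORS_IMPL above runs
// SLOW_DCHECK(IsFoo()); Foo::unchecked_cast(obj) bit_casts with no check:
//   Foo checked = Foo::cast(obj);
//   Foo trusted = Foo::unchecked_cast(obj);  // caller guarantees the type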

#define CAST_ACCESSOR(Type) \
  Type Type::cast(Object object) { return Type(object.ptr()); }

#define INT_ACCESSORS(holder, name, offset)                           \
  int holder::name() const { return READ_INT_FIELD(*this, offset); } \
  void holder::set_##name(int value) { WRITE_INT_FIELD(*this, offset, value); }

#define INT32_ACCESSORS(holder, name, offset)                              \
  int32_t holder::name() const { return READ_INT32_FIELD(*this, offset); } \
  void holder::set_##name(int32_t value) {                                 \
    WRITE_INT32_FIELD(*this, offset, value);                               \
  }
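
// Expansion sketch for INT_ACCESSORS(Foo, length, kLengthOffset), with Foo
// and kLengthOffset hypothetical:
//   int Foo::length() const { return READ_INT_FIELD(*this, kLengthOffset); }
//   void Foo::set_length(int value) {
//     WRITE_INT_FIELD(*this, kLengthOffset, value);
//   }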

#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
  int32_t holder::name() const {                      \
    return RELAXED_READ_INT32_FIELD(*this, offset);   \
  }                                                   \
  void holder::set_##name(int32_t value) {            \
    RELAXED_WRITE_INT32_FIELD(*this, offset, value);  \
  }

#define UINT16_ACCESSORS(holder, name, offset)                               \
  uint16_t holder::name() const { return READ_UINT16_FIELD(*this, offset); } \
  void holder::set_##name(int value) {                                       \
    DCHECK_GE(value, 0);                                                     \
    DCHECK_LE(value, static_cast<uint16_t>(-1));                             \
    WRITE_UINT16_FIELD(*this, offset, value);                                \
  }

#define UINT8_ACCESSORS(holder, name, offset)                               \
  uint8_t holder::name() const { return READ_UINT8_FIELD(*this, offset); } \
  void holder::set_##name(int value) {                                      \
    DCHECK_GE(value, 0);                                                    \
    DCHECK_LE(value, static_cast<uint8_t>(-1));                             \
    WRITE_UINT8_FIELD(*this, offset, value);                                \
  }

#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
                           set_condition)                             \
  type holder::name() const {                                         \
    type value = type::cast(READ_FIELD(*this, offset));               \
    DCHECK(get_condition);                                            \
    return value;                                                     \
  }                                                                   \
  void holder::set_##name(type value, WriteBarrierMode mode) {        \
    DCHECK(set_condition);                                            \
    WRITE_FIELD(*this, offset, value);                                \
    CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);            \
  }

#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
  ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)

#define ACCESSORS(holder, name, type, offset) \
  ACCESSORS_CHECKED(holder, name, type, offset, true)
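
// ACCESSORS(Foo, prototype, HeapObject, kPrototypeOffset) (all names
// hypothetical) therefore defines an unconditional tagged-field getter plus a
// setter that performs the write and then the conditional write barrier:
//   HeapObject Foo::prototype() const { ... READ_FIELD ... }
//   void Foo::set_prototype(HeapObject value, WriteBarrierMode mode) { ... }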

#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset,   \
                                        get_condition, set_condition) \
  type holder::name() const {                                         \
    type value = type::cast(ACQUIRE_READ_FIELD(*this, offset));       \
    DCHECK(get_condition);                                            \
    return value;                                                     \
  }                                                                   \
  void holder::set_##name(type value, WriteBarrierMode mode) {        \
    DCHECK(set_condition);                                            \
    RELEASE_WRITE_FIELD(*this, offset, value);                        \
    CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);            \
  }

#define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
  SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, condition,      \
                                  condition)

#define SYNCHRONIZED_ACCESSORS(holder, name, type, offset) \
  SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, true)
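
// The getter/setter pair uses acquire/release ordering: a thread that does
//   obj->set_foo(value);   // release store
// publishes all of its earlier writes to a thread that later observes it via
//   type v = obj->foo();   // acquire load
// ("foo" is a hypothetical field; see src/base/atomicops.h).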

#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition,  \
                                set_condition)                        \
  MaybeObject holder::name() const {                                  \
    MaybeObject value = READ_WEAK_FIELD(*this, offset);               \
    DCHECK(get_condition);                                            \
    return value;                                                     \
  }                                                                   \
  void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
    DCHECK(set_condition);                                            \
    WRITE_WEAK_FIELD(*this, offset, value);                           \
    CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);       \
  }

#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
  WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, condition)

#define WEAK_ACCESSORS(holder, name, offset) \
  WEAK_ACCESSORS_CHECKED(holder, name, offset, true)

// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
  int holder::name() const {                                   \
    DCHECK(condition);                                         \
    Object value = READ_FIELD(*this, offset);                  \
    return Smi::ToInt(value);                                  \
  }                                                            \
  void holder::set_##name(int value) {                         \
    DCHECK(condition);                                         \
    WRITE_FIELD(*this, offset, Smi::FromInt(value));           \
  }

#define SMI_ACCESSORS(holder, name, offset) \
  SMI_ACCESSORS_CHECKED(holder, name, offset, true)

#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset)     \
  int holder::synchronized_##name() const {                  \
    Object value = ACQUIRE_READ_FIELD(*this, offset);        \
    return Smi::ToInt(value);                                \
  }                                                          \
  void holder::synchronized_set_##name(int value) {          \
    RELEASE_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
  }
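
// Worked example (64-bit build with a full 32-bit Smi payload assumed):
// Smi::FromInt(5) produces a tagged word with 5 shifted into the payload bits
// and a low tag bit of 0 (heap object pointers carry kHeapObjectTag instead),
// and Smi::ToInt() shifts it back:
//   obj->set_foo(5);     // WRITE_FIELD(*this, offset, Smi::FromInt(5))
//   int x = obj->foo();  // Smi::ToInt(READ_FIELD(...)) == 5
// ("foo" is a hypothetical SMI_ACCESSORS field).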

#define RELAXED_SMI_ACCESSORS(holder, name, offset)          \
  int holder::relaxed_read_##name() const {                  \
    Object value = RELAXED_READ_FIELD(*this, offset);        \
    return Smi::ToInt(value);                                \
  }                                                          \
  void holder::relaxed_write_##name(int value) {             \
    RELAXED_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
  }

#define BOOL_GETTER(holder, field, name, offset) \
  bool holder::name() const { return BooleanBit::get(field(), offset); }

#define BOOL_ACCESSORS(holder, field, name, offset)                      \
  bool holder::name() const { return BooleanBit::get(field(), offset); } \
  void holder::set_##name(bool value) {                                  \
    set_##field(BooleanBit::set(field(), offset, value));                \
  }

#define BIT_FIELD_ACCESSORS(holder, field, name, BitField)      \
  typename BitField::FieldType holder::name() const {           \
    return BitField::decode(field());                           \
  }                                                             \
  void holder::set_##name(typename BitField::FieldType value) { \
    set_##field(BitField::update(field(), value));              \
  }
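
// Usage sketch with a hypothetical 2-bit field packed inside flags():
//   class KindBits : public BitField<Kind, /* shift */ 0, /* size */ 2> {};
//   BIT_FIELD_ACCESSORS(Foo, flags, kind, KindBits)
// defines Foo::kind() as KindBits::decode(flags()) and Foo::set_kind(k) as
// set_flags(KindBits::update(flags(), k)).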

#define INSTANCE_TYPE_CHECKER(type, forinstancetype)    \
  V8_INLINE bool Is##type(InstanceType instance_type) { \
    return instance_type == forinstancetype;            \
  }

#define TYPE_CHECKER(type, ...)                                   \
  bool HeapObject::Is##type() const {                             \
    return InstanceTypeChecker::Is##type(map()->instance_type()); \
  }
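
// E.g. TYPE_CHECKER(Foo) (hypothetical type) defines HeapObject::IsFoo(),
// which loads the map, reads its instance type, and dispatches to
// InstanceTypeChecker::IsFoo(instance_type) -- for a single-type check via
// INSTANCE_TYPE_CHECKER, a plain equality comparison against the type's
// instance type constant.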

#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
  int16_t holder::name() const {                      \
    return RELAXED_READ_INT16_FIELD(*this, offset);   \
  }                                                   \
  void holder::set_##name(int16_t value) {            \
    RELAXED_WRITE_INT16_FIELD(*this, offset, value);  \
  }

#define FIELD_ADDR(p, offset) ((p).ptr() + offset - kHeapObjectTag)
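
// Tagged heap object pointers carry kHeapObjectTag (1) in the low bits, so
// the raw field address is "tagged pointer + offset - 1". Worked example
// (addresses hypothetical): for an object whose tagged pointer is 0x1001 and
// a field at offset 8, FIELD_ADDR yields 0x1001 + 8 - 1 == 0x1008, the actual
// untagged address of the field.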

#define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))

#define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))

#define ACQUIRE_READ_FIELD(p, offset) \
  ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load()

#define RELAXED_READ_FIELD(p, offset) \
  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()

#define RELAXED_READ_WEAK_FIELD(p, offset) \
  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()

#ifdef V8_CONCURRENT_MARKING
#define WRITE_FIELD(p, offset, value) \
  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
#define WRITE_WEAK_FIELD(p, offset, value) \
  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
#else
#define WRITE_FIELD(p, offset, value) \
  ObjectSlot(FIELD_ADDR(p, offset)).store(value)
#define WRITE_WEAK_FIELD(p, offset, value) \
  MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
#endif
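
// With V8_CONCURRENT_MARKING the concurrent marker may read a tagged field
// while the mutator writes it, so even a "plain" write such as
//   WRITE_FIELD(obj, kFooOffset, value)  // obj and kFooOffset hypothetical
// must be a relaxed atomic store to avoid a C++ data race; without concurrent
// marking it is an ordinary non-atomic store.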

#define RELEASE_WRITE_FIELD(p, offset, value) \
  ObjectSlot(FIELD_ADDR(p, offset)).Release_Store(value)

#define RELAXED_WRITE_FIELD(p, offset, value) \
  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)

#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)

#define WRITE_BARRIER(object, offset, value)                        \
  do {                                                              \
    DCHECK_NOT_NULL(GetHeapFromWritableObject(object));             \
    MarkingBarrier(object, (object)->RawField(offset), value);      \
    GenerationalBarrier(object, (object)->RawField(offset), value); \
  } while (false)

#define WEAK_WRITE_BARRIER(object, offset, value)                            \
  do {                                                                       \
    DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                      \
    MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value);      \
    GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
  } while (false)

#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value)                  \
  do {                                                                      \
    DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                     \
    EphemeronHashTable table = EphemeronHashTable::cast(object);            \
    MarkingBarrier(object, (object)->RawField(offset), value);              \
    GenerationalEphemeronKeyBarrier(table, (object)->RawField(offset),      \
                                    value);                                 \
  } while (false)

#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)        \
  do {                                                                \
    DCHECK_NOT_NULL(GetHeapFromWritableObject(object));               \
    DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER);              \
    if (mode != SKIP_WRITE_BARRIER) {                                 \
      if (mode == UPDATE_WRITE_BARRIER) {                             \
        MarkingBarrier(object, (object)->RawField(offset), value);    \
      }                                                               \
      GenerationalBarrier(object, (object)->RawField(offset), value); \
    }                                                                 \
  } while (false)
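
// Mode semantics: SKIP_WRITE_BARRIER elides both barriers; any other mode
// runs the generational barrier; only UPDATE_WRITE_BARRIER additionally runs
// the marking barrier. The ACCESSORS setters above default to the safe
//   CONDITIONAL_WRITE_BARRIER(*this, offset, value, UPDATE_WRITE_BARRIER);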

#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode)         \
  do {                                                                      \
    DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                     \
    DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER);                    \
    if (mode != SKIP_WRITE_BARRIER) {                                       \
      if (mode == UPDATE_WRITE_BARRIER) {                                   \
        MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
      }                                                                     \
      GenerationalBarrier(object, (object)->RawMaybeWeakField(offset),      \
                          value);                                           \
    }                                                                       \
  } while (false)

#define CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER(object, offset, value, mode) \
  do {                                                                       \
    DCHECK_NOT_NULL(GetHeapFromWritableObject(object));                      \
    DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER);                     \
    EphemeronHashTable table = EphemeronHashTable::cast(object);             \
    if (mode != SKIP_WRITE_BARRIER) {                                        \
      if (mode == UPDATE_WRITE_BARRIER) {                                    \
        MarkingBarrier(object, (object)->RawField(offset), value);           \
      }                                                                      \
      GenerationalEphemeronKeyBarrier(table, (object)->RawField(offset),     \
                                      value);                                \
    }                                                                        \
  } while (false)

#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))

#define WRITE_DOUBLE_FIELD(p, offset, value) \
  WriteDoubleValue(FIELD_ADDR(p, offset), value)

#define READ_INT_FIELD(p, offset) \
  (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))

#define WRITE_INT_FIELD(p, offset, value) \
  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)

#define ACQUIRE_READ_INT32_FIELD(p, offset) \
  static_cast<int32_t>(base::Acquire_Load(  \
      reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))

#define READ_UINT8_FIELD(p, offset) \
  (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT8_FIELD(p, offset, value) \
  (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_INT8_FIELD(p, offset, value)                             \
  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                      static_cast<base::Atomic8>(value));

#define READ_INT8_FIELD(p, offset) \
  (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_INT8_FIELD(p, offset) \
  static_cast<int8_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_INT8_FIELD(p, offset, value) \
  (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT16_FIELD(p, offset) \
  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT16_FIELD(p, offset, value) \
  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT16_FIELD(p, offset) \
  (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT16_FIELD(p, offset, value) \
  (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_READ_INT16_FIELD(p, offset) \
  static_cast<int16_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))

#define RELAXED_WRITE_INT16_FIELD(p, offset, value)             \
  base::Relaxed_Store(                                          \
      reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic16>(value));

#define READ_UINT32_FIELD(p, offset) \
  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_UINT32_FIELD(p, offset) \
  static_cast<uint32_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))

#define WRITE_UINT32_FIELD(p, offset, value) \
  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_UINT32_FIELD(p, offset, value)            \
  base::Relaxed_Store(                                          \
      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic32>(value));

#define READ_INT32_FIELD(p, offset) \
  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_INT32_FIELD(p, offset) \
  static_cast<int32_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))

#define WRITE_INT32_FIELD(p, offset, value) \
  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)

#define RELEASE_WRITE_INT32_FIELD(p, offset, value)             \
  base::Release_Store(                                          \
      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic32>(value))

#define RELAXED_WRITE_INT32_FIELD(p, offset, value)             \
  base::Relaxed_Store(                                          \
      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic32>(value));

#define READ_FLOAT_FIELD(p, offset) \
  (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))

#define WRITE_FLOAT_FIELD(p, offset, value) \
  (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)

// TODO(ishell, v8:8875): When pointer compression is enabled, 8-byte-sized
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned, so we have to access them in an unaligned-pointer-friendly way to
// avoid undefined behavior in the C++ code.
#ifdef V8_COMPRESS_POINTERS

#define READ_INTPTR_FIELD(p, offset) \
  ReadUnalignedValue<intptr_t>(FIELD_ADDR(p, offset))

#define WRITE_INTPTR_FIELD(p, offset, value) \
  WriteUnalignedValue<intptr_t>(FIELD_ADDR(p, offset), value)

#define READ_UINTPTR_FIELD(p, offset) \
  ReadUnalignedValue<uintptr_t>(FIELD_ADDR(p, offset))

#define WRITE_UINTPTR_FIELD(p, offset, value) \
  WriteUnalignedValue<uintptr_t>(FIELD_ADDR(p, offset), value)

#define READ_UINT64_FIELD(p, offset) \
  ReadUnalignedValue<uint64_t>(FIELD_ADDR(p, offset))

#define WRITE_UINT64_FIELD(p, offset, value) \
  WriteUnalignedValue<uint64_t>(FIELD_ADDR(p, offset), value)

#else  // V8_COMPRESS_POINTERS

#define READ_INTPTR_FIELD(p, offset) \
  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INTPTR_FIELD(p, offset, value) \
  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINTPTR_FIELD(p, offset) \
  (*reinterpret_cast<const uintptr_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINTPTR_FIELD(p, offset, value) \
  (*reinterpret_cast<uintptr_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT64_FIELD(p, offset) \
  (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT64_FIELD(p, offset, value) \
  (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)

#endif  // V8_COMPRESS_POINTERS
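
// Why unaligned access: with compressed pointers kTaggedSize is 4, so an
// 8-byte field may sit at an address that is only 4-byte aligned. The
// ReadUnalignedValue/WriteUnalignedValue helpers perform memcpy-style access,
// which is well-defined for any alignment, e.g. (names hypothetical):
//   uint64_t bits = READ_UINT64_FIELD(obj, kDataOffset);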

#define READ_BYTE_FIELD(p, offset) \
  (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_BYTE_FIELD(p, offset) \
  static_cast<byte>(base::Relaxed_Load(    \
      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_BYTE_FIELD(p, offset, value) \
  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_BYTE_FIELD(p, offset, value)                             \
  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                      static_cast<base::Atomic8>(value));

#ifdef VERIFY_HEAP
#define DECL_VERIFIER(Name) void Name##Verify(Isolate* isolate);
#define EXPORT_DECL_VERIFIER(Name) \
  V8_EXPORT_PRIVATE void Name##Verify(Isolate* isolate);
#else
#define DECL_VERIFIER(Name)
#define EXPORT_DECL_VERIFIER(Name)
#endif

#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
  type DeoptimizationData::name() const {          \
    return type::cast(get(k##name##Index));        \
  }                                                \
  void DeoptimizationData::Set##name(type value) { set(k##name##Index, value); }

#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type)                \
  type DeoptimizationData::name(int i) const {                  \
    return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
  }                                                             \
  void DeoptimizationData::Set##name(int i, type value) {       \
    set(IndexForEntry(i) + k##name##Offset, value);             \
  }