// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/register-configuration.h"
#include "src/base/lazy-instance.h"
#include "src/cpu-features.h"
#include "src/globals.h"
#include "src/register-arch.h"

namespace v8 {
namespace internal {

namespace {

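// REGISTER_COUNT expands each register name to "1 +", so applying it across a
// register list and appending a trailing 0 turns the list into a compile-time
// sum, e.g. ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0 -> 1 + 1 + ... + 0.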
#define REGISTER_COUNT(R) 1 +
static const int kMaxAllocatableGeneralRegisterCount =
    ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
static const int kMaxAllocatableDoubleRegisterCount =
    ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT)0;

static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
    ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE

#define REGISTER_CODE(R) kDoubleCode_##R,
static const int kAllocatableDoubleCodes[] = {
    ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
#if V8_TARGET_ARCH_ARM
static const int kAllocatableNoVFP32DoubleCodes[] = {
    ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_CODE)};
#endif  // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE

STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
              Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              FloatRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              DoubleRegister::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
              Simd128Register::kNumRegisters);

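// Returns the number of allocatable double registers for the target. On ARM
// this is a runtime decision: without the VFP32DREGS feature only d0-d15
// exist, so the allocatable double set shrinks to the reduced register list.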
static int get_num_allocatable_double_registers() {
  return
#if V8_TARGET_ARCH_IA32
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_X64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_ARM
      CpuFeatures::IsSupported(VFP32DREGS)
          ? kMaxAllocatableDoubleRegisterCount
          : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0);
#elif V8_TARGET_ARCH_ARM64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390
      kMaxAllocatableDoubleRegisterCount;
#else
#error Unsupported target architecture.
#endif
}

#undef REGISTER_COUNT

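// Returns the allocatable double codes matching the count above: on ARM the
// reduced list is used when VFP32DREGS is unavailable.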
static const int* get_allocatable_double_codes() {
  return
#if V8_TARGET_ARCH_ARM
      CpuFeatures::IsSupported(VFP32DREGS) ? kAllocatableDoubleCodes
                                           : kAllocatableNoVFP32DoubleCodes;
#else
      kAllocatableDoubleCodes;
#endif
}

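// The default configuration makes every allocatable register of the target
// architecture available to the register allocator.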
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
 public:
  ArchDefaultRegisterConfiguration()
      : RegisterConfiguration(
            Register::kNumRegisters, DoubleRegister::kNumRegisters,
            kMaxAllocatableGeneralRegisterCount,
            get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
            get_allocatable_double_codes(),
            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
  }
};

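// Defines a getter that lazily constructs the configuration on first use and
// intentionally leaks it: the default configuration lives for the lifetime of
// the process.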
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
                                GetDefaultRegisterConfiguration)

// Allocatable registers with the masking register removed.
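// kSpeculationPoisonRegister is reserved for the speculation poisoning
// mitigation, so it is filtered out of the allocatable general set below.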
class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
 public:
  ArchDefaultPoisoningRegisterConfiguration()
      : RegisterConfiguration(
            Register::kNumRegisters, DoubleRegister::kNumRegisters,
            kMaxAllocatableGeneralRegisterCount - 1,
            get_num_allocatable_double_registers(),
            InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
  }

 private:
  static const int* InitializeGeneralRegisterCodes() {
    int filtered_index = 0;
    for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
      if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
        allocatable_general_codes_[filtered_index] =
            kAllocatableGeneralCodes[i];
        filtered_index++;
      }
    }
    DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
    return allocatable_general_codes_;
  }

  static int
      allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
};

int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
    [kMaxAllocatableGeneralRegisterCount - 1];

DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
                                GetDefaultPoisoningRegisterConfiguration)

// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers that the architecture supports, which results in generated
// assembly that uses fewer registers. Currently, it is only used by the
// RecordWrite code stub.
class RestrictedRegisterConfiguration : public RegisterConfiguration {
 public:
  RestrictedRegisterConfiguration(
      int num_allocatable_general_registers,
      std::unique_ptr<int[]> allocatable_general_register_codes,
      std::unique_ptr<char const* []> allocatable_general_register_names)
      : RegisterConfiguration(
            Register::kNumRegisters, DoubleRegister::kNumRegisters,
            num_allocatable_general_registers,
            get_num_allocatable_double_registers(),
            allocatable_general_register_codes.get(),
            get_allocatable_double_codes(),
            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
        allocatable_general_register_codes_(
            std::move(allocatable_general_register_codes)),
        allocatable_general_register_names_(
            std::move(allocatable_general_register_names)) {
    for (int i = 0; i < num_allocatable_general_registers; ++i) {
      DCHECK(
          IsAllocatableGeneralRegister(allocatable_general_register_codes_[i]));
    }
  }

  bool IsAllocatableGeneralRegister(int code) {
    for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
      if (code == kAllocatableGeneralCodes[i]) {
        return true;
      }
    }
    return false;
  }

 private:
  std::unique_ptr<int[]> allocatable_general_register_codes_;
  std::unique_ptr<char const* []> allocatable_general_register_names_;
};

}  // namespace

const RegisterConfiguration* RegisterConfiguration::Default() {
  return GetDefaultRegisterConfiguration();
}

const RegisterConfiguration* RegisterConfiguration::Poisoning() {
  return GetDefaultPoisoningRegisterConfiguration();
}

const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
    RegList registers) {
  int num = NumRegs(registers);
  std::unique_ptr<int[]> codes{new int[num]};
  std::unique_ptr<char const* []> names { new char const*[num] };
  int counter = 0;
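  // Keep only the default allocatable registers whose bits are set in
  // |registers|, preserving the default allocation order.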
  for (int i = 0; i < Default()->num_allocatable_general_registers(); ++i) {
    auto reg = Register::from_code(Default()->GetAllocatableGeneralCode(i));
    if (reg.bit() & registers) {
      DCHECK_LT(counter, num);
      codes[counter] = reg.code();
      names[counter] = RegisterName(reg);
      counter++;
    }
  }

  return new RestrictedRegisterConfiguration(num, std::move(codes),
                                             std::move(names));
}

RegisterConfiguration::RegisterConfiguration(
    int num_general_registers, int num_double_registers,
    int num_allocatable_general_registers,
    int num_allocatable_double_registers,
    const int* allocatable_general_codes, const int* allocatable_double_codes,
    AliasingKind fp_aliasing_kind)
    : num_general_registers_(num_general_registers),
      num_float_registers_(0),
      num_double_registers_(num_double_registers),
      num_simd128_registers_(0),
      num_allocatable_general_registers_(num_allocatable_general_registers),
      num_allocatable_float_registers_(0),
      num_allocatable_double_registers_(num_allocatable_double_registers),
      num_allocatable_simd128_registers_(0),
      allocatable_general_codes_mask_(0),
      allocatable_float_codes_mask_(0),
      allocatable_double_codes_mask_(0),
      allocatable_simd128_codes_mask_(0),
      allocatable_general_codes_(allocatable_general_codes),
      allocatable_double_codes_(allocatable_double_codes),
      fp_aliasing_kind_(fp_aliasing_kind) {
  DCHECK_LE(num_general_registers_,
            RegisterConfiguration::kMaxGeneralRegisters);
  DCHECK_LE(num_double_registers_, RegisterConfiguration::kMaxFPRegisters);
  for (int i = 0; i < num_allocatable_general_registers_; ++i) {
    allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
  }
  for (int i = 0; i < num_allocatable_double_registers_; ++i) {
    allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
  }

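  // COMBINE aliasing (e.g. ARM): one double register d<i> combines the two
  // float registers s<2i> and s<2i+1>, and one simd128 register q<i> combines
  // the two double registers d<2i> and d<2i+1>. Derive the float and simd128
  // register sets from the double set accordingly.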
  if (fp_aliasing_kind_ == COMBINE) {
    num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
                               ? num_double_registers_ * 2
                               : kMaxFPRegisters;
    num_allocatable_float_registers_ = 0;
    for (int i = 0; i < num_allocatable_double_registers_; i++) {
      int base_code = allocatable_double_codes_[i] * 2;
      if (base_code >= kMaxFPRegisters) continue;
      allocatable_float_codes_[num_allocatable_float_registers_++] = base_code;
      allocatable_float_codes_[num_allocatable_float_registers_++] =
          base_code + 1;
      allocatable_float_codes_mask_ |= (0x3 << base_code);
    }
    num_simd128_registers_ = num_double_registers_ / 2;
    num_allocatable_simd128_registers_ = 0;
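    // A simd128 register is allocatable only if both of its constituent
    // double registers are: look for two consecutive double codes that map to
    // the same simd128 code.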
    int last_simd128_code = allocatable_double_codes_[0] / 2;
    for (int i = 1; i < num_allocatable_double_registers_; i++) {
      int next_simd128_code = allocatable_double_codes_[i] / 2;
      // This scheme assumes allocatable_double_codes_ are strictly increasing.
      DCHECK_GE(next_simd128_code, last_simd128_code);
      if (last_simd128_code == next_simd128_code) {
        allocatable_simd128_codes_[num_allocatable_simd128_registers_++] =
            next_simd128_code;
        allocatable_simd128_codes_mask_ |= (0x1 << next_simd128_code);
      }
      last_simd128_code = next_simd128_code;
    }
  } else {
    DCHECK(fp_aliasing_kind_ == OVERLAP);
    num_float_registers_ = num_simd128_registers_ = num_double_registers_;
    num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
        num_allocatable_double_registers_;
    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
      allocatable_float_codes_[i] = allocatable_simd128_codes_[i] =
          allocatable_double_codes_[i];
    }
    allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
        allocatable_double_codes_mask_;
  }
}

// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
              static_cast<int>(MachineRepresentation::kFloat64) + 1);
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
              static_cast<int>(MachineRepresentation::kFloat32) + 1);

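// Because the three FP representations are consecutive enum values and each
// is twice as wide as the previous one, the difference between two
// representation values is the log2 of their width ratio. GetAliases uses
// this difference as a shift amount: e.g. the kFloat64 register at index 3
// (shift 1) aliases the kFloat32 registers at indices 6 and 7 (base index
// 3 << 1 = 6, alias count 1 << 1 = 2).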
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
                                      MachineRepresentation other_rep,
                                      int* alias_base_index) const {
  DCHECK(fp_aliasing_kind_ == COMBINE);
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  if (rep == other_rep) {
    *alias_base_index = index;
    return 1;
  }
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    int shift = rep_int - other_rep_int;
    int base_index = index << shift;
    if (base_index >= kMaxFPRegisters) {
      // Alias indices would be out of FP register range.
      return 0;
    }
    *alias_base_index = base_index;
    return 1 << shift;
  }
  int shift = other_rep_int - rep_int;
  *alias_base_index = index >> shift;
  return 1;
}

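// Two FP registers alias iff the narrower register's index, shifted down by
// the width-ratio shift, lands on the wider register's index.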
bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
                                       MachineRepresentation other_rep,
                                       int other_index) const {
  DCHECK(fp_aliasing_kind_ == COMBINE);
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  if (rep == other_rep) {
    return index == other_index;
  }
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    int shift = rep_int - other_rep_int;
    return index == other_index >> shift;
  }
  int shift = other_rep_int - rep_int;
  return index >> shift == other_index;
}

}  // namespace internal
}  // namespace v8