1 | // Copyright 2014 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include <algorithm> |
6 | |
7 | #include "src/base/adapters.h" |
8 | #include "src/base/overflowing-math.h" |
9 | #include "src/compiler/backend/instruction-selector-impl.h" |
10 | #include "src/compiler/node-matchers.h" |
11 | #include "src/compiler/node-properties.h" |
12 | #include "src/roots-inl.h" |
13 | |
14 | namespace v8 { |
15 | namespace internal { |
16 | namespace compiler { |
17 | |
18 | // Adds X64-specific methods for generating operands. |
19 | class X64OperandGenerator final : public OperandGenerator { |
20 | public: |
21 | explicit X64OperandGenerator(InstructionSelector* selector) |
22 | : OperandGenerator(selector) {} |
23 | |
24 | bool CanBeImmediate(Node* node) { |
25 | switch (node->opcode()) { |
26 | case IrOpcode::kInt32Constant: |
27 | case IrOpcode::kRelocatableInt32Constant: |
28 | return true; |
29 | case IrOpcode::kInt64Constant: { |
30 | const int64_t value = OpParameter<int64_t>(node->op()); |
31 | return std::numeric_limits<int32_t>::min() < value && |
32 | value <= std::numeric_limits<int32_t>::max(); |
33 | } |
34 | case IrOpcode::kNumberConstant: { |
35 | const double value = OpParameter<double>(node->op()); |
36 | return bit_cast<int64_t>(value) == 0; |
37 | } |
38 | default: |
39 | return false; |
40 | } |
41 | } |
42 | |
43 | int32_t GetImmediateIntegerValue(Node* node) { |
44 | DCHECK(CanBeImmediate(node)); |
45 | if (node->opcode() == IrOpcode::kInt32Constant) { |
46 | return OpParameter<int32_t>(node->op()); |
47 | } |
48 | DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode()); |
49 | return static_cast<int32_t>(OpParameter<int64_t>(node->op())); |
50 | } |
51 | |
52 | bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input, |
53 | int effect_level) { |
54 | if (input->opcode() != IrOpcode::kLoad || |
55 | !selector()->CanCover(node, input)) { |
56 | return false; |
57 | } |
58 | if (effect_level != selector()->GetEffectLevel(input)) { |
59 | return false; |
60 | } |
61 | MachineRepresentation rep = |
62 | LoadRepresentationOf(input->op()).representation(); |
63 | switch (opcode) { |
64 | case kX64And: |
65 | case kX64Or: |
66 | case kX64Xor: |
67 | case kX64Add: |
68 | case kX64Sub: |
69 | case kX64Push: |
70 | case kX64Cmp: |
71 | case kX64Test: |
72 | // When pointer compression is enabled 64-bit memory operands can't be |
73 | // used for tagged values. |
74 | return rep == MachineRepresentation::kWord64 || |
75 | (!COMPRESS_POINTERS_BOOL && IsAnyTagged(rep)); |
76 | case kX64And32: |
77 | case kX64Or32: |
78 | case kX64Xor32: |
79 | case kX64Add32: |
80 | case kX64Sub32: |
81 | case kX64Cmp32: |
82 | case kX64Test32: |
83 | // When pointer compression is enabled 32-bit memory operands can be |
84 | // used for tagged values. |
85 | return rep == MachineRepresentation::kWord32 || |
86 | (COMPRESS_POINTERS_BOOL && IsAnyTagged(rep)); |
87 | case kX64Cmp16: |
88 | case kX64Test16: |
89 | return rep == MachineRepresentation::kWord16; |
90 | case kX64Cmp8: |
91 | case kX64Test8: |
92 | return rep == MachineRepresentation::kWord8; |
93 | default: |
94 | break; |
95 | } |
96 | return false; |
97 | } |
98 | |
99 | AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent, |
100 | Node* base, Node* displacement, |
101 | DisplacementMode displacement_mode, |
102 | InstructionOperand inputs[], |
103 | size_t* input_count) { |
104 | AddressingMode mode = kMode_MRI; |
105 | if (base != nullptr && (index != nullptr || displacement != nullptr)) { |
106 | if (base->opcode() == IrOpcode::kInt32Constant && |
107 | OpParameter<int32_t>(base->op()) == 0) { |
108 | base = nullptr; |
109 | } else if (base->opcode() == IrOpcode::kInt64Constant && |
110 | OpParameter<int64_t>(base->op()) == 0) { |
111 | base = nullptr; |
112 | } |
113 | } |
114 | if (base != nullptr) { |
115 | inputs[(*input_count)++] = UseRegister(base); |
116 | if (index != nullptr) { |
117 | DCHECK(scale_exponent >= 0 && scale_exponent <= 3); |
118 | inputs[(*input_count)++] = UseRegister(index); |
119 | if (displacement != nullptr) { |
120 | inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement |
121 | ? UseNegatedImmediate(displacement) |
122 | : UseImmediate(displacement); |
123 | static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, |
124 | kMode_MR4I, kMode_MR8I}; |
125 | mode = kMRnI_modes[scale_exponent]; |
126 | } else { |
127 | static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2, |
128 | kMode_MR4, kMode_MR8}; |
129 | mode = kMRn_modes[scale_exponent]; |
130 | } |
131 | } else { |
132 | if (displacement == nullptr) { |
133 | mode = kMode_MR; |
134 | } else { |
135 | inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement |
136 | ? UseNegatedImmediate(displacement) |
137 | : UseImmediate(displacement); |
138 | mode = kMode_MRI; |
139 | } |
140 | } |
141 | } else { |
142 | DCHECK(scale_exponent >= 0 && scale_exponent <= 3); |
143 | if (displacement != nullptr) { |
144 | if (index == nullptr) { |
145 | inputs[(*input_count)++] = UseRegister(displacement); |
146 | mode = kMode_MR; |
147 | } else { |
148 | inputs[(*input_count)++] = UseRegister(index); |
149 | inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement |
150 | ? UseNegatedImmediate(displacement) |
151 | : UseImmediate(displacement); |
152 | static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I, |
153 | kMode_M4I, kMode_M8I}; |
154 | mode = kMnI_modes[scale_exponent]; |
155 | } |
156 | } else { |
157 | inputs[(*input_count)++] = UseRegister(index); |
158 | static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1, |
159 | kMode_M4, kMode_M8}; |
160 | mode = kMn_modes[scale_exponent]; |
161 | if (mode == kMode_MR1) { |
162 | // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0] |
163 | inputs[(*input_count)++] = UseRegister(index); |
164 | } |
165 | } |
166 | } |
167 | return mode; |
168 | } |
169 | |
170 | AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, |
171 | InstructionOperand inputs[], |
172 | size_t* input_count) { |
173 | if (selector()->CanAddressRelativeToRootsRegister()) { |
174 | LoadMatcher<ExternalReferenceMatcher> m(operand); |
175 | if (m.index().HasValue() && m.object().HasValue()) { |
176 | ptrdiff_t const delta = |
177 | m.index().Value() + |
178 | TurboAssemblerBase::RootRegisterOffsetForExternalReference( |
179 | selector()->isolate(), m.object().Value()); |
180 | if (is_int32(delta)) { |
181 | inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta)); |
182 | return kMode_Root; |
183 | } |
184 | } |
185 | } |
186 | BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll); |
187 | DCHECK(m.matches()); |
188 | if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) { |
189 | return GenerateMemoryOperandInputs( |
190 | m.index(), m.scale(), m.base(), m.displacement(), |
191 | m.displacement_mode(), inputs, input_count); |
192 | } else if (m.base() == nullptr && |
193 | m.displacement_mode() == kPositiveDisplacement) { |
194 | // The displacement cannot be an immediate, but we can use the |
195 | // displacement as base instead and still benefit from addressing |
196 | // modes for the scale. |
197 | return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(), |
198 | nullptr, m.displacement_mode(), inputs, |
199 | input_count); |
200 | } else { |
201 | inputs[(*input_count)++] = UseRegister(operand->InputAt(0)); |
202 | inputs[(*input_count)++] = UseRegister(operand->InputAt(1)); |
203 | return kMode_MR1; |
204 | } |
205 | } |
206 | |
207 | InstructionOperand GetEffectiveIndexOperand(Node* index, |
208 | AddressingMode* mode) { |
209 | if (CanBeImmediate(index)) { |
210 | *mode = kMode_MRI; |
211 | return UseImmediate(index); |
212 | } else { |
213 | *mode = kMode_MR1; |
214 | return UseUniqueRegister(index); |
215 | } |
216 | } |
217 | |
218 | bool CanBeBetterLeftOperand(Node* node) const { |
219 | return !selector()->IsLive(node); |
220 | } |
221 | }; |
222 | |
223 | namespace { |
224 | ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) { |
225 | ArchOpcode opcode = kArchNop; |
226 | switch (load_rep.representation()) { |
227 | case MachineRepresentation::kFloat32: |
228 | opcode = kX64Movss; |
229 | break; |
230 | case MachineRepresentation::kFloat64: |
231 | opcode = kX64Movsd; |
232 | break; |
233 | case MachineRepresentation::kBit: // Fall through. |
234 | case MachineRepresentation::kWord8: |
235 | opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl; |
236 | break; |
237 | case MachineRepresentation::kWord16: |
238 | opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl; |
239 | break; |
240 | case MachineRepresentation::kWord32: |
241 | opcode = kX64Movl; |
242 | break; |
243 | #ifdef V8_COMPRESS_POINTERS |
244 | case MachineRepresentation::kTaggedSigned: |
245 | opcode = kX64MovqDecompressTaggedSigned; |
246 | break; |
247 | case MachineRepresentation::kTaggedPointer: |
248 | opcode = kX64MovqDecompressTaggedPointer; |
249 | break; |
250 | case MachineRepresentation::kTagged: |
251 | opcode = kX64MovqDecompressAnyTagged; |
252 | break; |
253 | case MachineRepresentation::kCompressedSigned: // Fall through. |
254 | case MachineRepresentation::kCompressedPointer: // Fall through. |
255 | case MachineRepresentation::kCompressed: |
256 | opcode = kX64Movl; |
257 | break; |
258 | #else |
259 | case MachineRepresentation::kCompressedSigned: // Fall through. |
260 | case MachineRepresentation::kCompressedPointer: // Fall through. |
261 | case MachineRepresentation::kCompressed: |
262 | UNREACHABLE(); |
263 | break; |
264 | case MachineRepresentation::kTaggedSigned: // Fall through. |
265 | case MachineRepresentation::kTaggedPointer: // Fall through. |
266 | case MachineRepresentation::kTagged: |
267 | opcode = kX64Movq; |
268 | break; |
269 | #endif |
270 | case MachineRepresentation::kWord64: |
271 | opcode = kX64Movq; |
272 | break; |
273 | case MachineRepresentation::kSimd128: // Fall through. |
274 | opcode = kX64Movdqu; |
275 | break; |
276 | case MachineRepresentation::kNone: |
277 | UNREACHABLE(); |
278 | break; |
279 | } |
280 | return opcode; |
281 | } |
282 | |
283 | ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) { |
284 | switch (store_rep.representation()) { |
285 | case MachineRepresentation::kFloat32: |
286 | return kX64Movss; |
287 | break; |
288 | case MachineRepresentation::kFloat64: |
289 | return kX64Movsd; |
290 | break; |
291 | case MachineRepresentation::kBit: // Fall through. |
292 | case MachineRepresentation::kWord8: |
293 | return kX64Movb; |
294 | break; |
295 | case MachineRepresentation::kWord16: |
296 | return kX64Movw; |
297 | break; |
298 | case MachineRepresentation::kWord32: |
299 | return kX64Movl; |
300 | break; |
301 | #ifdef V8_COMPRESS_POINTERS |
302 | case MachineRepresentation::kTaggedSigned: // Fall through. |
303 | case MachineRepresentation::kTaggedPointer: // Fall through. |
304 | case MachineRepresentation::kTagged: |
305 | return kX64MovqCompressTagged; |
306 | case MachineRepresentation::kCompressedSigned: // Fall through. |
307 | case MachineRepresentation::kCompressedPointer: // Fall through. |
308 | case MachineRepresentation::kCompressed: |
309 | return kX64Movl; |
310 | #else |
311 | case MachineRepresentation::kCompressedSigned: // Fall through. |
312 | case MachineRepresentation::kCompressedPointer: // Fall through. |
313 | case MachineRepresentation::kCompressed: |
314 | UNREACHABLE(); |
315 | case MachineRepresentation::kTaggedSigned: // Fall through. |
316 | case MachineRepresentation::kTaggedPointer: // Fall through. |
317 | case MachineRepresentation::kTagged: |
318 | return kX64Movq; |
319 | break; |
320 | #endif |
321 | case MachineRepresentation::kWord64: |
322 | return kX64Movq; |
323 | break; |
324 | case MachineRepresentation::kSimd128: // Fall through. |
325 | return kX64Movdqu; |
326 | break; |
327 | case MachineRepresentation::kNone: |
328 | UNREACHABLE(); |
329 | } |
330 | UNREACHABLE(); |
331 | } |
332 | |
333 | } // namespace |
334 | |
335 | void InstructionSelector::VisitStackSlot(Node* node) { |
336 | StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); |
337 | int slot = frame_->AllocateSpillSlot(rep.size()); |
338 | OperandGenerator g(this); |
339 | |
340 | Emit(kArchStackSlot, g.DefineAsRegister(node), |
341 | sequence()->AddImmediate(Constant(slot)), 0, nullptr); |
342 | } |
343 | |
344 | void InstructionSelector::VisitDebugAbort(Node* node) { |
345 | X64OperandGenerator g(this); |
346 | Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx)); |
347 | } |
348 | |
349 | void InstructionSelector::VisitLoad(Node* node) { |
350 | LoadRepresentation load_rep = LoadRepresentationOf(node->op()); |
351 | X64OperandGenerator g(this); |
352 | |
353 | ArchOpcode opcode = GetLoadOpcode(load_rep); |
354 | InstructionOperand outputs[] = {g.DefineAsRegister(node)}; |
355 | InstructionOperand inputs[3]; |
356 | size_t input_count = 0; |
357 | AddressingMode mode = |
358 | g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); |
359 | InstructionCode code = opcode | AddressingModeField::encode(mode); |
360 | if (node->opcode() == IrOpcode::kProtectedLoad) { |
361 | code |= MiscField::encode(kMemoryAccessProtected); |
362 | } else if (node->opcode() == IrOpcode::kPoisonedLoad) { |
363 | CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); |
364 | code |= MiscField::encode(kMemoryAccessPoisoned); |
365 | } |
366 | Emit(code, 1, outputs, input_count, inputs); |
367 | } |
368 | |
369 | void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } |
370 | |
371 | void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); } |
372 | |
373 | void InstructionSelector::VisitStore(Node* node) { |
374 | X64OperandGenerator g(this); |
375 | Node* base = node->InputAt(0); |
376 | Node* index = node->InputAt(1); |
377 | Node* value = node->InputAt(2); |
378 | |
379 | StoreRepresentation store_rep = StoreRepresentationOf(node->op()); |
380 | WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); |
381 | |
382 | if (write_barrier_kind != kNoWriteBarrier) { |
383 | DCHECK(CanBeTaggedPointer(store_rep.representation())); |
384 | AddressingMode addressing_mode; |
385 | InstructionOperand inputs[] = { |
386 | g.UseUniqueRegister(base), |
387 | g.GetEffectiveIndexOperand(index, &addressing_mode), |
388 | g.UseUniqueRegister(value)}; |
389 | RecordWriteMode record_write_mode = |
390 | WriteBarrierKindToRecordWriteMode(write_barrier_kind); |
391 | InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; |
392 | InstructionCode code = kArchStoreWithWriteBarrier; |
393 | code |= AddressingModeField::encode(addressing_mode); |
394 | code |= MiscField::encode(static_cast<int>(record_write_mode)); |
395 | Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps); |
396 | } else { |
397 | ArchOpcode opcode = GetStoreOpcode(store_rep); |
398 | InstructionOperand inputs[4]; |
399 | size_t input_count = 0; |
400 | AddressingMode addressing_mode = |
401 | g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); |
402 | InstructionCode code = |
403 | opcode | AddressingModeField::encode(addressing_mode); |
404 | if ((ElementSizeLog2Of(store_rep.representation()) < |
405 | kSystemPointerSizeLog2) && |
406 | (value->opcode() == IrOpcode::kTruncateInt64ToInt32) && |
407 | CanCover(node, value)) { |
408 | value = value->InputAt(0); |
409 | } |
410 | InstructionOperand value_operand = |
411 | g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value); |
412 | inputs[input_count++] = value_operand; |
413 | Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, |
414 | inputs); |
415 | } |
416 | } |
417 | |
418 | void InstructionSelector::VisitProtectedStore(Node* node) { |
419 | X64OperandGenerator g(this); |
420 | Node* value = node->InputAt(2); |
421 | |
422 | StoreRepresentation store_rep = StoreRepresentationOf(node->op()); |
423 | |
424 | ArchOpcode opcode = GetStoreOpcode(store_rep); |
425 | InstructionOperand inputs[4]; |
426 | size_t input_count = 0; |
427 | AddressingMode addressing_mode = |
428 | g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); |
429 | InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | |
430 | MiscField::encode(kMemoryAccessProtected); |
431 | InstructionOperand value_operand = |
432 | g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value); |
433 | inputs[input_count++] = value_operand; |
434 | Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs); |
435 | } |
436 | |
437 | // Architecture supports unaligned access, therefore VisitLoad is used instead |
438 | void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); } |
439 | |
440 | // Architecture supports unaligned access, therefore VisitStore is used instead |
441 | void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); } |
442 | |
443 | // Shared routine for multiple binary operations. |
444 | static void VisitBinop(InstructionSelector* selector, Node* node, |
445 | InstructionCode opcode, FlagsContinuation* cont) { |
446 | X64OperandGenerator g(selector); |
447 | Int32BinopMatcher m(node); |
448 | Node* left = m.left().node(); |
449 | Node* right = m.right().node(); |
450 | InstructionOperand inputs[8]; |
451 | size_t input_count = 0; |
452 | InstructionOperand outputs[1]; |
453 | size_t output_count = 0; |
454 | |
455 | // TODO(turbofan): match complex addressing modes. |
456 | if (left == right) { |
457 | // If both inputs refer to the same operand, enforce allocating a register |
458 | // for both of them to ensure that we don't end up generating code like |
459 | // this: |
460 | // |
461 | // mov rax, [rbp-0x10] |
462 | // add rax, [rbp-0x10] |
463 | // jo label |
464 | InstructionOperand const input = g.UseRegister(left); |
465 | inputs[input_count++] = input; |
466 | inputs[input_count++] = input; |
467 | } else if (g.CanBeImmediate(right)) { |
468 | inputs[input_count++] = g.UseRegister(left); |
469 | inputs[input_count++] = g.UseImmediate(right); |
470 | } else { |
471 | int effect_level = selector->GetEffectLevel(node); |
472 | if (cont->IsBranch()) { |
473 | effect_level = selector->GetEffectLevel( |
474 | cont->true_block()->PredecessorAt(0)->control_input()); |
475 | } |
476 | if (node->op()->HasProperty(Operator::kCommutative) && |
477 | g.CanBeBetterLeftOperand(right) && |
478 | (!g.CanBeBetterLeftOperand(left) || |
479 | !g.CanBeMemoryOperand(opcode, node, right, effect_level))) { |
480 | std::swap(left, right); |
481 | } |
482 | if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) { |
483 | inputs[input_count++] = g.UseRegister(left); |
484 | AddressingMode addressing_mode = |
485 | g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count); |
486 | opcode |= AddressingModeField::encode(addressing_mode); |
487 | } else { |
488 | inputs[input_count++] = g.UseRegister(left); |
489 | inputs[input_count++] = g.Use(right); |
490 | } |
491 | } |
492 | |
493 | if (cont->IsBranch()) { |
494 | inputs[input_count++] = g.Label(cont->true_block()); |
495 | inputs[input_count++] = g.Label(cont->false_block()); |
496 | } |
497 | |
498 | outputs[output_count++] = g.DefineSameAsFirst(node); |
499 | |
500 | DCHECK_NE(0u, input_count); |
501 | DCHECK_EQ(1u, output_count); |
502 | DCHECK_GE(arraysize(inputs), input_count); |
503 | DCHECK_GE(arraysize(outputs), output_count); |
504 | |
505 | selector->EmitWithContinuation(opcode, output_count, outputs, input_count, |
506 | inputs, cont); |
507 | } |
508 | |
509 | // Shared routine for multiple binary operations. |
510 | static void VisitBinop(InstructionSelector* selector, Node* node, |
511 | InstructionCode opcode) { |
512 | FlagsContinuation cont; |
513 | VisitBinop(selector, node, opcode, &cont); |
514 | } |
515 | |
516 | void InstructionSelector::VisitWord32And(Node* node) { |
517 | X64OperandGenerator g(this); |
518 | Uint32BinopMatcher m(node); |
519 | if (m.right().Is(0xFF)) { |
520 | Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node())); |
521 | } else if (m.right().Is(0xFFFF)) { |
522 | Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node())); |
523 | } else { |
524 | VisitBinop(this, node, kX64And32); |
525 | } |
526 | } |
527 | |
528 | void InstructionSelector::VisitWord64And(Node* node) { |
529 | VisitBinop(this, node, kX64And); |
530 | } |
531 | |
532 | void InstructionSelector::VisitWord32Or(Node* node) { |
533 | VisitBinop(this, node, kX64Or32); |
534 | } |
535 | |
536 | void InstructionSelector::VisitWord64Or(Node* node) { |
537 | VisitBinop(this, node, kX64Or); |
538 | } |
539 | |
540 | void InstructionSelector::VisitWord32Xor(Node* node) { |
541 | X64OperandGenerator g(this); |
542 | Uint32BinopMatcher m(node); |
543 | if (m.right().Is(-1)) { |
544 | Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node())); |
545 | } else { |
546 | VisitBinop(this, node, kX64Xor32); |
547 | } |
548 | } |
549 | |
550 | void InstructionSelector::VisitWord64Xor(Node* node) { |
551 | X64OperandGenerator g(this); |
552 | Uint64BinopMatcher m(node); |
553 | if (m.right().Is(-1)) { |
554 | Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node())); |
555 | } else { |
556 | VisitBinop(this, node, kX64Xor); |
557 | } |
558 | } |
559 | |
560 | namespace { |
561 | |
562 | bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector, |
563 | Node* node, Node* load) { |
564 | if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) { |
565 | LoadRepresentation load_rep = LoadRepresentationOf(load->op()); |
566 | MachineRepresentation rep = load_rep.representation(); |
567 | InstructionCode opcode = kArchNop; |
568 | switch (rep) { |
569 | case MachineRepresentation::kBit: // Fall through. |
570 | case MachineRepresentation::kWord8: |
571 | opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl; |
572 | break; |
573 | case MachineRepresentation::kWord16: |
574 | opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl; |
575 | break; |
576 | case MachineRepresentation::kWord32: |
577 | case MachineRepresentation::kWord64: |
578 | case MachineRepresentation::kTaggedSigned: |
579 | case MachineRepresentation::kTagged: |
580 | case MachineRepresentation::kCompressedSigned: // Fall through. |
581 | case MachineRepresentation::kCompressed: // Fall through. |
582 | opcode = kX64Movl; |
583 | break; |
584 | default: |
585 | UNREACHABLE(); |
586 | return false; |
587 | } |
588 | X64OperandGenerator g(selector); |
589 | InstructionOperand outputs[] = {g.DefineAsRegister(node)}; |
590 | size_t input_count = 0; |
591 | InstructionOperand inputs[3]; |
592 | AddressingMode mode = g.GetEffectiveAddressMemoryOperand( |
593 | node->InputAt(0), inputs, &input_count); |
594 | opcode |= AddressingModeField::encode(mode); |
595 | selector->Emit(opcode, 1, outputs, input_count, inputs); |
596 | return true; |
597 | } |
598 | return false; |
599 | } |
600 | |
601 | // Shared routine for multiple 32-bit shift operations. |
602 | // TODO(bmeurer): Merge this with VisitWord64Shift using template magic? |
603 | void VisitWord32Shift(InstructionSelector* selector, Node* node, |
604 | ArchOpcode opcode) { |
605 | X64OperandGenerator g(selector); |
606 | Int32BinopMatcher m(node); |
607 | Node* left = m.left().node(); |
608 | Node* right = m.right().node(); |
609 | |
610 | if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 && |
611 | selector->CanCover(node, left)) { |
612 | left = left->InputAt(0); |
613 | } |
614 | |
615 | if (g.CanBeImmediate(right)) { |
616 | selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), |
617 | g.UseImmediate(right)); |
618 | } else { |
619 | selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), |
620 | g.UseFixed(right, rcx)); |
621 | } |
622 | } |
623 | |
624 | // Shared routine for multiple 64-bit shift operations. |
625 | // TODO(bmeurer): Merge this with VisitWord32Shift using template magic? |
626 | void VisitWord64Shift(InstructionSelector* selector, Node* node, |
627 | ArchOpcode opcode) { |
628 | X64OperandGenerator g(selector); |
629 | Int64BinopMatcher m(node); |
630 | Node* left = m.left().node(); |
631 | Node* right = m.right().node(); |
632 | |
633 | if (g.CanBeImmediate(right)) { |
634 | selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), |
635 | g.UseImmediate(right)); |
636 | } else { |
637 | if (m.right().IsWord64And()) { |
638 | Int64BinopMatcher mright(right); |
639 | if (mright.right().Is(0x3F)) { |
640 | right = mright.left().node(); |
641 | } |
642 | } |
643 | selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), |
644 | g.UseFixed(right, rcx)); |
645 | } |
646 | } |
647 | |
648 | // Shared routine for multiple shift operations with continuation. |
649 | template <typename BinopMatcher, int Bits> |
650 | bool TryVisitWordShift(InstructionSelector* selector, Node* node, |
651 | ArchOpcode opcode, FlagsContinuation* cont) { |
652 | X64OperandGenerator g(selector); |
653 | BinopMatcher m(node); |
654 | Node* left = m.left().node(); |
655 | Node* right = m.right().node(); |
656 | |
657 | // If the shift count is 0, the flags are not affected. |
658 | if (!g.CanBeImmediate(right) || |
659 | (g.GetImmediateIntegerValue(right) & (Bits - 1)) == 0) { |
660 | return false; |
661 | } |
662 | InstructionOperand output = g.DefineSameAsFirst(node); |
663 | InstructionOperand inputs[2]; |
664 | inputs[0] = g.UseRegister(left); |
665 | inputs[1] = g.UseImmediate(right); |
666 | selector->EmitWithContinuation(opcode, 1, &output, 2, inputs, cont); |
667 | return true; |
668 | } |
669 | |
670 | void EmitLea(InstructionSelector* selector, InstructionCode opcode, |
671 | Node* result, Node* index, int scale, Node* base, |
672 | Node* displacement, DisplacementMode displacement_mode) { |
673 | X64OperandGenerator g(selector); |
674 | |
675 | InstructionOperand inputs[4]; |
676 | size_t input_count = 0; |
677 | AddressingMode mode = |
678 | g.GenerateMemoryOperandInputs(index, scale, base, displacement, |
679 | displacement_mode, inputs, &input_count); |
680 | |
681 | DCHECK_NE(0u, input_count); |
682 | DCHECK_GE(arraysize(inputs), input_count); |
683 | |
684 | InstructionOperand outputs[1]; |
685 | outputs[0] = g.DefineAsRegister(result); |
686 | |
687 | opcode = AddressingModeField::encode(mode) | opcode; |
688 | |
689 | selector->Emit(opcode, 1, outputs, input_count, inputs); |
690 | } |
691 | |
692 | } // namespace |
693 | |
694 | void InstructionSelector::VisitWord32Shl(Node* node) { |
695 | Int32ScaleMatcher m(node, true); |
696 | if (m.matches()) { |
697 | Node* index = node->InputAt(0); |
698 | Node* base = m.power_of_two_plus_one() ? index : nullptr; |
699 | EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr, |
700 | kPositiveDisplacement); |
701 | return; |
702 | } |
703 | VisitWord32Shift(this, node, kX64Shl32); |
704 | } |
705 | |
706 | void InstructionSelector::VisitWord64Shl(Node* node) { |
707 | X64OperandGenerator g(this); |
708 | Int64ScaleMatcher m(node, true); |
709 | if (m.matches()) { |
710 | Node* index = node->InputAt(0); |
711 | Node* base = m.power_of_two_plus_one() ? index : nullptr; |
712 | EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr, |
713 | kPositiveDisplacement); |
714 | return; |
715 | } else { |
716 | Int64BinopMatcher m(node); |
717 | if ((m.left().IsChangeInt32ToInt64() || |
718 | m.left().IsChangeUint32ToUint64()) && |
719 | m.right().IsInRange(32, 63)) { |
720 | // There's no need to sign/zero-extend to 64-bit if we shift out the upper |
721 | // 32 bits anyway. |
722 | Emit(kX64Shl, g.DefineSameAsFirst(node), |
723 | g.UseRegister(m.left().node()->InputAt(0)), |
724 | g.UseImmediate(m.right().node())); |
725 | return; |
726 | } |
727 | } |
728 | VisitWord64Shift(this, node, kX64Shl); |
729 | } |
730 | |
731 | void InstructionSelector::VisitWord32Shr(Node* node) { |
732 | VisitWord32Shift(this, node, kX64Shr32); |
733 | } |
734 | |
735 | namespace { |
736 | |
737 | inline AddressingMode AddDisplacementToAddressingMode(AddressingMode mode) { |
738 | switch (mode) { |
739 | case kMode_MR: |
740 | return kMode_MRI; |
741 | break; |
742 | case kMode_MR1: |
743 | return kMode_MR1I; |
744 | break; |
745 | case kMode_MR2: |
746 | return kMode_MR2I; |
747 | break; |
748 | case kMode_MR4: |
749 | return kMode_MR4I; |
750 | break; |
751 | case kMode_MR8: |
752 | return kMode_MR8I; |
753 | break; |
754 | case kMode_M1: |
755 | return kMode_M1I; |
756 | break; |
757 | case kMode_M2: |
758 | return kMode_M2I; |
759 | break; |
760 | case kMode_M4: |
761 | return kMode_M4I; |
762 | break; |
763 | case kMode_M8: |
764 | return kMode_M8I; |
765 | break; |
766 | case kMode_None: |
767 | case kMode_MRI: |
768 | case kMode_MR1I: |
769 | case kMode_MR2I: |
770 | case kMode_MR4I: |
771 | case kMode_MR8I: |
772 | case kMode_M1I: |
773 | case kMode_M2I: |
774 | case kMode_M4I: |
775 | case kMode_M8I: |
776 | case kMode_Root: |
777 | UNREACHABLE(); |
778 | } |
779 | UNREACHABLE(); |
780 | } |
781 | |
782 | bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node, |
783 | InstructionCode opcode) { |
784 | DCHECK(IrOpcode::kWord64Sar == node->opcode() || |
785 | IrOpcode::kWord64Shr == node->opcode()); |
786 | X64OperandGenerator g(selector); |
787 | Int64BinopMatcher m(node); |
788 | if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() && |
789 | m.right().Is(32)) { |
790 | DCHECK_EQ(selector->GetEffectLevel(node), |
791 | selector->GetEffectLevel(m.left().node())); |
792 | // Just load and sign-extend the interesting 4 bytes instead. This happens, |
793 | // for example, when we're loading and untagging SMIs. |
794 | BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), |
795 | AddressOption::kAllowAll); |
796 | if (mleft.matches() && (mleft.displacement() == nullptr || |
797 | g.CanBeImmediate(mleft.displacement()))) { |
798 | size_t input_count = 0; |
799 | InstructionOperand inputs[3]; |
800 | AddressingMode mode = g.GetEffectiveAddressMemoryOperand( |
801 | m.left().node(), inputs, &input_count); |
802 | if (mleft.displacement() == nullptr) { |
803 | // Make sure that the addressing mode indicates the presence of an |
804 | // immediate displacement. It seems that we never use M1 and M2, but we |
805 | // handle them here anyways. |
806 | mode = AddDisplacementToAddressingMode(mode); |
807 | inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4); |
808 | } else { |
809 | // In the case that the base address was zero, the displacement will be |
810 | // in a register and replacing it with an immediate is not allowed. This |
811 | // usually only happens in dead code anyway. |
812 | if (!inputs[input_count - 1].IsImmediate()) return false; |
813 | int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement()); |
814 | inputs[input_count - 1] = |
815 | ImmediateOperand(ImmediateOperand::INLINE, displacement + 4); |
816 | } |
817 | InstructionOperand outputs[] = {g.DefineAsRegister(node)}; |
818 | InstructionCode code = opcode | AddressingModeField::encode(mode); |
819 | selector->Emit(code, 1, outputs, input_count, inputs); |
820 | return true; |
821 | } |
822 | } |
823 | return false; |
824 | } |
825 | |
826 | } // namespace |
827 | |
828 | void InstructionSelector::VisitWord64Shr(Node* node) { |
829 | if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return; |
830 | VisitWord64Shift(this, node, kX64Shr); |
831 | } |
832 | |
833 | void InstructionSelector::VisitWord32Sar(Node* node) { |
834 | X64OperandGenerator g(this); |
835 | Int32BinopMatcher m(node); |
836 | if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) { |
837 | Int32BinopMatcher mleft(m.left().node()); |
838 | if (mleft.right().Is(16) && m.right().Is(16)) { |
839 | Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node())); |
840 | return; |
841 | } else if (mleft.right().Is(24) && m.right().Is(24)) { |
842 | Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node())); |
843 | return; |
844 | } |
845 | } |
846 | VisitWord32Shift(this, node, kX64Sar32); |
847 | } |
848 | |
849 | void InstructionSelector::VisitWord64Sar(Node* node) { |
850 | if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return; |
851 | VisitWord64Shift(this, node, kX64Sar); |
852 | } |
853 | |
854 | void InstructionSelector::VisitWord32Ror(Node* node) { |
855 | VisitWord32Shift(this, node, kX64Ror32); |
856 | } |
857 | |
858 | void InstructionSelector::VisitWord64Ror(Node* node) { |
859 | VisitWord64Shift(this, node, kX64Ror); |
860 | } |
861 | |
862 | void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); } |
863 | |
864 | void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); } |
865 | |
866 | void InstructionSelector::VisitWord64ReverseBytes(Node* node) { |
867 | X64OperandGenerator g(this); |
868 | Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); |
869 | } |
870 | |
871 | void InstructionSelector::VisitWord32ReverseBytes(Node* node) { |
872 | X64OperandGenerator g(this); |
873 | Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); |
874 | } |
875 | |
876 | void InstructionSelector::VisitInt32Add(Node* node) { |
877 | X64OperandGenerator g(this); |
878 | |
879 | // Try to match the Add to a leal pattern |
880 | BaseWithIndexAndDisplacement32Matcher m(node); |
881 | if (m.matches() && |
882 | (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) { |
883 | EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(), |
884 | m.displacement(), m.displacement_mode()); |
885 | return; |
886 | } |
887 | |
888 | // No leal pattern match, use addl |
889 | VisitBinop(this, node, kX64Add32); |
890 | } |
891 | |
892 | void InstructionSelector::VisitInt64Add(Node* node) { |
893 | X64OperandGenerator g(this); |
894 | |
895 | // Try to match the Add to a leaq pattern |
896 | BaseWithIndexAndDisplacement64Matcher m(node); |
897 | if (m.matches() && |
898 | (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) { |
899 | EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(), |
900 | m.displacement(), m.displacement_mode()); |
901 | return; |
902 | } |
903 | |
904 | // No leal pattern match, use addq |
905 | VisitBinop(this, node, kX64Add); |
906 | } |
907 | |
908 | void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { |
909 | if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
910 | FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); |
911 | return VisitBinop(this, node, kX64Add, &cont); |
912 | } |
913 | FlagsContinuation cont; |
914 | VisitBinop(this, node, kX64Add, &cont); |
915 | } |
916 | |
917 | void InstructionSelector::VisitInt32Sub(Node* node) { |
918 | X64OperandGenerator g(this); |
919 | DCHECK_EQ(node->InputCount(), 2); |
920 | Node* input1 = node->InputAt(0); |
921 | Node* input2 = node->InputAt(1); |
922 | if (input1->opcode() == IrOpcode::kTruncateInt64ToInt32 && |
923 | g.CanBeImmediate(input2)) { |
924 | int32_t imm = g.GetImmediateIntegerValue(input2); |
925 | InstructionOperand int64_input = g.UseRegister(input1->InputAt(0)); |
926 | if (imm == 0) { |
927 | // Emit "movl" for subtraction of 0. |
928 | Emit(kX64Movl, g.DefineAsRegister(node), int64_input); |
929 | } else { |
930 | // Omit truncation and turn subtractions of constant values into immediate |
931 | // "leal" instructions by negating the value. |
932 | Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), |
933 | g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm)); |
934 | } |
935 | return; |
936 | } |
937 | |
938 | Int32BinopMatcher m(node); |
939 | if (m.left().Is(0)) { |
940 | Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node())); |
941 | } else if (m.right().Is(0)) { |
942 | // TODO(jarin): We should be able to use {EmitIdentity} here |
943 | // (https://crbug.com/v8/7947). |
944 | Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node())); |
945 | } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) { |
946 | // Turn subtractions of constant values into immediate "leal" instructions |
947 | // by negating the value. |
948 | Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), |
949 | g.DefineAsRegister(node), g.UseRegister(m.left().node()), |
950 | g.TempImmediate(base::NegateWithWraparound(m.right().Value()))); |
951 | } else { |
952 | VisitBinop(this, node, kX64Sub32); |
953 | } |
954 | } |
955 | |
956 | void InstructionSelector::VisitInt64Sub(Node* node) { |
957 | X64OperandGenerator g(this); |
958 | Int64BinopMatcher m(node); |
959 | if (m.left().Is(0)) { |
960 | Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node())); |
961 | } else { |
962 | if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) { |
963 | // Turn subtractions of constant values into immediate "leaq" instructions |
964 | // by negating the value. |
965 | Emit(kX64Lea | AddressingModeField::encode(kMode_MRI), |
966 | g.DefineAsRegister(node), g.UseRegister(m.left().node()), |
967 | g.TempImmediate(-static_cast<int32_t>(m.right().Value()))); |
968 | return; |
969 | } |
970 | VisitBinop(this, node, kX64Sub); |
971 | } |
972 | } |
973 | |
974 | void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { |
975 | if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
976 | FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); |
977 | return VisitBinop(this, node, kX64Sub, &cont); |
978 | } |
979 | FlagsContinuation cont; |
980 | VisitBinop(this, node, kX64Sub, &cont); |
981 | } |
982 | |
983 | namespace { |
984 | |
985 | void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) { |
986 | X64OperandGenerator g(selector); |
987 | Int32BinopMatcher m(node); |
988 | Node* left = m.left().node(); |
989 | Node* right = m.right().node(); |
990 | if (g.CanBeImmediate(right)) { |
991 | selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left), |
992 | g.UseImmediate(right)); |
993 | } else { |
994 | if (g.CanBeBetterLeftOperand(right)) { |
995 | std::swap(left, right); |
996 | } |
997 | selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), |
998 | g.Use(right)); |
999 | } |
1000 | } |
1001 | |
1002 | void VisitMulHigh(InstructionSelector* selector, Node* node, |
1003 | ArchOpcode opcode) { |
1004 | X64OperandGenerator g(selector); |
1005 | Node* left = node->InputAt(0); |
1006 | Node* right = node->InputAt(1); |
1007 | if (selector->IsLive(left) && !selector->IsLive(right)) { |
1008 | std::swap(left, right); |
1009 | } |
1010 | InstructionOperand temps[] = {g.TempRegister(rax)}; |
1011 | // TODO(turbofan): We use UseUniqueRegister here to improve register |
1012 | // allocation. |
1013 | selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax), |
1014 | g.UseUniqueRegister(right), arraysize(temps), temps); |
1015 | } |
1016 | |
1017 | void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) { |
1018 | X64OperandGenerator g(selector); |
1019 | InstructionOperand temps[] = {g.TempRegister(rdx)}; |
1020 | selector->Emit( |
1021 | opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax), |
1022 | g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); |
1023 | } |
1024 | |
1025 | void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) { |
1026 | X64OperandGenerator g(selector); |
1027 | InstructionOperand temps[] = {g.TempRegister(rax)}; |
1028 | selector->Emit( |
1029 | opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax), |
1030 | g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); |
1031 | } |
1032 | |
1033 | } // namespace |
1034 | |
1035 | void InstructionSelector::VisitInt32Mul(Node* node) { |
1036 | Int32ScaleMatcher m(node, true); |
1037 | if (m.matches()) { |
1038 | Node* index = node->InputAt(0); |
1039 | Node* base = m.power_of_two_plus_one() ? index : nullptr; |
1040 | EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr, |
1041 | kPositiveDisplacement); |
1042 | return; |
1043 | } |
1044 | VisitMul(this, node, kX64Imul32); |
1045 | } |
1046 | |
1047 | void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { |
1048 | // TODO(mvstanton): Use Int32ScaleMatcher somehow. |
1049 | if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
1050 | FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); |
1051 | return VisitBinop(this, node, kX64Imul32, &cont); |
1052 | } |
1053 | FlagsContinuation cont; |
1054 | VisitBinop(this, node, kX64Imul32, &cont); |
1055 | } |
1056 | |
1057 | void InstructionSelector::VisitInt64Mul(Node* node) { |
1058 | VisitMul(this, node, kX64Imul); |
1059 | } |
1060 | |
1061 | void InstructionSelector::VisitInt32MulHigh(Node* node) { |
1062 | VisitMulHigh(this, node, kX64ImulHigh32); |
1063 | } |
1064 | |
1065 | void InstructionSelector::VisitInt32Div(Node* node) { |
1066 | VisitDiv(this, node, kX64Idiv32); |
1067 | } |
1068 | |
1069 | void InstructionSelector::VisitInt64Div(Node* node) { |
1070 | VisitDiv(this, node, kX64Idiv); |
1071 | } |
1072 | |
1073 | void InstructionSelector::VisitUint32Div(Node* node) { |
1074 | VisitDiv(this, node, kX64Udiv32); |
1075 | } |
1076 | |
1077 | void InstructionSelector::VisitUint64Div(Node* node) { |
1078 | VisitDiv(this, node, kX64Udiv); |
1079 | } |
1080 | |
1081 | void InstructionSelector::VisitInt32Mod(Node* node) { |
1082 | VisitMod(this, node, kX64Idiv32); |
1083 | } |
1084 | |
1085 | void InstructionSelector::VisitInt64Mod(Node* node) { |
1086 | VisitMod(this, node, kX64Idiv); |
1087 | } |
1088 | |
1089 | void InstructionSelector::VisitUint32Mod(Node* node) { |
1090 | VisitMod(this, node, kX64Udiv32); |
1091 | } |
1092 | |
1093 | void InstructionSelector::VisitUint64Mod(Node* node) { |
1094 | VisitMod(this, node, kX64Udiv); |
1095 | } |
1096 | |
1097 | void InstructionSelector::VisitUint32MulHigh(Node* node) { |
1098 | VisitMulHigh(this, node, kX64UmulHigh32); |
1099 | } |
1100 | |
1101 | void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) { |
1102 | X64OperandGenerator g(this); |
1103 | InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; |
1104 | InstructionOperand outputs[2]; |
1105 | size_t output_count = 0; |
1106 | outputs[output_count++] = g.DefineAsRegister(node); |
1107 | |
1108 | Node* success_output = NodeProperties::FindProjection(node, 1); |
1109 | if (success_output) { |
1110 | outputs[output_count++] = g.DefineAsRegister(success_output); |
1111 | } |
1112 | |
1113 | Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs); |
1114 | } |
1115 | |
1116 | void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) { |
1117 | X64OperandGenerator g(this); |
1118 | InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; |
1119 | InstructionOperand outputs[2]; |
1120 | size_t output_count = 0; |
1121 | outputs[output_count++] = g.DefineAsRegister(node); |
1122 | |
1123 | Node* success_output = NodeProperties::FindProjection(node, 1); |
1124 | if (success_output) { |
1125 | outputs[output_count++] = g.DefineAsRegister(success_output); |
1126 | } |
1127 | |
1128 | Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs); |
1129 | } |
1130 | |
1131 | void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) { |
1132 | X64OperandGenerator g(this); |
1133 | InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; |
1134 | InstructionOperand outputs[2]; |
1135 | size_t output_count = 0; |
1136 | outputs[output_count++] = g.DefineAsRegister(node); |
1137 | |
1138 | Node* success_output = NodeProperties::FindProjection(node, 1); |
1139 | if (success_output) { |
1140 | outputs[output_count++] = g.DefineAsRegister(success_output); |
1141 | } |
1142 | |
1143 | Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs); |
1144 | } |
1145 | |
1146 | void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) { |
1147 | X64OperandGenerator g(this); |
1148 | InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; |
1149 | InstructionOperand outputs[2]; |
1150 | size_t output_count = 0; |
1151 | outputs[output_count++] = g.DefineAsRegister(node); |
1152 | |
1153 | Node* success_output = NodeProperties::FindProjection(node, 1); |
1154 | if (success_output) { |
1155 | outputs[output_count++] = g.DefineAsRegister(success_output); |
1156 | } |
1157 | |
1158 | Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs); |
1159 | } |
1160 | |
1161 | void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { |
1162 | X64OperandGenerator g(this); |
1163 | Node* const value = node->InputAt(0); |
1164 | if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) { |
1165 | LoadRepresentation load_rep = LoadRepresentationOf(value->op()); |
1166 | MachineRepresentation rep = load_rep.representation(); |
1167 | InstructionCode opcode = kArchNop; |
1168 | switch (rep) { |
1169 | case MachineRepresentation::kBit: // Fall through. |
1170 | case MachineRepresentation::kWord8: |
1171 | opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq; |
1172 | break; |
1173 | case MachineRepresentation::kWord16: |
1174 | opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq; |
1175 | break; |
1176 | case MachineRepresentation::kWord32: |
1177 | opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl; |
1178 | break; |
1179 | default: |
1180 | UNREACHABLE(); |
1181 | return; |
1182 | } |
1183 | InstructionOperand outputs[] = {g.DefineAsRegister(node)}; |
1184 | size_t input_count = 0; |
1185 | InstructionOperand inputs[3]; |
1186 | AddressingMode mode = g.GetEffectiveAddressMemoryOperand( |
1187 | node->InputAt(0), inputs, &input_count); |
1188 | opcode |= AddressingModeField::encode(mode); |
1189 | Emit(opcode, 1, outputs, input_count, inputs); |
1190 | } else { |
1191 | Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
1192 | } |
1193 | } |
1194 | |
1195 | namespace { |
1196 | |
1197 | bool ZeroExtendsWord32ToWord64(Node* node) { |
1198 | switch (node->opcode()) { |
1199 | case IrOpcode::kWord32And: |
1200 | case IrOpcode::kWord32Or: |
1201 | case IrOpcode::kWord32Xor: |
1202 | case IrOpcode::kWord32Shl: |
1203 | case IrOpcode::kWord32Shr: |
1204 | case IrOpcode::kWord32Sar: |
1205 | case IrOpcode::kWord32Ror: |
1206 | case IrOpcode::kWord32Equal: |
1207 | case IrOpcode::kInt32Add: |
1208 | case IrOpcode::kInt32Sub: |
1209 | case IrOpcode::kInt32Mul: |
1210 | case IrOpcode::kInt32MulHigh: |
1211 | case IrOpcode::kInt32Div: |
1212 | case IrOpcode::kInt32LessThan: |
1213 | case IrOpcode::kInt32LessThanOrEqual: |
1214 | case IrOpcode::kInt32Mod: |
1215 | case IrOpcode::kUint32Div: |
1216 | case IrOpcode::kUint32LessThan: |
1217 | case IrOpcode::kUint32LessThanOrEqual: |
1218 | case IrOpcode::kUint32Mod: |
1219 | case IrOpcode::kUint32MulHigh: |
1220 | case IrOpcode::kTruncateInt64ToInt32: |
1221 | // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the |
1222 | // zero-extension is a no-op. |
1223 | return true; |
1224 | case IrOpcode::kProjection: { |
1225 | Node* const value = node->InputAt(0); |
1226 | switch (value->opcode()) { |
1227 | case IrOpcode::kInt32AddWithOverflow: |
1228 | case IrOpcode::kInt32SubWithOverflow: |
1229 | case IrOpcode::kInt32MulWithOverflow: |
1230 | return true; |
1231 | default: |
1232 | return false; |
1233 | } |
1234 | } |
1235 | case IrOpcode::kLoad: |
1236 | case IrOpcode::kProtectedLoad: |
1237 | case IrOpcode::kPoisonedLoad: { |
1238 | // The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly |
1239 | // zero-extend to 64-bit on x64, so the zero-extension is a no-op. |
1240 | LoadRepresentation load_rep = LoadRepresentationOf(node->op()); |
1241 | switch (load_rep.representation()) { |
1242 | case MachineRepresentation::kWord8: |
1243 | case MachineRepresentation::kWord16: |
1244 | case MachineRepresentation::kWord32: |
1245 | return true; |
1246 | default: |
1247 | return false; |
1248 | } |
1249 | } |
1250 | default: |
1251 | return false; |
1252 | } |
1253 | } |
1254 | |
1255 | } // namespace |
1256 | |
1257 | void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { |
1258 | X64OperandGenerator g(this); |
1259 | Node* value = node->InputAt(0); |
1260 | if (ZeroExtendsWord32ToWord64(value)) { |
1261 | // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the |
1262 | // zero-extension is a no-op. |
1263 | return EmitIdentity(node); |
1264 | } |
1265 | Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value)); |
1266 | } |
1267 | |
1268 | void InstructionSelector::VisitChangeTaggedToCompressed(Node* node) { |
1269 | X64OperandGenerator g(this); |
1270 | Node* value = node->InputAt(0); |
1271 | Emit(kX64CompressAny, g.DefineAsRegister(node), g.Use(value)); |
1272 | } |
1273 | |
1274 | void InstructionSelector::VisitChangeTaggedPointerToCompressedPointer( |
1275 | Node* node) { |
1276 | X64OperandGenerator g(this); |
1277 | Node* value = node->InputAt(0); |
1278 | Emit(kX64CompressPointer, g.DefineAsRegister(node), g.Use(value)); |
1279 | } |
1280 | |
1281 | void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned( |
1282 | Node* node) { |
1283 | X64OperandGenerator g(this); |
1284 | Node* value = node->InputAt(0); |
1285 | Emit(kX64CompressSigned, g.DefineAsRegister(node), g.Use(value)); |
1286 | } |
1287 | |
1288 | void InstructionSelector::VisitChangeCompressedToTagged(Node* node) { |
1289 | X64OperandGenerator g(this); |
1290 | Node* const value = node->InputAt(0); |
1291 | Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value)); |
1292 | } |
1293 | |
1294 | void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer( |
1295 | Node* node) { |
1296 | X64OperandGenerator g(this); |
1297 | Node* const value = node->InputAt(0); |
1298 | Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value)); |
1299 | } |
1300 | |
1301 | void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned( |
1302 | Node* node) { |
1303 | X64OperandGenerator g(this); |
1304 | Node* const value = node->InputAt(0); |
1305 | Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value)); |
1306 | } |
1307 | |
1308 | namespace { |
1309 | |
1310 | void VisitRO(InstructionSelector* selector, Node* node, |
1311 | InstructionCode opcode) { |
1312 | X64OperandGenerator g(selector); |
1313 | selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
1314 | } |
1315 | |
1316 | void VisitRR(InstructionSelector* selector, Node* node, |
1317 | InstructionCode opcode) { |
1318 | X64OperandGenerator g(selector); |
1319 | selector->Emit(opcode, g.DefineAsRegister(node), |
1320 | g.UseRegister(node->InputAt(0))); |
1321 | } |
1322 | |
1323 | void VisitRRO(InstructionSelector* selector, Node* node, |
1324 | InstructionCode opcode) { |
1325 | X64OperandGenerator g(selector); |
1326 | selector->Emit(opcode, g.DefineSameAsFirst(node), |
1327 | g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
1328 | } |
1329 | |
1330 | void VisitFloatBinop(InstructionSelector* selector, Node* node, |
1331 | ArchOpcode avx_opcode, ArchOpcode sse_opcode) { |
1332 | X64OperandGenerator g(selector); |
1333 | InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); |
1334 | InstructionOperand operand1 = g.Use(node->InputAt(1)); |
1335 | if (selector->IsSupported(AVX)) { |
1336 | selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1); |
1337 | } else { |
1338 | selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1); |
1339 | } |
1340 | } |
1341 | |
1342 | void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input, |
1343 | ArchOpcode avx_opcode, ArchOpcode sse_opcode) { |
1344 | X64OperandGenerator g(selector); |
1345 | if (selector->IsSupported(AVX)) { |
1346 | selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input)); |
1347 | } else { |
1348 | selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input)); |
1349 | } |
1350 | } |
1351 | |
1352 | } // namespace |
1353 | |
1354 | #define RO_OP_LIST(V) \ |
1355 | V(Word64Clz, kX64Lzcnt) \ |
1356 | V(Word32Clz, kX64Lzcnt32) \ |
1357 | V(Word64Ctz, kX64Tzcnt) \ |
1358 | V(Word32Ctz, kX64Tzcnt32) \ |
1359 | V(Word64Popcnt, kX64Popcnt) \ |
1360 | V(Word32Popcnt, kX64Popcnt32) \ |
1361 | V(Float64Sqrt, kSSEFloat64Sqrt) \ |
1362 | V(Float32Sqrt, kSSEFloat32Sqrt) \ |
1363 | V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \ |
1364 | V(ChangeFloat64ToInt64, kSSEFloat64ToInt64) \ |
1365 | V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \ |
1366 | V(TruncateFloat64ToInt64, kSSEFloat64ToInt64) \ |
1367 | V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \ |
1368 | V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \ |
1369 | V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \ |
1370 | V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \ |
1371 | V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \ |
1372 | V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \ |
1373 | V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \ |
1374 | V(ChangeInt64ToFloat64, kSSEInt64ToFloat64) \ |
1375 | V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \ |
1376 | V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \ |
1377 | V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \ |
1378 | V(RoundInt64ToFloat32, kSSEInt64ToFloat32) \ |
1379 | V(RoundUint64ToFloat32, kSSEUint64ToFloat32) \ |
1380 | V(RoundInt64ToFloat64, kSSEInt64ToFloat64) \ |
1381 | V(RoundUint64ToFloat64, kSSEUint64ToFloat64) \ |
1382 | V(RoundUint32ToFloat32, kSSEUint32ToFloat32) \ |
1383 | V(BitcastFloat32ToInt32, kX64BitcastFI) \ |
1384 | V(BitcastFloat64ToInt64, kX64BitcastDL) \ |
1385 | V(BitcastInt32ToFloat32, kX64BitcastIF) \ |
1386 | V(BitcastInt64ToFloat64, kX64BitcastLD) \ |
1387 | V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \ |
1388 | V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \ |
1389 | V(SignExtendWord8ToInt32, kX64Movsxbl) \ |
1390 | V(SignExtendWord16ToInt32, kX64Movsxwl) \ |
1391 | V(SignExtendWord8ToInt64, kX64Movsxbq) \ |
1392 | V(SignExtendWord16ToInt64, kX64Movsxwq) \ |
1393 | V(SignExtendWord32ToInt64, kX64Movsxlq) |
1394 | |
1395 | #define RR_OP_LIST(V) \ |
1396 | V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \ |
1397 | V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \ |
1398 | V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \ |
1399 | V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \ |
1400 | V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \ |
1401 | V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \ |
1402 | V(Float32RoundTiesEven, \ |
1403 | kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \ |
1404 | V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest)) |
1405 | |
1406 | #define RO_VISITOR(Name, opcode) \ |
1407 | void InstructionSelector::Visit##Name(Node* node) { \ |
1408 | VisitRO(this, node, opcode); \ |
1409 | } |
1410 | RO_OP_LIST(RO_VISITOR) |
1411 | #undef RO_VISITOR |
1412 | #undef RO_OP_LIST |
1413 | |
1414 | #define RR_VISITOR(Name, opcode) \ |
1415 | void InstructionSelector::Visit##Name(Node* node) { \ |
1416 | VisitRR(this, node, opcode); \ |
1417 | } |
1418 | RR_OP_LIST(RR_VISITOR) |
1419 | #undef RR_VISITOR |
1420 | #undef RR_OP_LIST |
1421 | |
1422 | void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { |
1423 | VisitRR(this, node, kArchTruncateDoubleToI); |
1424 | } |
1425 | |
1426 | void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { |
1427 | // We rely on the fact that TruncateInt64ToInt32 zero extends the |
1428 | // value (see ZeroExtendsWord32ToWord64). So all code paths here |
1429 | // have to satisfy that condition. |
1430 | X64OperandGenerator g(this); |
1431 | Node* value = node->InputAt(0); |
1432 | if (CanCover(node, value)) { |
1433 | switch (value->opcode()) { |
1434 | case IrOpcode::kWord64Sar: |
1435 | case IrOpcode::kWord64Shr: { |
1436 | Int64BinopMatcher m(value); |
1437 | if (m.right().Is(32)) { |
1438 | if (CanCoverTransitively(node, value, value->InputAt(0)) && |
1439 | TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) { |
1440 | return EmitIdentity(node); |
1441 | } |
1442 | Emit(kX64Shr, g.DefineSameAsFirst(node), |
1443 | g.UseRegister(m.left().node()), g.TempImmediate(32)); |
1444 | return; |
1445 | } |
1446 | break; |
1447 | } |
1448 | case IrOpcode::kLoad: { |
1449 | if (TryMergeTruncateInt64ToInt32IntoLoad(this, node, value)) { |
1450 | return; |
1451 | } |
1452 | break; |
1453 | } |
1454 | default: |
1455 | break; |
1456 | } |
1457 | } |
1458 | Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value)); |
1459 | } |
1460 | |
1461 | void InstructionSelector::VisitFloat32Add(Node* node) { |
1462 | VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add); |
1463 | } |
1464 | |
1465 | void InstructionSelector::VisitFloat32Sub(Node* node) { |
1466 | VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub); |
1467 | } |
1468 | |
1469 | void InstructionSelector::VisitFloat32Mul(Node* node) { |
1470 | VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul); |
1471 | } |
1472 | |
1473 | void InstructionSelector::VisitFloat32Div(Node* node) { |
1474 | VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div); |
1475 | } |
1476 | |
1477 | void InstructionSelector::VisitFloat32Abs(Node* node) { |
1478 | VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs); |
1479 | } |
1480 | |
1481 | void InstructionSelector::VisitFloat32Max(Node* node) { |
1482 | VisitRRO(this, node, kSSEFloat32Max); |
1483 | } |
1484 | |
1485 | void InstructionSelector::VisitFloat32Min(Node* node) { |
1486 | VisitRRO(this, node, kSSEFloat32Min); |
1487 | } |
1488 | |
1489 | void InstructionSelector::VisitFloat64Add(Node* node) { |
1490 | VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add); |
1491 | } |
1492 | |
1493 | void InstructionSelector::VisitFloat64Sub(Node* node) { |
1494 | VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub); |
1495 | } |
1496 | |
1497 | void InstructionSelector::VisitFloat64Mul(Node* node) { |
1498 | VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul); |
1499 | } |
1500 | |
1501 | void InstructionSelector::VisitFloat64Div(Node* node) { |
1502 | VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div); |
1503 | } |
1504 | |
1505 | void InstructionSelector::VisitFloat64Mod(Node* node) { |
1506 | X64OperandGenerator g(this); |
1507 | InstructionOperand temps[] = {g.TempRegister(rax)}; |
1508 | Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node), |
1509 | g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1, |
1510 | temps); |
1511 | } |
1512 | |
1513 | void InstructionSelector::VisitFloat64Max(Node* node) { |
1514 | VisitRRO(this, node, kSSEFloat64Max); |
1515 | } |
1516 | |
1517 | void InstructionSelector::VisitFloat64Min(Node* node) { |
1518 | VisitRRO(this, node, kSSEFloat64Min); |
1519 | } |
1520 | |
1521 | void InstructionSelector::VisitFloat64Abs(Node* node) { |
1522 | VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs); |
1523 | } |
1524 | |
1525 | void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { |
1526 | UNREACHABLE(); |
1527 | } |
1528 | |
1529 | void InstructionSelector::VisitFloat32Neg(Node* node) { |
1530 | VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg); |
1531 | } |
1532 | |
1533 | void InstructionSelector::VisitFloat64Neg(Node* node) { |
1534 | VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg); |
1535 | } |
1536 | |
1537 | void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, |
1538 | InstructionCode opcode) { |
1539 | X64OperandGenerator g(this); |
1540 | Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0), |
1541 | g.UseFixed(node->InputAt(1), xmm1)) |
1542 | ->MarkAsCall(); |
1543 | } |
1544 | |
1545 | void InstructionSelector::VisitFloat64Ieee754Unop(Node* node, |
1546 | InstructionCode opcode) { |
1547 | X64OperandGenerator g(this); |
1548 | Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0)) |
1549 | ->MarkAsCall(); |
1550 | } |
1551 | |
1552 | void InstructionSelector::EmitPrepareArguments( |
1553 | ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor, |
1554 | Node* node) { |
1555 | X64OperandGenerator g(this); |
1556 | |
1557 | // Prepare for C function call. |
1558 | if (call_descriptor->IsCFunctionCall()) { |
1559 | Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>( |
1560 | call_descriptor->ParameterCount())), |
1561 | 0, nullptr, 0, nullptr); |
1562 | |
1563 | // Poke any stack arguments. |
1564 | for (size_t n = 0; n < arguments->size(); ++n) { |
1565 | PushParameter input = (*arguments)[n]; |
1566 | if (input.node) { |
1567 | int slot = static_cast<int>(n); |
1568 | InstructionOperand value = g.CanBeImmediate(input.node) |
1569 | ? g.UseImmediate(input.node) |
1570 | : g.UseRegister(input.node); |
1571 | Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value); |
1572 | } |
1573 | } |
1574 | } else { |
1575 | // Push any stack arguments. |
1576 | int effect_level = GetEffectLevel(node); |
1577 | for (PushParameter input : base::Reversed(*arguments)) { |
1578 | // Skip any alignment holes in pushed nodes. We may have one in case of a |
1579 | // Simd128 stack argument. |
1580 | if (input.node == nullptr) continue; |
1581 | if (g.CanBeImmediate(input.node)) { |
1582 | Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node)); |
1583 | } else if (IsSupported(ATOM) || |
1584 | sequence()->IsFP(GetVirtualRegister(input.node))) { |
1585 | // TODO(titzer): X64Push cannot handle stack->stack double moves |
1586 | // because there is no way to encode fixed double slots. |
1587 | Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node)); |
1588 | } else if (g.CanBeMemoryOperand(kX64Push, node, input.node, |
1589 | effect_level)) { |
1590 | InstructionOperand outputs[1]; |
1591 | InstructionOperand inputs[4]; |
1592 | size_t input_count = 0; |
1593 | InstructionCode opcode = kX64Push; |
1594 | AddressingMode mode = g.GetEffectiveAddressMemoryOperand( |
1595 | input.node, inputs, &input_count); |
1596 | opcode |= AddressingModeField::encode(mode); |
1597 | Emit(opcode, 0, outputs, input_count, inputs); |
1598 | } else { |
1599 | Emit(kX64Push, g.NoOutput(), g.UseAny(input.node)); |
1600 | } |
1601 | } |
1602 | } |
1603 | } |
1604 | |
1605 | void InstructionSelector::EmitPrepareResults( |
1606 | ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor, |
1607 | Node* node) { |
1608 | X64OperandGenerator g(this); |
1609 | |
1610 | int reverse_slot = 0; |
1611 | for (PushParameter output : *results) { |
1612 | if (!output.location.IsCallerFrameSlot()) continue; |
1613 | reverse_slot += output.location.GetSizeInPointers(); |
1614 | // Skip any alignment holes in nodes. |
1615 | if (output.node == nullptr) continue; |
1616 | DCHECK(!call_descriptor->IsCFunctionCall()); |
1617 | if (output.location.GetType() == MachineType::Float32()) { |
1618 | MarkAsFloat32(output.node); |
1619 | } else if (output.location.GetType() == MachineType::Float64()) { |
1620 | MarkAsFloat64(output.node); |
1621 | } |
1622 | InstructionOperand result = g.DefineAsRegister(output.node); |
1623 | InstructionOperand slot = g.UseImmediate(reverse_slot); |
1624 | Emit(kX64Peek, 1, &result, 1, &slot); |
1625 | } |
1626 | } |
1627 | |
1628 | bool InstructionSelector::IsTailCallAddressImmediate() { return true; } |
1629 | |
1630 | int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; } |
1631 | |
1632 | namespace { |
1633 | |
1634 | void VisitCompareWithMemoryOperand(InstructionSelector* selector, |
1635 | InstructionCode opcode, Node* left, |
1636 | InstructionOperand right, |
1637 | FlagsContinuation* cont) { |
1638 | DCHECK_EQ(IrOpcode::kLoad, left->opcode()); |
1639 | X64OperandGenerator g(selector); |
1640 | size_t input_count = 0; |
1641 | InstructionOperand inputs[4]; |
1642 | AddressingMode addressing_mode = |
1643 | g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count); |
1644 | opcode |= AddressingModeField::encode(addressing_mode); |
1645 | inputs[input_count++] = right; |
1646 | |
1647 | selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont); |
1648 | } |
1649 | |
1650 | // Shared routine for multiple compare operations. |
1651 | void VisitCompare(InstructionSelector* selector, InstructionCode opcode, |
1652 | InstructionOperand left, InstructionOperand right, |
1653 | FlagsContinuation* cont) { |
1654 | selector->EmitWithContinuation(opcode, left, right, cont); |
1655 | } |
1656 | |
1657 | // Shared routine for multiple compare operations. |
1658 | void VisitCompare(InstructionSelector* selector, InstructionCode opcode, |
1659 | Node* left, Node* right, FlagsContinuation* cont, |
1660 | bool commutative) { |
1661 | X64OperandGenerator g(selector); |
1662 | if (commutative && g.CanBeBetterLeftOperand(right)) { |
1663 | std::swap(left, right); |
1664 | } |
1665 | VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont); |
1666 | } |
1667 | |
1668 | MachineType MachineTypeForNarrow(Node* node, Node* hint_node) { |
1669 | if (hint_node->opcode() == IrOpcode::kLoad) { |
1670 | MachineType hint = LoadRepresentationOf(hint_node->op()); |
1671 | if (node->opcode() == IrOpcode::kInt32Constant || |
1672 | node->opcode() == IrOpcode::kInt64Constant) { |
1673 | int64_t constant = node->opcode() == IrOpcode::kInt32Constant |
1674 | ? OpParameter<int32_t>(node->op()) |
1675 | : OpParameter<int64_t>(node->op()); |
1676 | if (hint == MachineType::Int8()) { |
1677 | if (constant >= std::numeric_limits<int8_t>::min() && |
1678 | constant <= std::numeric_limits<int8_t>::max()) { |
1679 | return hint; |
1680 | } |
1681 | } else if (hint == MachineType::Uint8()) { |
1682 | if (constant >= std::numeric_limits<uint8_t>::min() && |
1683 | constant <= std::numeric_limits<uint8_t>::max()) { |
1684 | return hint; |
1685 | } |
1686 | } else if (hint == MachineType::Int16()) { |
1687 | if (constant >= std::numeric_limits<int16_t>::min() && |
1688 | constant <= std::numeric_limits<int16_t>::max()) { |
1689 | return hint; |
1690 | } |
1691 | } else if (hint == MachineType::Uint16()) { |
1692 | if (constant >= std::numeric_limits<uint16_t>::min() && |
1693 | constant <= std::numeric_limits<uint16_t>::max()) { |
1694 | return hint; |
1695 | } |
1696 | } else if (hint == MachineType::Int32()) { |
1697 | return hint; |
1698 | } else if (hint == MachineType::Uint32()) { |
1699 | if (constant >= 0) return hint; |
1700 | } |
1701 | } |
1702 | } |
1703 | return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op()) |
1704 | : MachineType::None(); |
1705 | } |
1706 | |
1707 | // Tries to match the size of the given opcode to that of the operands, if |
1708 | // possible. |
1709 | InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left, |
1710 | Node* right, FlagsContinuation* cont) { |
1711 | // TODO(epertoso): we can probably get some size information out phi nodes. |
1712 | // If the load representations don't match, both operands will be |
1713 | // zero/sign-extended to 32bit. |
1714 | MachineType left_type = MachineTypeForNarrow(left, right); |
1715 | MachineType right_type = MachineTypeForNarrow(right, left); |
1716 | if (left_type == right_type) { |
1717 | switch (left_type.representation()) { |
1718 | case MachineRepresentation::kBit: |
1719 | case MachineRepresentation::kWord8: { |
1720 | if (opcode == kX64Test32) return kX64Test8; |
1721 | if (opcode == kX64Cmp32) { |
1722 | if (left_type.semantic() == MachineSemantic::kUint32) { |
1723 | cont->OverwriteUnsignedIfSigned(); |
1724 | } else { |
1725 | CHECK_EQ(MachineSemantic::kInt32, left_type.semantic()); |
1726 | } |
1727 | return kX64Cmp8; |
1728 | } |
1729 | break; |
1730 | } |
1731 | case MachineRepresentation::kWord16: |
1732 | if (opcode == kX64Test32) return kX64Test16; |
1733 | if (opcode == kX64Cmp32) { |
1734 | if (left_type.semantic() == MachineSemantic::kUint32) { |
1735 | cont->OverwriteUnsignedIfSigned(); |
1736 | } else { |
1737 | CHECK_EQ(MachineSemantic::kInt32, left_type.semantic()); |
1738 | } |
1739 | return kX64Cmp16; |
1740 | } |
1741 | break; |
1742 | #ifdef V8_COMPRESS_POINTERS |
1743 | case MachineRepresentation::kTaggedSigned: |
1744 | case MachineRepresentation::kTaggedPointer: |
1745 | case MachineRepresentation::kTagged: |
1746 | // When pointer compression is enabled the lower 32-bits uniquely |
1747 | // identify tagged value. |
1748 | if (opcode == kX64Cmp) return kX64Cmp32; |
1749 | break; |
1750 | #endif |
1751 | default: |
1752 | break; |
1753 | } |
1754 | } |
1755 | return opcode; |
1756 | } |
1757 | |
1758 | // Shared routine for multiple word compare operations. |
1759 | void VisitWordCompare(InstructionSelector* selector, Node* node, |
1760 | InstructionCode opcode, FlagsContinuation* cont) { |
1761 | X64OperandGenerator g(selector); |
1762 | Node* left = node->InputAt(0); |
1763 | Node* right = node->InputAt(1); |
1764 | |
1765 | // The 32-bit comparisons automatically truncate Word64 |
1766 | // values to Word32 range, no need to do that explicitly. |
1767 | if (opcode == kX64Cmp32 || opcode == kX64Test32) { |
1768 | if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 && |
1769 | selector->CanCover(node, left)) { |
1770 | left = left->InputAt(0); |
1771 | } |
1772 | |
1773 | if (right->opcode() == IrOpcode::kTruncateInt64ToInt32 && |
1774 | selector->CanCover(node, right)) { |
1775 | right = right->InputAt(0); |
1776 | } |
1777 | } |
1778 | |
1779 | opcode = TryNarrowOpcodeSize(opcode, left, right, cont); |
1780 | |
1781 | // If one of the two inputs is an immediate, make sure it's on the right, or |
1782 | // if one of the two inputs is a memory operand, make sure it's on the left. |
1783 | int effect_level = selector->GetEffectLevel(node); |
1784 | if (cont->IsBranch()) { |
1785 | effect_level = selector->GetEffectLevel( |
1786 | cont->true_block()->PredecessorAt(0)->control_input()); |
1787 | } |
1788 | |
1789 | if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) || |
1790 | (g.CanBeMemoryOperand(opcode, node, right, effect_level) && |
1791 | !g.CanBeMemoryOperand(opcode, node, left, effect_level))) { |
1792 | if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); |
1793 | std::swap(left, right); |
1794 | } |
1795 | |
1796 | // Match immediates on right side of comparison. |
1797 | if (g.CanBeImmediate(right)) { |
1798 | if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) { |
1799 | return VisitCompareWithMemoryOperand(selector, opcode, left, |
1800 | g.UseImmediate(right), cont); |
1801 | } |
1802 | return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), |
1803 | cont); |
1804 | } |
1805 | |
1806 | // Match memory operands on left side of comparison. |
1807 | if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) { |
1808 | return VisitCompareWithMemoryOperand(selector, opcode, left, |
1809 | g.UseRegister(right), cont); |
1810 | } |
1811 | |
1812 | return VisitCompare(selector, opcode, left, right, cont, |
1813 | node->op()->HasProperty(Operator::kCommutative)); |
1814 | } |
1815 | |
1816 | // Shared routine for 64-bit word comparison operations. |
1817 | void VisitWord64Compare(InstructionSelector* selector, Node* node, |
1818 | FlagsContinuation* cont) { |
1819 | X64OperandGenerator g(selector); |
1820 | if (selector->CanUseRootsRegister()) { |
1821 | const RootsTable& roots_table = selector->isolate()->roots_table(); |
1822 | RootIndex root_index; |
1823 | HeapObjectBinopMatcher m(node); |
1824 | if (m.right().HasValue() && |
1825 | roots_table.IsRootHandle(m.right().Value(), &root_index)) { |
1826 | if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); |
1827 | InstructionCode opcode = |
1828 | kX64Cmp | AddressingModeField::encode(kMode_Root); |
1829 | return VisitCompare( |
1830 | selector, opcode, |
1831 | g.TempImmediate( |
1832 | TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), |
1833 | g.UseRegister(m.left().node()), cont); |
1834 | } else if (m.left().HasValue() && |
1835 | roots_table.IsRootHandle(m.left().Value(), &root_index)) { |
1836 | InstructionCode opcode = |
1837 | kX64Cmp | AddressingModeField::encode(kMode_Root); |
1838 | return VisitCompare( |
1839 | selector, opcode, |
1840 | g.TempImmediate( |
1841 | TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), |
1842 | g.UseRegister(m.right().node()), cont); |
1843 | } |
1844 | } |
1845 | if (selector->isolate() != nullptr) { |
1846 | StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m( |
1847 | selector->isolate(), node); |
1848 | if (m.Matched()) { |
1849 | // Compare(Load(js_stack_limit), LoadStackPointer) |
1850 | if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); |
1851 | InstructionCode opcode = cont->Encode(kX64StackCheck); |
1852 | CHECK(cont->IsBranch()); |
1853 | selector->EmitWithContinuation(opcode, cont); |
1854 | return; |
1855 | } |
1856 | } |
1857 | WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m( |
1858 | node); |
1859 | if (wasm_m.Matched()) { |
1860 | // This is a wasm stack check. By structure, we know that we can use the |
1861 | // stack pointer directly, as wasm code does not modify the stack at points |
1862 | // where stack checks are performed. |
1863 | Node* left = node->InputAt(0); |
1864 | LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER, |
1865 | InstructionSequence::DefaultRepresentation(), |
1866 | RegisterCode::kRegCode_rsp); |
1867 | return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont); |
1868 | } |
1869 | VisitWordCompare(selector, node, kX64Cmp, cont); |
1870 | } |
1871 | |
1872 | // Shared routine for comparison with zero. |
1873 | void VisitCompareZero(InstructionSelector* selector, Node* user, Node* node, |
1874 | InstructionCode opcode, FlagsContinuation* cont) { |
1875 | X64OperandGenerator g(selector); |
1876 | if (cont->IsBranch() && |
1877 | (cont->condition() == kNotEqual || cont->condition() == kEqual)) { |
1878 | switch (node->opcode()) { |
1879 | #define FLAGS_SET_BINOP_LIST(V) \ |
1880 | V(kInt32Add, VisitBinop, kX64Add32) \ |
1881 | V(kInt32Sub, VisitBinop, kX64Sub32) \ |
1882 | V(kWord32And, VisitBinop, kX64And32) \ |
1883 | V(kWord32Or, VisitBinop, kX64Or32) \ |
1884 | V(kInt64Add, VisitBinop, kX64Add) \ |
1885 | V(kInt64Sub, VisitBinop, kX64Sub) \ |
1886 | V(kWord64And, VisitBinop, kX64And) \ |
1887 | V(kWord64Or, VisitBinop, kX64Or) |
1888 | #define FLAGS_SET_BINOP(opcode, Visit, archOpcode) \ |
1889 | case IrOpcode::opcode: \ |
1890 | if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \ |
1891 | return Visit(selector, node, archOpcode, cont); \ |
1892 | } \ |
1893 | break; |
1894 | FLAGS_SET_BINOP_LIST(FLAGS_SET_BINOP) |
1895 | #undef FLAGS_SET_BINOP_LIST |
1896 | #undef FLAGS_SET_BINOP |
1897 | |
1898 | #define TRY_VISIT_WORD32_SHIFT TryVisitWordShift<Int32BinopMatcher, 32> |
1899 | #define TRY_VISIT_WORD64_SHIFT TryVisitWordShift<Int64BinopMatcher, 64> |
1900 | // Skip Word64Sar/Word32Sar since no instruction reduction in most cases. |
1901 | #define FLAGS_SET_SHIFT_LIST(V) \ |
1902 | V(kWord32Shl, TRY_VISIT_WORD32_SHIFT, kX64Shl32) \ |
1903 | V(kWord32Shr, TRY_VISIT_WORD32_SHIFT, kX64Shr32) \ |
1904 | V(kWord64Shl, TRY_VISIT_WORD64_SHIFT, kX64Shl) \ |
1905 | V(kWord64Shr, TRY_VISIT_WORD64_SHIFT, kX64Shr) |
1906 | #define FLAGS_SET_SHIFT(opcode, TryVisit, archOpcode) \ |
1907 | case IrOpcode::opcode: \ |
1908 | if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \ |
1909 | if (TryVisit(selector, node, archOpcode, cont)) return; \ |
1910 | } \ |
1911 | break; |
1912 | FLAGS_SET_SHIFT_LIST(FLAGS_SET_SHIFT) |
1913 | #undef TRY_VISIT_WORD32_SHIFT |
1914 | #undef TRY_VISIT_WORD64_SHIFT |
1915 | #undef FLAGS_SET_SHIFT_LIST |
1916 | #undef FLAGS_SET_SHIFT |
1917 | default: |
1918 | break; |
1919 | } |
1920 | } |
1921 | int effect_level = selector->GetEffectLevel(node); |
1922 | if (cont->IsBranch()) { |
1923 | effect_level = selector->GetEffectLevel( |
1924 | cont->true_block()->PredecessorAt(0)->control_input()); |
1925 | } |
1926 | if (node->opcode() == IrOpcode::kLoad) { |
1927 | switch (LoadRepresentationOf(node->op()).representation()) { |
1928 | case MachineRepresentation::kWord8: |
1929 | if (opcode == kX64Cmp32) { |
1930 | opcode = kX64Cmp8; |
1931 | } else if (opcode == kX64Test32) { |
1932 | opcode = kX64Test8; |
1933 | } |
1934 | break; |
1935 | case MachineRepresentation::kWord16: |
1936 | if (opcode == kX64Cmp32) { |
1937 | opcode = kX64Cmp16; |
1938 | } else if (opcode == kX64Test32) { |
1939 | opcode = kX64Test16; |
1940 | } |
1941 | break; |
1942 | default: |
1943 | break; |
1944 | } |
1945 | } |
1946 | if (g.CanBeMemoryOperand(opcode, user, node, effect_level)) { |
1947 | VisitCompareWithMemoryOperand(selector, opcode, node, g.TempImmediate(0), |
1948 | cont); |
1949 | } else { |
1950 | VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont); |
1951 | } |
1952 | } |
1953 | |
1954 | // Shared routine for multiple float32 compare operations (inputs commuted). |
1955 | void VisitFloat32Compare(InstructionSelector* selector, Node* node, |
1956 | FlagsContinuation* cont) { |
1957 | Node* const left = node->InputAt(0); |
1958 | Node* const right = node->InputAt(1); |
1959 | InstructionCode const opcode = |
1960 | selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp; |
1961 | VisitCompare(selector, opcode, right, left, cont, false); |
1962 | } |
1963 | |
1964 | // Shared routine for multiple float64 compare operations (inputs commuted). |
1965 | void VisitFloat64Compare(InstructionSelector* selector, Node* node, |
1966 | FlagsContinuation* cont) { |
1967 | Node* const left = node->InputAt(0); |
1968 | Node* const right = node->InputAt(1); |
1969 | InstructionCode const opcode = |
1970 | selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp; |
1971 | VisitCompare(selector, opcode, right, left, cont, false); |
1972 | } |
1973 | |
1974 | // Shared routine for Word32/Word64 Atomic Binops |
1975 | void VisitAtomicBinop(InstructionSelector* selector, Node* node, |
1976 | ArchOpcode opcode) { |
1977 | X64OperandGenerator g(selector); |
1978 | Node* base = node->InputAt(0); |
1979 | Node* index = node->InputAt(1); |
1980 | Node* value = node->InputAt(2); |
1981 | AddressingMode addressing_mode; |
1982 | InstructionOperand inputs[] = { |
1983 | g.UseUniqueRegister(value), g.UseUniqueRegister(base), |
1984 | g.GetEffectiveIndexOperand(index, &addressing_mode)}; |
1985 | InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)}; |
1986 | InstructionOperand temps[] = {g.TempRegister()}; |
1987 | InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); |
1988 | selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, |
1989 | arraysize(temps), temps); |
1990 | } |
1991 | |
1992 | // Shared routine for Word32/Word64 Atomic CmpExchg |
1993 | void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node, |
1994 | ArchOpcode opcode) { |
1995 | X64OperandGenerator g(selector); |
1996 | Node* base = node->InputAt(0); |
1997 | Node* index = node->InputAt(1); |
1998 | Node* old_value = node->InputAt(2); |
1999 | Node* new_value = node->InputAt(3); |
2000 | AddressingMode addressing_mode; |
2001 | InstructionOperand inputs[] = { |
2002 | g.UseFixed(old_value, rax), g.UseUniqueRegister(new_value), |
2003 | g.UseUniqueRegister(base), |
2004 | g.GetEffectiveIndexOperand(index, &addressing_mode)}; |
2005 | InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)}; |
2006 | InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); |
2007 | selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs); |
2008 | } |
2009 | |
2010 | // Shared routine for Word32/Word64 Atomic Exchange |
2011 | void VisitAtomicExchange(InstructionSelector* selector, Node* node, |
2012 | ArchOpcode opcode) { |
2013 | X64OperandGenerator g(selector); |
2014 | Node* base = node->InputAt(0); |
2015 | Node* index = node->InputAt(1); |
2016 | Node* value = node->InputAt(2); |
2017 | AddressingMode addressing_mode; |
2018 | InstructionOperand inputs[] = { |
2019 | g.UseUniqueRegister(value), g.UseUniqueRegister(base), |
2020 | g.GetEffectiveIndexOperand(index, &addressing_mode)}; |
2021 | InstructionOperand outputs[] = {g.DefineSameAsFirst(node)}; |
2022 | InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); |
2023 | selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs); |
2024 | } |
2025 | |
2026 | } // namespace |
2027 | |
2028 | // Shared routine for word comparison against zero. |
2029 | void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, |
2030 | FlagsContinuation* cont) { |
2031 | // Try to combine with comparisons against 0 by simply inverting the branch. |
2032 | while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) { |
2033 | Int32BinopMatcher m(value); |
2034 | if (!m.right().Is(0)) break; |
2035 | |
2036 | user = value; |
2037 | value = m.left().node(); |
2038 | cont->Negate(); |
2039 | } |
2040 | |
2041 | if (CanCover(user, value)) { |
2042 | switch (value->opcode()) { |
2043 | case IrOpcode::kWord32Equal: |
2044 | cont->OverwriteAndNegateIfEqual(kEqual); |
2045 | return VisitWordCompare(this, value, kX64Cmp32, cont); |
2046 | case IrOpcode::kInt32LessThan: |
2047 | cont->OverwriteAndNegateIfEqual(kSignedLessThan); |
2048 | return VisitWordCompare(this, value, kX64Cmp32, cont); |
2049 | case IrOpcode::kInt32LessThanOrEqual: |
2050 | cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); |
2051 | return VisitWordCompare(this, value, kX64Cmp32, cont); |
2052 | case IrOpcode::kUint32LessThan: |
2053 | cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); |
2054 | return VisitWordCompare(this, value, kX64Cmp32, cont); |
2055 | case IrOpcode::kUint32LessThanOrEqual: |
2056 | cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); |
2057 | return VisitWordCompare(this, value, kX64Cmp32, cont); |
2058 | case IrOpcode::kWord64Equal: { |
2059 | cont->OverwriteAndNegateIfEqual(kEqual); |
2060 | Int64BinopMatcher m(value); |
2061 | if (m.right().Is(0)) { |
2062 | // Try to combine the branch with a comparison. |
2063 | Node* const user = m.node(); |
2064 | Node* const value = m.left().node(); |
2065 | if (CanCover(user, value)) { |
2066 | switch (value->opcode()) { |
2067 | case IrOpcode::kInt64Sub: |
2068 | return VisitWord64Compare(this, value, cont); |
2069 | case IrOpcode::kWord64And: |
2070 | return VisitWordCompare(this, value, kX64Test, cont); |
2071 | default: |
2072 | break; |
2073 | } |
2074 | } |
2075 | return VisitCompareZero(this, user, value, kX64Cmp, cont); |
2076 | } |
2077 | return VisitWord64Compare(this, value, cont); |
2078 | } |
2079 | case IrOpcode::kInt64LessThan: |
2080 | cont->OverwriteAndNegateIfEqual(kSignedLessThan); |
2081 | return VisitWord64Compare(this, value, cont); |
2082 | case IrOpcode::kInt64LessThanOrEqual: |
2083 | cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); |
2084 | return VisitWord64Compare(this, value, cont); |
2085 | case IrOpcode::kUint64LessThan: |
2086 | cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); |
2087 | return VisitWord64Compare(this, value, cont); |
2088 | case IrOpcode::kUint64LessThanOrEqual: |
2089 | cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); |
2090 | return VisitWord64Compare(this, value, cont); |
2091 | case IrOpcode::kFloat32Equal: |
2092 | cont->OverwriteAndNegateIfEqual(kUnorderedEqual); |
2093 | return VisitFloat32Compare(this, value, cont); |
2094 | case IrOpcode::kFloat32LessThan: |
2095 | cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan); |
2096 | return VisitFloat32Compare(this, value, cont); |
2097 | case IrOpcode::kFloat32LessThanOrEqual: |
2098 | cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual); |
2099 | return VisitFloat32Compare(this, value, cont); |
2100 | case IrOpcode::kFloat64Equal: |
2101 | cont->OverwriteAndNegateIfEqual(kUnorderedEqual); |
2102 | return VisitFloat64Compare(this, value, cont); |
2103 | case IrOpcode::kFloat64LessThan: { |
2104 | Float64BinopMatcher m(value); |
2105 | if (m.left().Is(0.0) && m.right().IsFloat64Abs()) { |
2106 | // This matches the pattern |
2107 | // |
2108 | // Float64LessThan(#0.0, Float64Abs(x)) |
2109 | // |
2110 | // which TurboFan generates for NumberToBoolean in the general case, |
2111 | // and which evaluates to false if x is 0, -0 or NaN. We can compile |
2112 | // this to a simple (v)ucomisd using not_equal flags condition, which |
2113 | // avoids the costly Float64Abs. |
2114 | cont->OverwriteAndNegateIfEqual(kNotEqual); |
2115 | InstructionCode const opcode = |
2116 | IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp; |
2117 | return VisitCompare(this, opcode, m.left().node(), |
2118 | m.right().InputAt(0), cont, false); |
2119 | } |
2120 | cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan); |
2121 | return VisitFloat64Compare(this, value, cont); |
2122 | } |
2123 | case IrOpcode::kFloat64LessThanOrEqual: |
2124 | cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual); |
2125 | return VisitFloat64Compare(this, value, cont); |
2126 | case IrOpcode::kProjection: |
2127 | // Check if this is the overflow output projection of an |
2128 | // <Operation>WithOverflow node. |
2129 | if (ProjectionIndexOf(value->op()) == 1u) { |
2130 | // We cannot combine the <Operation>WithOverflow with this branch |
2131 | // unless the 0th projection (the use of the actual value of the |
2132 | // <Operation> is either nullptr, which means there's no use of the |
2133 | // actual value, or was already defined, which means it is scheduled |
2134 | // *AFTER* this branch). |
2135 | Node* const node = value->InputAt(0); |
2136 | Node* const result = NodeProperties::FindProjection(node, 0); |
2137 | if (result == nullptr || IsDefined(result)) { |
2138 | switch (node->opcode()) { |
2139 | case IrOpcode::kInt32AddWithOverflow: |
2140 | cont->OverwriteAndNegateIfEqual(kOverflow); |
2141 | return VisitBinop(this, node, kX64Add32, cont); |
2142 | case IrOpcode::kInt32SubWithOverflow: |
2143 | cont->OverwriteAndNegateIfEqual(kOverflow); |
2144 | return VisitBinop(this, node, kX64Sub32, cont); |
2145 | case IrOpcode::kInt32MulWithOverflow: |
2146 | cont->OverwriteAndNegateIfEqual(kOverflow); |
2147 | return VisitBinop(this, node, kX64Imul32, cont); |
2148 | case IrOpcode::kInt64AddWithOverflow: |
2149 | cont->OverwriteAndNegateIfEqual(kOverflow); |
2150 | return VisitBinop(this, node, kX64Add, cont); |
2151 | case IrOpcode::kInt64SubWithOverflow: |
2152 | cont->OverwriteAndNegateIfEqual(kOverflow); |
2153 | return VisitBinop(this, node, kX64Sub, cont); |
2154 | default: |
2155 | break; |
2156 | } |
2157 | } |
2158 | } |
2159 | break; |
2160 | case IrOpcode::kInt32Sub: |
2161 | return VisitWordCompare(this, value, kX64Cmp32, cont); |
2162 | case IrOpcode::kWord32And: |
2163 | return VisitWordCompare(this, value, kX64Test32, cont); |
2164 | default: |
2165 | break; |
2166 | } |
2167 | } |
2168 | |
2169 | // Branch could not be combined with a compare, emit compare against 0. |
2170 | VisitCompareZero(this, user, value, kX64Cmp32, cont); |
2171 | } |
2172 | |
2173 | void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { |
2174 | X64OperandGenerator g(this); |
2175 | InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); |
2176 | |
2177 | // Emit either ArchTableSwitch or ArchLookupSwitch. |
2178 | if (enable_switch_jump_table_ == kEnableSwitchJumpTable) { |
2179 | static const size_t kMaxTableSwitchValueRange = 2 << 16; |
2180 | size_t table_space_cost = 4 + sw.value_range(); |
2181 | size_t table_time_cost = 3; |
2182 | size_t lookup_space_cost = 3 + 2 * sw.case_count(); |
2183 | size_t lookup_time_cost = sw.case_count(); |
2184 | if (sw.case_count() > 4 && |
2185 | table_space_cost + 3 * table_time_cost <= |
2186 | lookup_space_cost + 3 * lookup_time_cost && |
2187 | sw.min_value() > std::numeric_limits<int32_t>::min() && |
2188 | sw.value_range() <= kMaxTableSwitchValueRange) { |
2189 | InstructionOperand index_operand = g.TempRegister(); |
2190 | if (sw.min_value()) { |
2191 | // The leal automatically zero extends, so result is a valid 64-bit |
2192 | // index. |
2193 | Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand, |
2194 | value_operand, g.TempImmediate(-sw.min_value())); |
2195 | } else { |
2196 | // Zero extend, because we use it as 64-bit index into the jump table. |
2197 | Emit(kX64Movl, index_operand, value_operand); |
2198 | } |
2199 | // Generate a table lookup. |
2200 | return EmitTableSwitch(sw, index_operand); |
2201 | } |
2202 | } |
2203 | |
2204 | // Generate a tree of conditional jumps. |
2205 | return EmitBinarySearchSwitch(sw, value_operand); |
2206 | } |
2207 | |
2208 | void InstructionSelector::VisitWord32Equal(Node* const node) { |
2209 | Node* user = node; |
2210 | FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); |
2211 | Int32BinopMatcher m(user); |
2212 | if (m.right().Is(0)) { |
2213 | return VisitWordCompareZero(m.node(), m.left().node(), &cont); |
2214 | } |
2215 | VisitWordCompare(this, node, kX64Cmp32, &cont); |
2216 | } |
2217 | |
2218 | void InstructionSelector::VisitInt32LessThan(Node* node) { |
2219 | FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); |
2220 | VisitWordCompare(this, node, kX64Cmp32, &cont); |
2221 | } |
2222 | |
2223 | void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { |
2224 | FlagsContinuation cont = |
2225 | FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); |
2226 | VisitWordCompare(this, node, kX64Cmp32, &cont); |
2227 | } |
2228 | |
2229 | void InstructionSelector::VisitUint32LessThan(Node* node) { |
2230 | FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); |
2231 | VisitWordCompare(this, node, kX64Cmp32, &cont); |
2232 | } |
2233 | |
2234 | void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { |
2235 | FlagsContinuation cont = |
2236 | FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); |
2237 | VisitWordCompare(this, node, kX64Cmp32, &cont); |
2238 | } |
2239 | |
2240 | void InstructionSelector::VisitWord64Equal(Node* const node) { |
2241 | FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); |
2242 | Int64BinopMatcher m(node); |
2243 | if (m.right().Is(0)) { |
2244 | // Try to combine the equality check with a comparison. |
2245 | Node* const user = m.node(); |
2246 | Node* const value = m.left().node(); |
2247 | if (CanCover(user, value)) { |
2248 | switch (value->opcode()) { |
2249 | case IrOpcode::kInt64Sub: |
2250 | return VisitWord64Compare(this, value, &cont); |
2251 | case IrOpcode::kWord64And: |
2252 | return VisitWordCompare(this, value, kX64Test, &cont); |
2253 | default: |
2254 | break; |
2255 | } |
2256 | } |
2257 | } |
2258 | VisitWord64Compare(this, node, &cont); |
2259 | } |
2260 | |
2261 | void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { |
2262 | if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
2263 | FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); |
2264 | return VisitBinop(this, node, kX64Add32, &cont); |
2265 | } |
2266 | FlagsContinuation cont; |
2267 | VisitBinop(this, node, kX64Add32, &cont); |
2268 | } |
2269 | |
2270 | void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { |
2271 | if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
2272 | FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); |
2273 | return VisitBinop(this, node, kX64Sub32, &cont); |
2274 | } |
2275 | FlagsContinuation cont; |
2276 | VisitBinop(this, node, kX64Sub32, &cont); |
2277 | } |
2278 | |
2279 | void InstructionSelector::VisitInt64LessThan(Node* node) { |
2280 | FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); |
2281 | VisitWord64Compare(this, node, &cont); |
2282 | } |
2283 | |
2284 | void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { |
2285 | FlagsContinuation cont = |
2286 | FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); |
2287 | VisitWord64Compare(this, node, &cont); |
2288 | } |
2289 | |
2290 | void InstructionSelector::VisitUint64LessThan(Node* node) { |
2291 | FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); |
2292 | VisitWord64Compare(this, node, &cont); |
2293 | } |
2294 | |
2295 | void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { |
2296 | FlagsContinuation cont = |
2297 | FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); |
2298 | VisitWord64Compare(this, node, &cont); |
2299 | } |
2300 | |
2301 | void InstructionSelector::VisitFloat32Equal(Node* node) { |
2302 | FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node); |
2303 | VisitFloat32Compare(this, node, &cont); |
2304 | } |
2305 | |
2306 | void InstructionSelector::VisitFloat32LessThan(Node* node) { |
2307 | FlagsContinuation cont = |
2308 | FlagsContinuation::ForSet(kUnsignedGreaterThan, node); |
2309 | VisitFloat32Compare(this, node, &cont); |
2310 | } |
2311 | |
2312 | void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { |
2313 | FlagsContinuation cont = |
2314 | FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node); |
2315 | VisitFloat32Compare(this, node, &cont); |
2316 | } |
2317 | |
2318 | void InstructionSelector::VisitFloat64Equal(Node* node) { |
2319 | FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node); |
2320 | VisitFloat64Compare(this, node, &cont); |
2321 | } |
2322 | |
2323 | void InstructionSelector::VisitFloat64LessThan(Node* node) { |
2324 | Float64BinopMatcher m(node); |
2325 | if (m.left().Is(0.0) && m.right().IsFloat64Abs()) { |
2326 | // This matches the pattern |
2327 | // |
2328 | // Float64LessThan(#0.0, Float64Abs(x)) |
2329 | // |
2330 | // which TurboFan generates for NumberToBoolean in the general case, |
2331 | // and which evaluates to false if x is 0, -0 or NaN. We can compile |
2332 | // this to a simple (v)ucomisd using not_equal flags condition, which |
2333 | // avoids the costly Float64Abs. |
2334 | FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node); |
2335 | InstructionCode const opcode = |
2336 | IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp; |
2337 | return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0), |
2338 | &cont, false); |
2339 | } |
2340 | FlagsContinuation cont = |
2341 | FlagsContinuation::ForSet(kUnsignedGreaterThan, node); |
2342 | VisitFloat64Compare(this, node, &cont); |
2343 | } |
2344 | |
2345 | void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { |
2346 | FlagsContinuation cont = |
2347 | FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node); |
2348 | VisitFloat64Compare(this, node, &cont); |
2349 | } |
2350 | |
2351 | void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { |
2352 | X64OperandGenerator g(this); |
2353 | Node* left = node->InputAt(0); |
2354 | Node* right = node->InputAt(1); |
2355 | Float64Matcher mleft(left); |
2356 | if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) { |
2357 | Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right)); |
2358 | return; |
2359 | } |
2360 | Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node), |
2361 | g.UseRegister(left), g.Use(right)); |
2362 | } |
2363 | |
2364 | void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { |
2365 | X64OperandGenerator g(this); |
2366 | Node* left = node->InputAt(0); |
2367 | Node* right = node->InputAt(1); |
2368 | Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node), |
2369 | g.UseRegister(left), g.Use(right)); |
2370 | } |
2371 | |
2372 | void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { |
2373 | X64OperandGenerator g(this); |
2374 | Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node), |
2375 | g.UseRegister(node->InputAt(0))); |
2376 | } |
2377 | |
2378 | void InstructionSelector::VisitWord32AtomicLoad(Node* node) { |
2379 | LoadRepresentation load_rep = LoadRepresentationOf(node->op()); |
2380 | DCHECK(load_rep.representation() == MachineRepresentation::kWord8 || |
2381 | load_rep.representation() == MachineRepresentation::kWord16 || |
2382 | load_rep.representation() == MachineRepresentation::kWord32); |
2383 | USE(load_rep); |
2384 | VisitLoad(node); |
2385 | } |
2386 | |
2387 | void InstructionSelector::VisitWord64AtomicLoad(Node* node) { |
2388 | LoadRepresentation load_rep = LoadRepresentationOf(node->op()); |
2389 | USE(load_rep); |
2390 | VisitLoad(node); |
2391 | } |
2392 | |
2393 | void InstructionSelector::VisitWord32AtomicStore(Node* node) { |
2394 | MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); |
2395 | ArchOpcode opcode = kArchNop; |
2396 | switch (rep) { |
2397 | case MachineRepresentation::kWord8: |
2398 | opcode = kWord32AtomicExchangeInt8; |
2399 | break; |
2400 | case MachineRepresentation::kWord16: |
2401 | opcode = kWord32AtomicExchangeInt16; |
2402 | break; |
2403 | case MachineRepresentation::kWord32: |
2404 | opcode = kWord32AtomicExchangeWord32; |
2405 | break; |
2406 | default: |
2407 | UNREACHABLE(); |
2408 | return; |
2409 | } |
2410 | VisitAtomicExchange(this, node, opcode); |
2411 | } |
2412 | |
2413 | void InstructionSelector::VisitWord64AtomicStore(Node* node) { |
2414 | MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); |
2415 | ArchOpcode opcode = kArchNop; |
2416 | switch (rep) { |
2417 | case MachineRepresentation::kWord8: |
2418 | opcode = kX64Word64AtomicExchangeUint8; |
2419 | break; |
2420 | case MachineRepresentation::kWord16: |
2421 | opcode = kX64Word64AtomicExchangeUint16; |
2422 | break; |
2423 | case MachineRepresentation::kWord32: |
2424 | opcode = kX64Word64AtomicExchangeUint32; |
2425 | break; |
2426 | case MachineRepresentation::kWord64: |
2427 | opcode = kX64Word64AtomicExchangeUint64; |
2428 | break; |
2429 | default: |
2430 | UNREACHABLE(); |
2431 | return; |
2432 | } |
2433 | VisitAtomicExchange(this, node, opcode); |
2434 | } |
2435 | |
2436 | void InstructionSelector::VisitWord32AtomicExchange(Node* node) { |
2437 | MachineType type = AtomicOpType(node->op()); |
2438 | ArchOpcode opcode = kArchNop; |
2439 | if (type == MachineType::Int8()) { |
2440 | opcode = kWord32AtomicExchangeInt8; |
2441 | } else if (type == MachineType::Uint8()) { |
2442 | opcode = kWord32AtomicExchangeUint8; |
2443 | } else if (type == MachineType::Int16()) { |
2444 | opcode = kWord32AtomicExchangeInt16; |
2445 | } else if (type == MachineType::Uint16()) { |
2446 | opcode = kWord32AtomicExchangeUint16; |
2447 | } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { |
2448 | opcode = kWord32AtomicExchangeWord32; |
2449 | } else { |
2450 | UNREACHABLE(); |
2451 | return; |
2452 | } |
2453 | VisitAtomicExchange(this, node, opcode); |
2454 | } |
2455 | |
2456 | void InstructionSelector::VisitWord64AtomicExchange(Node* node) { |
2457 | MachineType type = AtomicOpType(node->op()); |
2458 | ArchOpcode opcode = kArchNop; |
2459 | if (type == MachineType::Uint8()) { |
2460 | opcode = kX64Word64AtomicExchangeUint8; |
2461 | } else if (type == MachineType::Uint16()) { |
2462 | opcode = kX64Word64AtomicExchangeUint16; |
2463 | } else if (type == MachineType::Uint32()) { |
2464 | opcode = kX64Word64AtomicExchangeUint32; |
2465 | } else if (type == MachineType::Uint64()) { |
2466 | opcode = kX64Word64AtomicExchangeUint64; |
2467 | } else { |
2468 | UNREACHABLE(); |
2469 | return; |
2470 | } |
2471 | VisitAtomicExchange(this, node, opcode); |
2472 | } |
2473 | |
2474 | void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { |
2475 | MachineType type = AtomicOpType(node->op()); |
2476 | ArchOpcode opcode = kArchNop; |
2477 | if (type == MachineType::Int8()) { |
2478 | opcode = kWord32AtomicCompareExchangeInt8; |
2479 | } else if (type == MachineType::Uint8()) { |
2480 | opcode = kWord32AtomicCompareExchangeUint8; |
2481 | } else if (type == MachineType::Int16()) { |
2482 | opcode = kWord32AtomicCompareExchangeInt16; |
2483 | } else if (type == MachineType::Uint16()) { |
2484 | opcode = kWord32AtomicCompareExchangeUint16; |
2485 | } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { |
2486 | opcode = kWord32AtomicCompareExchangeWord32; |
2487 | } else { |
2488 | UNREACHABLE(); |
2489 | return; |
2490 | } |
2491 | VisitAtomicCompareExchange(this, node, opcode); |
2492 | } |
2493 | |
2494 | void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { |
2495 | MachineType type = AtomicOpType(node->op()); |
2496 | ArchOpcode opcode = kArchNop; |
2497 | if (type == MachineType::Uint8()) { |
2498 | opcode = kX64Word64AtomicCompareExchangeUint8; |
2499 | } else if (type == MachineType::Uint16()) { |
2500 | opcode = kX64Word64AtomicCompareExchangeUint16; |
2501 | } else if (type == MachineType::Uint32()) { |
2502 | opcode = kX64Word64AtomicCompareExchangeUint32; |
2503 | } else if (type == MachineType::Uint64()) { |
2504 | opcode = kX64Word64AtomicCompareExchangeUint64; |
2505 | } else { |
2506 | UNREACHABLE(); |
2507 | return; |
2508 | } |
2509 | VisitAtomicCompareExchange(this, node, opcode); |
2510 | } |
2511 | |
2512 | void InstructionSelector::VisitWord32AtomicBinaryOperation( |
2513 | Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, |
2514 | ArchOpcode uint16_op, ArchOpcode word32_op) { |
2515 | MachineType type = AtomicOpType(node->op()); |
2516 | ArchOpcode opcode = kArchNop; |
2517 | if (type == MachineType::Int8()) { |
2518 | opcode = int8_op; |
2519 | } else if (type == MachineType::Uint8()) { |
2520 | opcode = uint8_op; |
2521 | } else if (type == MachineType::Int16()) { |
2522 | opcode = int16_op; |
2523 | } else if (type == MachineType::Uint16()) { |
2524 | opcode = uint16_op; |
2525 | } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { |
2526 | opcode = word32_op; |
2527 | } else { |
2528 | UNREACHABLE(); |
2529 | return; |
2530 | } |
2531 | VisitAtomicBinop(this, node, opcode); |
2532 | } |
2533 | |
2534 | #define VISIT_ATOMIC_BINOP(op) \ |
2535 | void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ |
2536 | VisitWord32AtomicBinaryOperation( \ |
2537 | node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \ |
2538 | kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \ |
2539 | kWord32Atomic##op##Word32); \ |
2540 | } |
2541 | VISIT_ATOMIC_BINOP(Add) |
2542 | VISIT_ATOMIC_BINOP(Sub) |
2543 | VISIT_ATOMIC_BINOP(And) |
2544 | VISIT_ATOMIC_BINOP(Or) |
2545 | VISIT_ATOMIC_BINOP(Xor) |
2546 | #undef VISIT_ATOMIC_BINOP |
2547 | |
2548 | void InstructionSelector::VisitWord64AtomicBinaryOperation( |
2549 | Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, |
2550 | ArchOpcode word64_op) { |
2551 | MachineType type = AtomicOpType(node->op()); |
2552 | ArchOpcode opcode = kArchNop; |
2553 | if (type == MachineType::Uint8()) { |
2554 | opcode = uint8_op; |
2555 | } else if (type == MachineType::Uint16()) { |
2556 | opcode = uint16_op; |
2557 | } else if (type == MachineType::Uint32()) { |
2558 | opcode = uint32_op; |
2559 | } else if (type == MachineType::Uint64()) { |
2560 | opcode = word64_op; |
2561 | } else { |
2562 | UNREACHABLE(); |
2563 | return; |
2564 | } |
2565 | VisitAtomicBinop(this, node, opcode); |
2566 | } |
2567 | |
2568 | #define VISIT_ATOMIC_BINOP(op) \ |
2569 | void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ |
2570 | VisitWord64AtomicBinaryOperation( \ |
2571 | node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \ |
2572 | kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \ |
2573 | } |
2574 | VISIT_ATOMIC_BINOP(Add) |
2575 | VISIT_ATOMIC_BINOP(Sub) |
2576 | VISIT_ATOMIC_BINOP(And) |
2577 | VISIT_ATOMIC_BINOP(Or) |
2578 | VISIT_ATOMIC_BINOP(Xor) |
2579 | #undef VISIT_ATOMIC_BINOP |
2580 | |
2581 | #define SIMD_TYPES(V) \ |
2582 | V(F32x4) \ |
2583 | V(I32x4) \ |
2584 | V(I16x8) \ |
2585 | V(I8x16) |
2586 | |
2587 | #define SIMD_BINOP_LIST(V) \ |
2588 | V(F32x4Add) \ |
2589 | V(F32x4AddHoriz) \ |
2590 | V(F32x4Sub) \ |
2591 | V(F32x4Mul) \ |
2592 | V(F32x4Min) \ |
2593 | V(F32x4Max) \ |
2594 | V(F32x4Eq) \ |
2595 | V(F32x4Ne) \ |
2596 | V(F32x4Lt) \ |
2597 | V(F32x4Le) \ |
2598 | V(I32x4Add) \ |
2599 | V(I32x4AddHoriz) \ |
2600 | V(I32x4Sub) \ |
2601 | V(I32x4Mul) \ |
2602 | V(I32x4MinS) \ |
2603 | V(I32x4MaxS) \ |
2604 | V(I32x4Eq) \ |
2605 | V(I32x4Ne) \ |
2606 | V(I32x4GtS) \ |
2607 | V(I32x4GeS) \ |
2608 | V(I32x4MinU) \ |
2609 | V(I32x4MaxU) \ |
2610 | V(I32x4GtU) \ |
2611 | V(I32x4GeU) \ |
2612 | V(I16x8SConvertI32x4) \ |
2613 | V(I16x8Add) \ |
2614 | V(I16x8AddSaturateS) \ |
2615 | V(I16x8AddHoriz) \ |
2616 | V(I16x8Sub) \ |
2617 | V(I16x8SubSaturateS) \ |
2618 | V(I16x8Mul) \ |
2619 | V(I16x8MinS) \ |
2620 | V(I16x8MaxS) \ |
2621 | V(I16x8Eq) \ |
2622 | V(I16x8Ne) \ |
2623 | V(I16x8GtS) \ |
2624 | V(I16x8GeS) \ |
2625 | V(I16x8AddSaturateU) \ |
2626 | V(I16x8SubSaturateU) \ |
2627 | V(I16x8MinU) \ |
2628 | V(I16x8MaxU) \ |
2629 | V(I16x8GtU) \ |
2630 | V(I16x8GeU) \ |
2631 | V(I8x16SConvertI16x8) \ |
2632 | V(I8x16Add) \ |
2633 | V(I8x16AddSaturateS) \ |
2634 | V(I8x16Sub) \ |
2635 | V(I8x16SubSaturateS) \ |
2636 | V(I8x16MinS) \ |
2637 | V(I8x16MaxS) \ |
2638 | V(I8x16Eq) \ |
2639 | V(I8x16Ne) \ |
2640 | V(I8x16GtS) \ |
2641 | V(I8x16GeS) \ |
2642 | V(I8x16AddSaturateU) \ |
2643 | V(I8x16SubSaturateU) \ |
2644 | V(I8x16MinU) \ |
2645 | V(I8x16MaxU) \ |
2646 | V(I8x16GtU) \ |
2647 | V(I8x16GeU) \ |
2648 | V(S128And) \ |
2649 | V(S128Or) \ |
2650 | V(S128Xor) |
2651 | |
2652 | #define SIMD_UNOP_LIST(V) \ |
2653 | V(F32x4SConvertI32x4) \ |
2654 | V(F32x4Abs) \ |
2655 | V(F32x4Neg) \ |
2656 | V(F32x4RecipApprox) \ |
2657 | V(F32x4RecipSqrtApprox) \ |
2658 | V(I32x4SConvertI16x8Low) \ |
2659 | V(I32x4SConvertI16x8High) \ |
2660 | V(I32x4Neg) \ |
2661 | V(I32x4UConvertI16x8Low) \ |
2662 | V(I32x4UConvertI16x8High) \ |
2663 | V(I16x8SConvertI8x16Low) \ |
2664 | V(I16x8SConvertI8x16High) \ |
2665 | V(I16x8Neg) \ |
2666 | V(I16x8UConvertI8x16Low) \ |
2667 | V(I16x8UConvertI8x16High) \ |
2668 | V(I8x16Neg) \ |
2669 | V(S128Not) |
2670 | |
2671 | #define SIMD_SHIFT_OPCODES(V) \ |
2672 | V(I32x4Shl) \ |
2673 | V(I32x4ShrS) \ |
2674 | V(I32x4ShrU) \ |
2675 | V(I16x8Shl) \ |
2676 | V(I16x8ShrS) \ |
2677 | V(I16x8ShrU) \ |
2678 | V(I8x16Shl) \ |
2679 | V(I8x16ShrS) \ |
2680 | V(I8x16ShrU) |
2681 | |
2682 | #define SIMD_ANYTRUE_LIST(V) \ |
2683 | V(S1x4AnyTrue) \ |
2684 | V(S1x8AnyTrue) \ |
2685 | V(S1x16AnyTrue) |
2686 | |
2687 | #define SIMD_ALLTRUE_LIST(V) \ |
2688 | V(S1x4AllTrue) \ |
2689 | V(S1x8AllTrue) \ |
2690 | V(S1x16AllTrue) |
2691 | |
2692 | void InstructionSelector::VisitS128Zero(Node* node) { |
2693 | X64OperandGenerator g(this); |
2694 | Emit(kX64S128Zero, g.DefineAsRegister(node)); |
2695 | } |
2696 | |
2697 | #define VISIT_SIMD_SPLAT(Type) \ |
2698 | void InstructionSelector::Visit##Type##Splat(Node* node) { \ |
2699 | X64OperandGenerator g(this); \ |
2700 | Emit(kX64##Type##Splat, g.DefineAsRegister(node), \ |
2701 | g.Use(node->InputAt(0))); \ |
2702 | } |
2703 | SIMD_TYPES(VISIT_SIMD_SPLAT) |
2704 | #undef VISIT_SIMD_SPLAT |
2705 | |
2706 | #define (Type) \ |
2707 | void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \ |
2708 | X64OperandGenerator g(this); \ |
2709 | int32_t lane = OpParameter<int32_t>(node->op()); \ |
2710 | Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node), \ |
2711 | g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \ |
2712 | } |
2713 | SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE) |
2714 | #undef VISIT_SIMD_EXTRACT_LANE |
2715 | |
2716 | #define VISIT_SIMD_REPLACE_LANE(Type) \ |
2717 | void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ |
2718 | X64OperandGenerator g(this); \ |
2719 | int32_t lane = OpParameter<int32_t>(node->op()); \ |
2720 | Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \ |
2721 | g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \ |
2722 | g.Use(node->InputAt(1))); \ |
2723 | } |
2724 | SIMD_TYPES(VISIT_SIMD_REPLACE_LANE) |
2725 | #undef VISIT_SIMD_REPLACE_LANE |
2726 | |
2727 | #define VISIT_SIMD_SHIFT(Opcode) \ |
2728 | void InstructionSelector::Visit##Opcode(Node* node) { \ |
2729 | X64OperandGenerator g(this); \ |
2730 | int32_t value = OpParameter<int32_t>(node->op()); \ |
2731 | Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ |
2732 | g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \ |
2733 | } |
2734 | SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT) |
2735 | #undef VISIT_SIMD_SHIFT |
2736 | #undef SIMD_SHIFT_OPCODES |
2737 | |
2738 | #define VISIT_SIMD_UNOP(Opcode) \ |
2739 | void InstructionSelector::Visit##Opcode(Node* node) { \ |
2740 | X64OperandGenerator g(this); \ |
2741 | Emit(kX64##Opcode, g.DefineAsRegister(node), \ |
2742 | g.UseRegister(node->InputAt(0))); \ |
2743 | } |
2744 | SIMD_UNOP_LIST(VISIT_SIMD_UNOP) |
2745 | #undef VISIT_SIMD_UNOP |
2746 | #undef SIMD_UNOP_LIST |
2747 | |
2748 | #define VISIT_SIMD_BINOP(Opcode) \ |
2749 | void InstructionSelector::Visit##Opcode(Node* node) { \ |
2750 | X64OperandGenerator g(this); \ |
2751 | Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ |
2752 | g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \ |
2753 | } |
2754 | SIMD_BINOP_LIST(VISIT_SIMD_BINOP) |
2755 | #undef VISIT_SIMD_BINOP |
2756 | #undef SIMD_BINOP_LIST |
2757 | |
2758 | #define VISIT_SIMD_ANYTRUE(Opcode) \ |
2759 | void InstructionSelector::Visit##Opcode(Node* node) { \ |
2760 | X64OperandGenerator g(this); \ |
2761 | InstructionOperand temps[] = {g.TempRegister()}; \ |
2762 | Emit(kX64##Opcode, g.DefineAsRegister(node), \ |
2763 | g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \ |
2764 | } |
2765 | SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE) |
2766 | #undef VISIT_SIMD_ANYTRUE |
2767 | #undef SIMD_ANYTRUE_LIST |
2768 | |
2769 | #define VISIT_SIMD_ALLTRUE(Opcode) \ |
2770 | void InstructionSelector::Visit##Opcode(Node* node) { \ |
2771 | X64OperandGenerator g(this); \ |
2772 | InstructionOperand temps[] = {g.TempRegister()}; \ |
2773 | Emit(kX64##Opcode, g.DefineAsRegister(node), \ |
2774 | g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \ |
2775 | } |
2776 | SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE) |
2777 | #undef VISIT_SIMD_ALLTRUE |
2778 | #undef SIMD_ALLTRUE_LIST |
2779 | #undef SIMD_TYPES |
2780 | |
2781 | void InstructionSelector::VisitS128Select(Node* node) { |
2782 | X64OperandGenerator g(this); |
2783 | Emit(kX64S128Select, g.DefineSameAsFirst(node), |
2784 | g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), |
2785 | g.UseRegister(node->InputAt(2))); |
2786 | } |
2787 | |
2788 | void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { |
2789 | X64OperandGenerator g(this); |
2790 | Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node), |
2791 | g.UseRegister(node->InputAt(0))); |
2792 | } |
2793 | |
2794 | void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) { |
2795 | X64OperandGenerator g(this); |
2796 | Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node), |
2797 | g.UseRegister(node->InputAt(0))); |
2798 | } |
2799 | |
2800 | void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) { |
2801 | X64OperandGenerator g(this); |
2802 | InstructionOperand temps[] = {g.TempSimd128Register()}; |
2803 | Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node), |
2804 | g.UseRegister(node->InputAt(0)), arraysize(temps), temps); |
2805 | } |
2806 | |
2807 | void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) { |
2808 | X64OperandGenerator g(this); |
2809 | Emit(kX64I16x8UConvertI32x4, g.DefineSameAsFirst(node), |
2810 | g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); |
2811 | } |
2812 | |
2813 | void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) { |
2814 | X64OperandGenerator g(this); |
2815 | Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node), |
2816 | g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); |
2817 | } |
2818 | |
2819 | void InstructionSelector::VisitI8x16Mul(Node* node) { |
2820 | X64OperandGenerator g(this); |
2821 | InstructionOperand temps[] = {g.TempSimd128Register()}; |
2822 | Emit(kX64I8x16Mul, g.DefineSameAsFirst(node), |
2823 | g.UseUniqueRegister(node->InputAt(0)), |
2824 | g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); |
2825 | } |
2826 | |
2827 | void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { |
2828 | UNREACHABLE(); |
2829 | } |
2830 | |
2831 | void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { |
2832 | UNREACHABLE(); |
2833 | } |
2834 | |
2835 | namespace { |
2836 | |
2837 | // Packs a 4 lane shuffle into a single imm8 suitable for use by pshufd, |
2838 | // pshuflw, and pshufhw. |
2839 | uint8_t PackShuffle4(uint8_t* shuffle) { |
2840 | return (shuffle[0] & 3) | ((shuffle[1] & 3) << 2) | ((shuffle[2] & 3) << 4) | |
2841 | ((shuffle[3] & 3) << 6); |
2842 | } |
2843 | |
2844 | // Gets an 8 bit lane mask suitable for 16x8 pblendw. |
2845 | uint8_t PackBlend8(const uint8_t* shuffle16x8) { |
2846 | int8_t result = 0; |
2847 | for (int i = 0; i < 8; ++i) { |
2848 | result |= (shuffle16x8[i] >= 8 ? 1 : 0) << i; |
2849 | } |
2850 | return result; |
2851 | } |
2852 | |
2853 | // Gets an 8 bit lane mask suitable for 32x4 pblendw. |
2854 | uint8_t PackBlend4(const uint8_t* shuffle32x4) { |
2855 | int8_t result = 0; |
2856 | for (int i = 0; i < 4; ++i) { |
2857 | result |= (shuffle32x4[i] >= 4 ? 0x3 : 0) << (i * 2); |
2858 | } |
2859 | return result; |
2860 | } |
2861 | |
2862 | // Returns true if shuffle can be decomposed into two 16x4 half shuffles |
2863 | // followed by a 16x8 blend. |
2864 | // E.g. [3 2 1 0 15 14 13 12]. |
2865 | bool TryMatch16x8HalfShuffle(uint8_t* shuffle16x8, uint8_t* blend_mask) { |
2866 | *blend_mask = 0; |
2867 | for (int i = 0; i < 8; i++) { |
2868 | if ((shuffle16x8[i] & 0x4) != (i & 0x4)) return false; |
2869 | *blend_mask |= (shuffle16x8[i] > 7 ? 1 : 0) << i; |
2870 | } |
2871 | return true; |
2872 | } |
2873 | |
2874 | struct ShuffleEntry { |
2875 | uint8_t shuffle[kSimd128Size]; |
2876 | ArchOpcode opcode; |
2877 | bool src0_needs_reg; |
2878 | bool src1_needs_reg; |
2879 | }; |
2880 | |
2881 | // Shuffles that map to architecture-specific instruction sequences. These are |
2882 | // matched very early, so we shouldn't include shuffles that match better in |
2883 | // later tests, like 32x4 and 16x8 shuffles. In general, these patterns should |
2884 | // map to either a single instruction, or be finer grained, such as zip/unzip or |
2885 | // transpose patterns. |
2886 | static const ShuffleEntry arch_shuffles[] = { |
2887 | {{0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23}, |
2888 | kX64S64x2UnpackLow, |
2889 | true, |
2890 | false}, |
2891 | {{8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31}, |
2892 | kX64S64x2UnpackHigh, |
2893 | true, |
2894 | false}, |
2895 | {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, |
2896 | kX64S32x4UnpackLow, |
2897 | true, |
2898 | false}, |
2899 | {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, |
2900 | kX64S32x4UnpackHigh, |
2901 | true, |
2902 | false}, |
2903 | {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, |
2904 | kX64S16x8UnpackLow, |
2905 | true, |
2906 | false}, |
2907 | {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, |
2908 | kX64S16x8UnpackHigh, |
2909 | true, |
2910 | false}, |
2911 | {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, |
2912 | kX64S8x16UnpackLow, |
2913 | true, |
2914 | false}, |
2915 | {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, |
2916 | kX64S8x16UnpackHigh, |
2917 | true, |
2918 | false}, |
2919 | |
2920 | {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, |
2921 | kX64S16x8UnzipLow, |
2922 | true, |
2923 | false}, |
2924 | {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, |
2925 | kX64S16x8UnzipHigh, |
2926 | true, |
2927 | true}, |
2928 | {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, |
2929 | kX64S8x16UnzipLow, |
2930 | true, |
2931 | true}, |
2932 | {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, |
2933 | kX64S8x16UnzipHigh, |
2934 | true, |
2935 | true}, |
2936 | {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, |
2937 | kX64S8x16TransposeLow, |
2938 | true, |
2939 | true}, |
2940 | {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, |
2941 | kX64S8x16TransposeHigh, |
2942 | true, |
2943 | true}, |
2944 | {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, |
2945 | kX64S8x8Reverse, |
2946 | false, |
2947 | false}, |
2948 | {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, |
2949 | kX64S8x4Reverse, |
2950 | false, |
2951 | false}, |
2952 | {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, |
2953 | kX64S8x2Reverse, |
2954 | true, |
2955 | true}}; |
2956 | |
2957 | bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, |
2958 | size_t num_entries, bool is_swizzle, |
2959 | const ShuffleEntry** arch_shuffle) { |
2960 | uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1; |
2961 | for (size_t i = 0; i < num_entries; ++i) { |
2962 | const ShuffleEntry& entry = table[i]; |
2963 | int j = 0; |
2964 | for (; j < kSimd128Size; ++j) { |
2965 | if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) { |
2966 | break; |
2967 | } |
2968 | } |
2969 | if (j == kSimd128Size) { |
2970 | *arch_shuffle = &entry; |
2971 | return true; |
2972 | } |
2973 | } |
2974 | return false; |
2975 | } |
2976 | |
2977 | } // namespace |
2978 | |
2979 | void InstructionSelector::VisitS8x16Shuffle(Node* node) { |
2980 | uint8_t shuffle[kSimd128Size]; |
2981 | bool is_swizzle; |
2982 | CanonicalizeShuffle(node, shuffle, &is_swizzle); |
2983 | |
2984 | int imm_count = 0; |
2985 | static const int kMaxImms = 6; |
2986 | uint32_t imms[kMaxImms]; |
2987 | int temp_count = 0; |
2988 | static const int kMaxTemps = 2; |
2989 | InstructionOperand temps[kMaxTemps]; |
2990 | |
2991 | X64OperandGenerator g(this); |
2992 | // Swizzles don't generally need DefineSameAsFirst to avoid a move. |
2993 | bool no_same_as_first = is_swizzle; |
2994 | // We generally need UseRegister for input0, Use for input1. |
2995 | bool src0_needs_reg = true; |
2996 | bool src1_needs_reg = false; |
2997 | ArchOpcode opcode = kX64S8x16Shuffle; // general shuffle is the default |
2998 | |
2999 | uint8_t offset; |
3000 | uint8_t shuffle32x4[4]; |
3001 | uint8_t shuffle16x8[8]; |
3002 | int index; |
3003 | const ShuffleEntry* arch_shuffle; |
3004 | if (TryMatchConcat(shuffle, &offset)) { |
3005 | // Swap inputs from the normal order for (v)palignr. |
3006 | SwapShuffleInputs(node); |
3007 | is_swizzle = false; // It's simpler to just handle the general case. |
3008 | no_same_as_first = false; // SSE requires same-as-first. |
3009 | opcode = kX64S8x16Alignr; |
3010 | // palignr takes a single imm8 offset. |
3011 | imms[imm_count++] = offset; |
3012 | } else if (TryMatchArchShuffle(shuffle, arch_shuffles, |
3013 | arraysize(arch_shuffles), is_swizzle, |
3014 | &arch_shuffle)) { |
3015 | opcode = arch_shuffle->opcode; |
3016 | src0_needs_reg = arch_shuffle->src0_needs_reg; |
3017 | // SSE can't take advantage of both operands in registers and needs |
3018 | // same-as-first. |
3019 | src1_needs_reg = false; |
3020 | no_same_as_first = false; |
3021 | } else if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) { |
3022 | uint8_t shuffle_mask = PackShuffle4(shuffle32x4); |
3023 | if (is_swizzle) { |
3024 | if (TryMatchIdentity(shuffle)) { |
3025 | // Bypass normal shuffle code generation in this case. |
3026 | EmitIdentity(node); |
3027 | return; |
3028 | } else { |
3029 | // pshufd takes a single imm8 shuffle mask. |
3030 | opcode = kX64S32x4Swizzle; |
3031 | no_same_as_first = true; |
3032 | src0_needs_reg = false; |
3033 | imms[imm_count++] = shuffle_mask; |
3034 | } |
3035 | } else { |
3036 | // 2 operand shuffle |
3037 | // A blend is more efficient than a general 32x4 shuffle; try it first. |
3038 | if (TryMatchBlend(shuffle)) { |
3039 | opcode = kX64S16x8Blend; |
3040 | uint8_t blend_mask = PackBlend4(shuffle32x4); |
3041 | imms[imm_count++] = blend_mask; |
3042 | } else { |
3043 | opcode = kX64S32x4Shuffle; |
3044 | no_same_as_first = true; |
3045 | src0_needs_reg = false; |
3046 | imms[imm_count++] = shuffle_mask; |
3047 | int8_t blend_mask = PackBlend4(shuffle32x4); |
3048 | imms[imm_count++] = blend_mask; |
3049 | } |
3050 | } |
3051 | } else if (TryMatch16x8Shuffle(shuffle, shuffle16x8)) { |
3052 | uint8_t blend_mask; |
3053 | if (TryMatchBlend(shuffle)) { |
3054 | opcode = kX64S16x8Blend; |
3055 | blend_mask = PackBlend8(shuffle16x8); |
3056 | imms[imm_count++] = blend_mask; |
3057 | } else if (TryMatchDup<8>(shuffle, &index)) { |
3058 | opcode = kX64S16x8Dup; |
3059 | src0_needs_reg = false; |
3060 | imms[imm_count++] = index; |
3061 | } else if (TryMatch16x8HalfShuffle(shuffle16x8, &blend_mask)) { |
3062 | opcode = is_swizzle ? kX64S16x8HalfShuffle1 : kX64S16x8HalfShuffle2; |
3063 | // Half-shuffles don't need DefineSameAsFirst or UseRegister(src0). |
3064 | no_same_as_first = true; |
3065 | src0_needs_reg = false; |
3066 | uint8_t mask_lo = PackShuffle4(shuffle16x8); |
3067 | uint8_t mask_hi = PackShuffle4(shuffle16x8 + 4); |
3068 | imms[imm_count++] = mask_lo; |
3069 | imms[imm_count++] = mask_hi; |
3070 | if (!is_swizzle) imms[imm_count++] = blend_mask; |
3071 | } |
3072 | } else if (TryMatchDup<16>(shuffle, &index)) { |
3073 | opcode = kX64S8x16Dup; |
3074 | no_same_as_first = false; |
3075 | src0_needs_reg = true; |
3076 | imms[imm_count++] = index; |
3077 | } |
3078 | if (opcode == kX64S8x16Shuffle) { |
3079 | // Use same-as-first for general swizzle, but not shuffle. |
3080 | no_same_as_first = !is_swizzle; |
3081 | src0_needs_reg = !no_same_as_first; |
3082 | imms[imm_count++] = Pack4Lanes(shuffle); |
3083 | imms[imm_count++] = Pack4Lanes(shuffle + 4); |
3084 | imms[imm_count++] = Pack4Lanes(shuffle + 8); |
3085 | imms[imm_count++] = Pack4Lanes(shuffle + 12); |
3086 | temps[temp_count++] = g.TempRegister(); |
3087 | } |
3088 | |
3089 | // Use DefineAsRegister(node) and Use(src0) if we can without forcing an extra |
3090 | // move instruction in the CodeGenerator. |
3091 | Node* input0 = node->InputAt(0); |
3092 | InstructionOperand dst = |
3093 | no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node); |
3094 | InstructionOperand src0 = |
3095 | src0_needs_reg ? g.UseRegister(input0) : g.Use(input0); |
3096 | |
3097 | int input_count = 0; |
3098 | InstructionOperand inputs[2 + kMaxImms + kMaxTemps]; |
3099 | inputs[input_count++] = src0; |
3100 | if (!is_swizzle) { |
3101 | Node* input1 = node->InputAt(1); |
3102 | inputs[input_count++] = |
3103 | src1_needs_reg ? g.UseRegister(input1) : g.Use(input1); |
3104 | } |
3105 | for (int i = 0; i < imm_count; ++i) { |
3106 | inputs[input_count++] = g.UseImmediate(imms[i]); |
3107 | } |
3108 | Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps); |
3109 | } |
3110 | |
3111 | // static |
3112 | MachineOperatorBuilder::Flags |
3113 | InstructionSelector::SupportedMachineOperatorFlags() { |
3114 | MachineOperatorBuilder::Flags flags = |
3115 | MachineOperatorBuilder::kWord32ShiftIsSafe | |
3116 | MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz; |
3117 | if (CpuFeatures::IsSupported(POPCNT)) { |
3118 | flags |= MachineOperatorBuilder::kWord32Popcnt | |
3119 | MachineOperatorBuilder::kWord64Popcnt; |
3120 | } |
3121 | if (CpuFeatures::IsSupported(SSE4_1)) { |
3122 | flags |= MachineOperatorBuilder::kFloat32RoundDown | |
3123 | MachineOperatorBuilder::kFloat64RoundDown | |
3124 | MachineOperatorBuilder::kFloat32RoundUp | |
3125 | MachineOperatorBuilder::kFloat64RoundUp | |
3126 | MachineOperatorBuilder::kFloat32RoundTruncate | |
3127 | MachineOperatorBuilder::kFloat64RoundTruncate | |
3128 | MachineOperatorBuilder::kFloat32RoundTiesEven | |
3129 | MachineOperatorBuilder::kFloat64RoundTiesEven; |
3130 | } |
3131 | return flags; |
3132 | } |
3133 | |
3134 | // static |
3135 | MachineOperatorBuilder::AlignmentRequirements |
3136 | InstructionSelector::AlignmentRequirements() { |
3137 | return MachineOperatorBuilder::AlignmentRequirements:: |
3138 | FullUnalignedAccessSupport(); |
3139 | } |
3140 | |
3141 | } // namespace compiler |
3142 | } // namespace internal |
3143 | } // namespace v8 |
3144 | |