// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
// (An illustrative expansion sketch follows the list below.)
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add) \
  V(X64Add32) \
  V(X64And) \
  V(X64And32) \
  V(X64Cmp) \
  V(X64Cmp32) \
  V(X64Cmp16) \
  V(X64Cmp8) \
  V(X64Test) \
  V(X64Test32) \
  V(X64Test16) \
  V(X64Test8) \
  V(X64Or) \
  V(X64Or32) \
  V(X64Xor) \
  V(X64Xor32) \
  V(X64Sub) \
  V(X64Sub32) \
  V(X64Imul) \
  V(X64Imul32) \
  V(X64ImulHigh32) \
  V(X64UmulHigh32) \
  V(X64Idiv) \
  V(X64Idiv32) \
  V(X64Udiv) \
  V(X64Udiv32) \
  V(X64Not) \
  V(X64Not32) \
  V(X64Neg) \
  V(X64Neg32) \
  V(X64Shl) \
  V(X64Shl32) \
  V(X64Shr) \
  V(X64Shr32) \
  V(X64Sar) \
  V(X64Sar32) \
  V(X64Ror) \
  V(X64Ror32) \
  V(X64Lzcnt) \
  V(X64Lzcnt32) \
  V(X64Tzcnt) \
  V(X64Tzcnt32) \
  V(X64Popcnt) \
  V(X64Popcnt32) \
  V(X64Bswap) \
  V(X64Bswap32) \
  V(LFence) \
  V(SSEFloat32Cmp) \
  V(SSEFloat32Add) \
  V(SSEFloat32Sub) \
  V(SSEFloat32Mul) \
  V(SSEFloat32Div) \
  V(SSEFloat32Abs) \
  V(SSEFloat32Neg) \
  V(SSEFloat32Sqrt) \
  V(SSEFloat32ToFloat64) \
  V(SSEFloat32ToInt32) \
  V(SSEFloat32ToUint32) \
  V(SSEFloat32Round) \
  V(SSEFloat64Cmp) \
  V(SSEFloat64Add) \
  V(SSEFloat64Sub) \
  V(SSEFloat64Mul) \
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Abs) \
  V(SSEFloat64Neg) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Round) \
  V(SSEFloat32Max) \
  V(SSEFloat64Max) \
  V(SSEFloat32Min) \
  V(SSEFloat64Min) \
  V(SSEFloat64ToFloat32) \
  V(SSEFloat64ToInt32) \
  V(SSEFloat64ToUint32) \
  V(SSEFloat32ToInt64) \
  V(SSEFloat64ToInt64) \
  V(SSEFloat32ToUint64) \
  V(SSEFloat64ToUint64) \
  V(SSEInt32ToFloat64) \
  V(SSEInt32ToFloat32) \
  V(SSEInt64ToFloat32) \
  V(SSEInt64ToFloat64) \
  V(SSEUint64ToFloat32) \
  V(SSEUint64ToFloat64) \
  V(SSEUint32ToFloat64) \
  V(SSEUint32ToFloat32) \
  V(SSEFloat64ExtractLowWord32) \
  V(SSEFloat64ExtractHighWord32) \
  V(SSEFloat64InsertLowWord32) \
  V(SSEFloat64InsertHighWord32) \
  V(SSEFloat64LoadLowWord32) \
  V(SSEFloat64SilenceNaN) \
  V(AVXFloat32Cmp) \
  V(AVXFloat32Add) \
  V(AVXFloat32Sub) \
  V(AVXFloat32Mul) \
  V(AVXFloat32Div) \
  V(AVXFloat64Cmp) \
  V(AVXFloat64Add) \
  V(AVXFloat64Sub) \
  V(AVXFloat64Mul) \
  V(AVXFloat64Div) \
  V(AVXFloat64Abs) \
  V(AVXFloat64Neg) \
  V(AVXFloat32Abs) \
  V(AVXFloat32Neg) \
  V(X64Movsxbl) \
  V(X64Movzxbl) \
  V(X64Movsxbq) \
  V(X64Movzxbq) \
  V(X64Movb) \
  V(X64Movsxwl) \
  V(X64Movzxwl) \
  V(X64Movsxwq) \
  V(X64Movzxwq) \
  V(X64Movw) \
  V(X64Movl) \
  V(X64Movsxlq) \
  V(X64MovqDecompressTaggedSigned) \
  V(X64MovqDecompressTaggedPointer) \
  V(X64MovqDecompressAnyTagged) \
  V(X64MovqCompressTagged) \
  V(X64DecompressSigned) \
  V(X64DecompressPointer) \
  V(X64DecompressAny) \
  V(X64CompressSigned) \
  V(X64CompressPointer) \
  V(X64CompressAny) \
  V(X64Movq) \
  V(X64Movsd) \
  V(X64Movss) \
  V(X64Movdqu) \
  V(X64BitcastFI) \
  V(X64BitcastDL) \
  V(X64BitcastIF) \
  V(X64BitcastLD) \
  V(X64Lea32) \
  V(X64Lea) \
  V(X64Dec32) \
  V(X64Inc32) \
  V(X64Push) \
  V(X64Poke) \
  V(X64Peek) \
  V(X64StackCheck) \
  V(X64F32x4Splat) \
  V(X64F32x4ExtractLane) \
  V(X64F32x4ReplaceLane) \
  V(X64F32x4SConvertI32x4) \
  V(X64F32x4UConvertI32x4) \
  V(X64F32x4Abs) \
  V(X64F32x4Neg) \
  V(X64F32x4RecipApprox) \
  V(X64F32x4RecipSqrtApprox) \
  V(X64F32x4Add) \
  V(X64F32x4AddHoriz) \
  V(X64F32x4Sub) \
  V(X64F32x4Mul) \
  V(X64F32x4Min) \
  V(X64F32x4Max) \
  V(X64F32x4Eq) \
  V(X64F32x4Ne) \
  V(X64F32x4Lt) \
  V(X64F32x4Le) \
  V(X64I32x4Splat) \
  V(X64I32x4ExtractLane) \
  V(X64I32x4ReplaceLane) \
  V(X64I32x4SConvertF32x4) \
  V(X64I32x4SConvertI16x8Low) \
  V(X64I32x4SConvertI16x8High) \
  V(X64I32x4Neg) \
  V(X64I32x4Shl) \
  V(X64I32x4ShrS) \
  V(X64I32x4Add) \
  V(X64I32x4AddHoriz) \
  V(X64I32x4Sub) \
  V(X64I32x4Mul) \
  V(X64I32x4MinS) \
  V(X64I32x4MaxS) \
  V(X64I32x4Eq) \
  V(X64I32x4Ne) \
  V(X64I32x4GtS) \
  V(X64I32x4GeS) \
  V(X64I32x4UConvertF32x4) \
  V(X64I32x4UConvertI16x8Low) \
  V(X64I32x4UConvertI16x8High) \
  V(X64I32x4ShrU) \
  V(X64I32x4MinU) \
  V(X64I32x4MaxU) \
  V(X64I32x4GtU) \
  V(X64I32x4GeU) \
  V(X64I16x8Splat) \
  V(X64I16x8ExtractLane) \
  V(X64I16x8ReplaceLane) \
  V(X64I16x8SConvertI8x16Low) \
  V(X64I16x8SConvertI8x16High) \
  V(X64I16x8Neg) \
  V(X64I16x8Shl) \
  V(X64I16x8ShrS) \
  V(X64I16x8SConvertI32x4) \
  V(X64I16x8Add) \
  V(X64I16x8AddSaturateS) \
  V(X64I16x8AddHoriz) \
  V(X64I16x8Sub) \
  V(X64I16x8SubSaturateS) \
  V(X64I16x8Mul) \
  V(X64I16x8MinS) \
  V(X64I16x8MaxS) \
  V(X64I16x8Eq) \
  V(X64I16x8Ne) \
  V(X64I16x8GtS) \
  V(X64I16x8GeS) \
  V(X64I16x8UConvertI8x16Low) \
  V(X64I16x8UConvertI8x16High) \
  V(X64I16x8ShrU) \
  V(X64I16x8UConvertI32x4) \
  V(X64I16x8AddSaturateU) \
  V(X64I16x8SubSaturateU) \
  V(X64I16x8MinU) \
  V(X64I16x8MaxU) \
  V(X64I16x8GtU) \
  V(X64I16x8GeU) \
  V(X64I8x16Splat) \
  V(X64I8x16ExtractLane) \
  V(X64I8x16ReplaceLane) \
  V(X64I8x16SConvertI16x8) \
  V(X64I8x16Neg) \
  V(X64I8x16Shl) \
  V(X64I8x16ShrS) \
  V(X64I8x16Add) \
  V(X64I8x16AddSaturateS) \
  V(X64I8x16Sub) \
  V(X64I8x16SubSaturateS) \
  V(X64I8x16Mul) \
  V(X64I8x16MinS) \
  V(X64I8x16MaxS) \
  V(X64I8x16Eq) \
  V(X64I8x16Ne) \
  V(X64I8x16GtS) \
  V(X64I8x16GeS) \
  V(X64I8x16UConvertI16x8) \
  V(X64I8x16AddSaturateU) \
  V(X64I8x16SubSaturateU) \
  V(X64I8x16ShrU) \
  V(X64I8x16MinU) \
  V(X64I8x16MaxU) \
  V(X64I8x16GtU) \
  V(X64I8x16GeU) \
  V(X64S128Zero) \
  V(X64S128Not) \
  V(X64S128And) \
  V(X64S128Or) \
  V(X64S128Xor) \
  V(X64S128Select) \
  V(X64S8x16Shuffle) \
  V(X64S32x4Swizzle) \
  V(X64S32x4Shuffle) \
  V(X64S16x8Blend) \
  V(X64S16x8HalfShuffle1) \
  V(X64S16x8HalfShuffle2) \
  V(X64S8x16Alignr) \
  V(X64S16x8Dup) \
  V(X64S8x16Dup) \
  V(X64S16x8UnzipHigh) \
  V(X64S16x8UnzipLow) \
  V(X64S8x16UnzipHigh) \
  V(X64S8x16UnzipLow) \
  V(X64S64x2UnpackHigh) \
  V(X64S32x4UnpackHigh) \
  V(X64S16x8UnpackHigh) \
  V(X64S8x16UnpackHigh) \
  V(X64S64x2UnpackLow) \
  V(X64S32x4UnpackLow) \
  V(X64S16x8UnpackLow) \
  V(X64S8x16UnpackLow) \
  V(X64S8x16TransposeLow) \
  V(X64S8x16TransposeHigh) \
  V(X64S8x8Reverse) \
  V(X64S8x4Reverse) \
  V(X64S8x2Reverse) \
  V(X64S1x4AnyTrue) \
  V(X64S1x4AllTrue) \
  V(X64S1x8AnyTrue) \
  V(X64S1x8AllTrue) \
  V(X64S1x16AnyTrue) \
  V(X64S1x16AllTrue) \
  V(X64Word64AtomicLoadUint8) \
  V(X64Word64AtomicLoadUint16) \
  V(X64Word64AtomicLoadUint32) \
  V(X64Word64AtomicLoadUint64) \
  V(X64Word64AtomicStoreWord8) \
  V(X64Word64AtomicStoreWord16) \
  V(X64Word64AtomicStoreWord32) \
  V(X64Word64AtomicStoreWord64) \
  V(X64Word64AtomicAddUint8) \
  V(X64Word64AtomicAddUint16) \
  V(X64Word64AtomicAddUint32) \
  V(X64Word64AtomicAddUint64) \
  V(X64Word64AtomicSubUint8) \
  V(X64Word64AtomicSubUint16) \
  V(X64Word64AtomicSubUint32) \
  V(X64Word64AtomicSubUint64) \
  V(X64Word64AtomicAndUint8) \
  V(X64Word64AtomicAndUint16) \
  V(X64Word64AtomicAndUint32) \
  V(X64Word64AtomicAndUint64) \
  V(X64Word64AtomicOrUint8) \
  V(X64Word64AtomicOrUint16) \
  V(X64Word64AtomicOrUint32) \
  V(X64Word64AtomicOrUint64) \
  V(X64Word64AtomicXorUint8) \
  V(X64Word64AtomicXorUint16) \
  V(X64Word64AtomicXorUint32) \
  V(X64Word64AtomicXorUint64) \
  V(X64Word64AtomicExchangeUint8) \
  V(X64Word64AtomicExchangeUint16) \
  V(X64Word64AtomicExchangeUint32) \
  V(X64Word64AtomicExchangeUint64) \
  V(X64Word64AtomicCompareExchangeUint8) \
  V(X64Word64AtomicCompareExchangeUint16) \
  V(X64Word64AtomicCompareExchangeUint32) \
  V(X64Word64AtomicCompareExchangeUint64)
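
// For illustration only; this sketch is not part of the original header.
// An X-macro list like the one above is typically consumed by the shared,
// architecture-independent instruction-codes.h, which expands each V(Name)
// entry into an enumerator of the ArchOpcode enum, roughly along these
// lines:
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// so that, e.g., V(X64Add32) becomes the opcode kX64Add32, which the code
// generator switches over to pick the assembly sequence to emit.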

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
//    M = memory operand
//    R = base register
//    N = index register * N for N in {1, 2, 4, 8}
//    I = immediate displacement (32-bit signed integer)
//
// (An illustrative decoding sketch follows the list below.)

#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */      \
  V(Root) /* [%root       + K] */
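
// For illustration only; this sketch is not part of the original header.
// It shows how a mode such as kMode_MR4I is typically decoded after
// register allocation: the AddressingMode is read out of the
// InstructionCode and mapped onto the assembler's Operand constructor
// (base, index, scale, displacement). The converter accessors shown
// (InputRegister, InputInt32) are assumed from the backend's
// InstructionOperandConverter; exact names may differ.
//
//   case kMode_MR4I:  // [%r1 + %r2*4 + K]
//     return Operand(i.InputRegister(0),  // %r1: base register
//                    i.InputRegister(1),  // %r2: index register
//                    times_4,             // scale factor N = 4
//                    i.InputInt32(2));    // K: 32-bit displacement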

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_