// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_

#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kHasSideEffect = 1,    // The instruction has some side effects (memory
                         // store, function call...)
  kIsLoadOperation = 2,  // The instruction is a memory load.
  kMayNeedDeoptOrTrapCheck = 4,  // The instruction may be associated with a
                                 // deopt or trap check which must be run
                                 // before the instruction, e.g. div on Intel
                                 // platforms, which raises an exception when
                                 // the divisor is zero.
};
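
// Note that the flags are bit values and can be combined: a backend's
// GetTargetInstructionFlags() might, for instance, report an instruction that
// both reads memory and needs a trap check as
// (kIsLoadOperation | kMayNeedDeoptOrTrapCheck). This is an illustrative
// example only, not a statement about any particular backend.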

class InstructionScheduler final : public ZoneObject {
 public:
  V8_EXPORT_PRIVATE InstructionScheduler(Zone* zone,
                                         InstructionSequence* sequence);

  V8_EXPORT_PRIVATE void StartBlock(RpoNumber rpo);
  V8_EXPORT_PRIVATE void EndBlock(RpoNumber rpo);

  V8_EXPORT_PRIVATE void AddInstruction(Instruction* instr);
  V8_EXPORT_PRIVATE void AddTerminator(Instruction* instr);

  static bool SchedulerSupported();
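
  // Typical driving sequence (a sketch; the exact call sites are assumed to
  // live in the instruction selection phase and are not defined here):
  //
  //   if (InstructionScheduler::SchedulerSupported()) {
  //     scheduler->StartBlock(rpo);
  //     // ... AddInstruction() for every instruction of the block ...
  //     scheduler->AddTerminator(terminator);
  //     scheduler->EndBlock(rpo);  // Schedules the accumulated instructions.
  //   }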

 private:
  // A scheduling graph node.
  // Represents an instruction and its dependencies.
  class ScheduleGraphNode : public ZoneObject {
   public:
    ScheduleGraphNode(Zone* zone, Instruction* instr);

    // Mark the instruction represented by 'node' as a dependency of this one.
    // The current instruction will be registered as an unscheduled predecessor
    // of 'node' (i.e. it must be scheduled before 'node').
    void AddSuccessor(ScheduleGraphNode* node);
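
    // For example (illustrative only): while the graph is built, the node
    // defining a virtual register calls AddSuccessor() with the node of each
    // instruction that uses this register, so the definition is always
    // scheduled before its uses.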

    // Check whether this node still has unscheduled predecessors.
    bool HasUnscheduledPredecessor() {
      return unscheduled_predecessors_count_ != 0;
    }

    // Record that we have scheduled one of the predecessors of this node.
    void DropUnscheduledPredecessor() {
      DCHECK_LT(0, unscheduled_predecessors_count_);
      unscheduled_predecessors_count_--;
    }

    Instruction* instruction() { return instr_; }
    ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
    int latency() const { return latency_; }

    int total_latency() const { return total_latency_; }
    void set_total_latency(int latency) { total_latency_ = latency; }

    int start_cycle() const { return start_cycle_; }
    void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }

   private:
    Instruction* instr_;
    ZoneDeque<ScheduleGraphNode*> successors_;

    // Number of unscheduled predecessors for this node.
    int unscheduled_predecessors_count_;

    // Estimate of the instruction latency (the number of cycles it takes for
    // the instruction to complete).
    int latency_;

    // The sum of all the latencies on the path from this node to the end of
    // the graph (i.e. a node with no successor).
    int total_latency_;

    // The scheduler keeps a nominal cycle count to keep track of when the
    // result of an instruction is available. This field is updated by the
    // scheduler to indicate when the value of all the operands of this
    // instruction will be available.
    int start_cycle_;
  };

  // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
  // have been scheduled). Note that this class is intended to be extended by
  // concrete implementations of the scheduling queue, which define the policy
  // used to pop a node from the queue.
  class SchedulingQueueBase {
   public:
    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
        : scheduler_(scheduler), nodes_(scheduler->zone()) {}

    void AddNode(ScheduleGraphNode* node);

    bool IsEmpty() const { return nodes_.empty(); }

   protected:
    InstructionScheduler* scheduler_;
    ZoneLinkedList<ScheduleGraphNode*> nodes_;
  };

  // A scheduling queue which prioritizes nodes on the critical path (we look
  // for the instruction with the highest latency on the path to reach the end
  // of the graph).
  class CriticalPathFirstQueue : public SchedulingQueueBase {
   public:
    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
        : SchedulingQueueBase(scheduler) {}

    // Look for the best candidate to schedule, remove it from the queue and
    // return it.
    ScheduleGraphNode* PopBestCandidate(int cycle);
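
    // A sketch of the intended policy (the definitive version lives in the
    // .cc file): among the ready nodes, prefer the one with the highest
    // total_latency(); the 'cycle' argument lets the queue take operand
    // availability (start_cycle()) into account.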
  };

  // A queue which pops a random node from the queue to perform stress tests
  // on the scheduler.
  class StressSchedulerQueue : public SchedulingQueueBase {
   public:
    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
        : SchedulingQueueBase(scheduler) {}

    ScheduleGraphNode* PopBestCandidate(int cycle);

   private:
    Isolate* isolate() { return scheduler_->isolate(); }
  };

  // Perform scheduling for the current block, specifying the queue type to
  // use to determine the next best candidate.
  template <typename QueueType>
  void ScheduleBlock();
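
  // In outline, the per-block algorithm is expected to look like this sketch
  // (pseudo-code only; the definitive version is in the .cc file):
  //
  //   QueueType ready_list(this);
  //   add every node without unscheduled predecessors to ready_list;
  //   while (!ready_list.IsEmpty()) {
  //     candidate = ready_list.PopBestCandidate(cycle);
  //     if (candidate != nullptr) {
  //       emit candidate->instruction();
  //       for each successor: DropUnscheduledPredecessor(), and once it has
  //       no unscheduled predecessor left, add it to ready_list;
  //     }
  //     cycle++;
  //   }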

  // Return the scheduling properties of the given instruction.
  V8_EXPORT_PRIVATE int GetInstructionFlags(const Instruction* instr) const;
  int GetTargetInstructionFlags(const Instruction* instr) const;

  // Check whether the given instruction has side effects (e.g. function call,
  // memory store).
  bool HasSideEffect(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
  }

  // Return true if the instruction is a memory load.
  bool IsLoadOperation(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
  }

  // The scheduler will not move the following instructions before the last
  // deopt/trap check:
  //  * loads (this is conservative)
  //  * instructions with side effects
  //  * other deopts/traps
  // Any other instruction can be moved, apart from those that raise exceptions
  // on specific inputs - these are filtered out by the deopt/trap check.
  bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
  }

  // Return true if the instruction cannot be moved before the last deopt or
  // trap point we encountered.
  bool DependsOnDeoptOrTrap(const Instruction* instr) const {
    return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
           instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
  }

  // Identify nops used as a definition point for live-in registers at
  // function entry.
  bool IsFixedRegisterParameter(const Instruction* instr) const {
    return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
           (instr->OutputAt(0)->IsUnallocated()) &&
           (UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedRegisterPolicy() ||
            UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedFPRegisterPolicy());
  }

  void ComputeTotalLatencies();
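
  // A sketch of what ComputeTotalLatencies() is expected to establish, based
  // on the definition of total_latency_ above:
  //   total_latency(n) = latency(n) +
  //                      max over all successors s of total_latency(s)
  // where the max term is 0 when n has no successor. One way to compute this
  // is to walk the graph nodes in reverse order.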

  static int GetInstructionLatency(const Instruction* instr);

  Zone* zone() { return zone_; }
  InstructionSequence* sequence() { return sequence_; }
  Isolate* isolate() { return sequence()->isolate(); }

  Zone* zone_;
  InstructionSequence* sequence_;
  ZoneVector<ScheduleGraphNode*> graph_;

  friend class InstructionSchedulerTester;

  // Last side effect instruction encountered while building the graph.
  ScheduleGraphNode* last_side_effect_instr_;

  // Set of load instructions encountered since the last side effect
  // instruction which will be added as predecessors of the next instruction
  // with side effects.
  ZoneVector<ScheduleGraphNode*> pending_loads_;

  // Live-in register markers are nop instructions which are emitted at the
  // beginning of a basic block so that the register allocator will find a
  // defining instruction for live-in values. They must not be moved.
  // All these nops are chained together and added as a predecessor of every
  // other instruction in the basic block.
  ScheduleGraphNode* last_live_in_reg_marker_;

  // Last deoptimization or trap instruction encountered while building the
  // graph.
  ScheduleGraphNode* last_deopt_or_trap_;

  // Keep track of definition points for virtual registers. This is used to
  // record operand dependencies in the scheduling graph.
  ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_