1 | // Copyright 2018 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/constant-pool.h" |
6 | #include "src/assembler-inl.h" |
7 | |
8 | namespace v8 { |
9 | namespace internal { |
10 | |
11 | #if defined(V8_TARGET_ARCH_PPC) |
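
// ConstantPoolBuilder collects out-of-line constants while code is being
// assembled and emits them as a single contiguous pool. Based on the group
// ordering in Emit(), the pool layout is:
//
//   [regular doubles][regular pointers][overflow doubles][overflow pointers]
//
// Doubles come first so the regular double section inherits the pool's
// kDoubleSize alignment. Entries whose offset from the pool start fits in
// the load instruction's reach bits live in the regular sections; the rest
// spill into the overflow sections.
//
// Usage sketch (the reach values are illustrative, not the actual PPC
// instruction encodings, and |entry| is a previously constructed
// ConstantPoolEntry):
//
//   ConstantPoolBuilder builder(/*ptr_reach_bits=*/15,
//                               /*double_reach_bits=*/15);
//   ConstantPoolEntry::Access access =
//       builder.AddEntry(entry, ConstantPoolEntry::INTPTR);
//   ...
//   int pool_size = builder.Emit(assm);  // Returns the pool size in bytes.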
12 | |
13 | ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits, |
14 | int double_reach_bits) { |
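  // Heuristic: reserve room up front so typical pools grow without
  // reallocating the entry vector.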
15 | info_[ConstantPoolEntry::INTPTR].entries.reserve(64); |
16 | info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits; |
17 | info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits; |
18 | } |
19 | |
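// Returns the access mode the next entry of |type| would receive if it were
// appended now: REGULAR while the entry still fits within its section's
// reach, OVERFLOWED once it (or the pointer entries a new double would
// displace) would no longer be reachable.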
20 | ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess( |
21 | ConstantPoolEntry::Type type) const { |
22 | const PerTypeEntryInfo& info = info_[type]; |
23 | |
24 | if (info.overflow()) return ConstantPoolEntry::OVERFLOWED; |
25 | |
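  // Compute the offset the new entry would occupy. Doubles are emitted
  // ahead of pointers, so a pointer entry's offset includes the entire
  // double section.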
26 | int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count; |
27 | int dbl_offset = dbl_count * kDoubleSize; |
28 | int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count; |
29 | int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset; |
30 | |
31 | if (type == ConstantPoolEntry::DOUBLE) { |
    // Double overflow detection must take into account the reach of both
    // types: emitting another double grows the double section by
    // kDoubleSize and pushes every pointer entry back by the same amount,
    // so the last pointer entry must remain reachable as well.
33 | int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits; |
34 | if (!is_uintn(dbl_offset, info.regular_reach_bits) || |
35 | (ptr_count > 0 && |
36 | !is_uintn(ptr_offset + kDoubleSize - kSystemPointerSize, |
37 | ptr_reach_bits))) { |
38 | return ConstantPoolEntry::OVERFLOWED; |
39 | } |
40 | } else { |
    DCHECK_EQ(type, ConstantPoolEntry::INTPTR);
42 | if (!is_uintn(ptr_offset, info.regular_reach_bits)) { |
43 | return ConstantPoolEntry::OVERFLOWED; |
44 | } |
45 | } |
46 | |
47 | return ConstantPoolEntry::REGULAR; |
48 | } |
49 | |
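// Adds |entry| to the pool, first trying to merge it with an existing
// shared entry of equal value. Returns the access mode the entry received.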
50 | ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( |
51 | ConstantPoolEntry& entry, ConstantPoolEntry::Type type) { |
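  // Entries may not be added once the pool has been emitted.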
52 | DCHECK(!emitted_label_.is_bound()); |
53 | PerTypeEntryInfo& info = info_[type]; |
54 | const int entry_size = ConstantPoolEntry::size(type); |
55 | bool merged = false; |
56 | |
57 | if (entry.sharing_ok()) { |
    // Try to merge with an existing shared entry of equal value.
    auto it = info.shared_entries.begin();
60 | int end = static_cast<int>(info.shared_entries.size()); |
61 | for (int i = 0; i < end; i++, it++) { |
62 | if ((entry_size == kSystemPointerSize) |
63 | ? entry.value() == it->value() |
64 | : entry.value64() == it->value64()) { |
65 | // Merge with found entry. |
66 | entry.set_merged_index(i); |
67 | merged = true; |
68 | break; |
69 | } |
70 | } |
71 | } |
72 | |
73 | // By definition, merged entries have regular access. |
  DCHECK_IMPLIES(merged, entry.merged_index() < info.regular_count);
75 | ConstantPoolEntry::Access access = |
76 | (merged ? ConstantPoolEntry::REGULAR : NextAccess(type)); |
77 | |
78 | // Enforce an upper bound on search time by limiting the search to |
79 | // unique sharable entries which fit in the regular section. |
80 | if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { |
81 | info.shared_entries.push_back(entry); |
82 | } else { |
83 | info.entries.push_back(entry); |
84 | } |
85 | |
86 | // We're done if we found a match or have already triggered the |
87 | // overflow state. |
88 | if (merged || info.overflow()) return access; |
89 | |
90 | if (access == ConstantPoolEntry::REGULAR) { |
91 | info.regular_count++; |
92 | } else { |
93 | info.overflow_start = static_cast<int>(info.entries.size()) - 1; |
94 | } |
95 | |
96 | return access; |
97 | } |
98 | |
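// Emits all shared (deduplicated) entries of |type| at the current pc,
// recording each entry's offset so merged entries can reuse it, and patches
// the corresponding load instructions.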
99 | void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm, |
100 | ConstantPoolEntry::Type type) { |
101 | PerTypeEntryInfo& info = info_[type]; |
102 | std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries; |
103 | const int entry_size = ConstantPoolEntry::size(type); |
104 | int base = emitted_label_.pos(); |
105 | DCHECK_GT(base, 0); |
106 | int shared_end = static_cast<int>(shared_entries.size()); |
  auto shared_it = shared_entries.begin();
108 | for (int i = 0; i < shared_end; i++, shared_it++) { |
109 | int offset = assm->pc_offset() - base; |
110 | shared_it->set_offset(offset); // Save offset for merged entries. |
111 | if (entry_size == kSystemPointerSize) { |
112 | assm->dp(shared_it->value()); |
113 | } else { |
114 | assm->dq(shared_it->value64()); |
115 | } |
116 | DCHECK(is_uintn(offset, info.regular_reach_bits)); |
117 | |
118 | // Patch load sequence with correct offset. |
119 | assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset, |
120 | ConstantPoolEntry::REGULAR, type); |
121 | } |
122 | } |
123 | |
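// Emits one (access, type) group of the pool. The REGULAR pass for a type
// also emits that type's shared entries first; merged entries emit nothing
// here and are instead patched to point at the shared entry they merged
// with.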
124 | void ConstantPoolBuilder::EmitGroup(Assembler* assm, |
125 | ConstantPoolEntry::Access access, |
126 | ConstantPoolEntry::Type type) { |
127 | PerTypeEntryInfo& info = info_[type]; |
128 | const bool overflow = info.overflow(); |
129 | std::vector<ConstantPoolEntry>& entries = info.entries; |
130 | std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries; |
131 | const int entry_size = ConstantPoolEntry::size(type); |
132 | int base = emitted_label_.pos(); |
133 | DCHECK_GT(base, 0); |
134 | int begin; |
135 | int end; |
136 | |
  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first.
    EmitSharedEntries(assm, type);
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK_EQ(access, ConstantPoolEntry::OVERFLOWED);
    // Nothing to do if this type never overflowed.
    if (!overflow) return;
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }
151 | |
  auto it = entries.begin();
  if (begin > 0) std::advance(it, begin);
154 | for (int i = begin; i < end; i++, it++) { |
155 | // Update constant pool if necessary and get the entry's offset. |
156 | int offset; |
157 | ConstantPoolEntry::Access entry_access; |
158 | if (!it->is_merged()) { |
      // Emit a new entry at the current pc and compute its pool offset.
160 | offset = assm->pc_offset() - base; |
161 | entry_access = access; |
162 | if (entry_size == kSystemPointerSize) { |
163 | assm->dp(it->value()); |
164 | } else { |
165 | assm->dq(it->value64()); |
166 | } |
167 | } else { |
168 | // Retrieve offset from shared entry. |
169 | offset = shared_entries[it->merged_index()].offset(); |
170 | entry_access = ConstantPoolEntry::REGULAR; |
171 | } |
172 | |
173 | DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED || |
174 | is_uintn(offset, info.regular_reach_bits)); |
175 | |
176 | // Patch load sequence with correct offset. |
177 | assm->PatchConstantPoolAccessInstruction(it->position(), offset, |
178 | entry_access, type); |
179 | } |
180 | } |
181 | |
// Emit the pool and return its size in bytes (zero if the pool is empty).
183 | int ConstantPoolBuilder::Emit(Assembler* assm) { |
184 | bool emitted = emitted_label_.is_bound(); |
185 | bool empty = IsEmpty(); |
186 | |
187 | if (!emitted) { |
188 | // Mark start of constant pool. Align if necessary. |
189 | if (!empty) assm->DataAlign(kDoubleSize); |
190 | assm->bind(&emitted_label_); |
191 | if (!empty) { |
192 | // Emit in groups based on access and type. |
193 | // Emit doubles first for alignment purposes. |
194 | EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE); |
195 | EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR); |
196 | if (info_[ConstantPoolEntry::DOUBLE].overflow()) { |
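        // The regular pointer section emitted above need not be a multiple
        // of kDoubleSize (e.g. on 32-bit targets), so realign before the
        // overflow doubles.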
197 | assm->DataAlign(kDoubleSize); |
198 | EmitGroup(assm, ConstantPoolEntry::OVERFLOWED, |
199 | ConstantPoolEntry::DOUBLE); |
200 | } |
201 | if (info_[ConstantPoolEntry::INTPTR].overflow()) { |
202 | EmitGroup(assm, ConstantPoolEntry::OVERFLOWED, |
203 | ConstantPoolEntry::INTPTR); |
204 | } |
205 | } |
206 | } |
207 | |
  return empty ? 0 : (assm->pc_offset() - emitted_label_.pos());
209 | } |
210 | |
211 | #endif // defined(V8_TARGET_ARCH_PPC) |
212 | |
213 | } // namespace internal |
214 | } // namespace v8 |
215 | |